path: root/lib/test-string_helpers.c
author		David Howells <dhowells@redhat.com>	2021-02-09 15:02:05 +0000
committer	David S. Miller <davem@davemloft.net>	2021-02-10 15:23:31 -0800
commit		dc0e6056decc2c454f4d503fd73f8c57e16579a6 (patch)
tree		f42e6a310a7dde982ab180bea5d757db99ce8c66 /lib/test-string_helpers.c
parent		afdb9af9bcbd579dac77269035c86f1d9e6eac43 (diff)
rxrpc: Fix missing dependency on NET_UDP_TUNNEL
The changes to make rxrpc create the udp socket itself missed adding the
Kconfig dependency on the udp tunnel code needed to do this. Fix this by
making AF_RXRPC select NET_UDP_TUNNEL.

Fixes: 1a9b86c9fd95 ("rxrpc: use udp tunnel APIs instead of open code in rxrpc_open_socket")
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Vadim Fedorenko <vfedorenko@novek.ru>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Xin Long <lucien.xin@gmail.com>
cc: alaa@dev.mellanox.co.il
cc: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
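For reference, the fix amounts to a one-line Kconfig addition along these lines (a sketch, not the literal hunk from the commit; the surrounding AF_RXRPC option lines are assumed for context):

    --- a/net/rxrpc/Kconfig
    +++ b/net/rxrpc/Kconfig
     config AF_RXRPC
     	tristate "RxRPC session sockets"
     	depends on INET
    +	select NET_UDP_TUNNEL

With NET_UDP_TUNNEL selected, the udp tunnel code that rxrpc_open_socket() now uses is always built whenever AF_RXRPC is enabled, instead of depending on some other option happening to pull it in.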
Diffstat (limited to 'lib/test-string_helpers.c')
0 files changed, 0 insertions, 0 deletions
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1217
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c650
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c271
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c26
-rw-r--r--drivers/net/ethernet/racal/Kconfig33
-rw-r--r--drivers/net/ethernet/racal/Makefile5
-rw-r--r--drivers/net/ethernet/racal/ni5010.c771
-rw-r--r--drivers/net/ethernet/racal/ni5010.h144
-rw-r--r--drivers/net/ethernet/rdc/r6040.c14
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c1
-rw-r--r--drivers/net/ethernet/realtek/8139too.c1
-rw-r--r--drivers/net/ethernet/realtek/Kconfig4
-rw-r--r--drivers/net/ethernet/realtek/r8169.c121
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c12
-rw-r--r--drivers/net/ethernet/s6gmac.c2
-rw-r--r--drivers/net/ethernet/seeq/Kconfig12
-rw-r--r--drivers/net/ethernet/seeq/Makefile1
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.c749
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.h156
-rw-r--r--drivers/net/ethernet/sfc/ptp.c2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c6
-rw-r--r--drivers/net/ethernet/silan/Kconfig6
-rw-r--r--drivers/net/ethernet/silan/sc92031.c12
-rw-r--r--drivers/net/ethernet/sis/sis900.c22
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c9
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c7
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c1
-rw-r--r--drivers/net/ethernet/sun/Kconfig8
-rw-r--r--drivers/net/ethernet/sun/niu.c48
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sunqe.c7
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c6
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c8
-rw-r--r--drivers/net/ethernet/ti/Kconfig4
-rw-r--r--drivers/net/ethernet/ti/cpmac.c11
-rw-r--r--drivers/net/ethernet/ti/cpsw.c530
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c107
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h24
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c77
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h12
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c26
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c4
-rw-r--r--drivers/net/ethernet/tile/tilepro.c1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c12
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c13
-rw-r--r--drivers/net/ethernet/via/via-rhine.c56
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c1
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c1
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c37
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c7
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c3
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c7
-rw-r--r--drivers/net/hamradio/Kconfig4
-rw-r--r--drivers/net/hamradio/bpqether.c5
-rw-r--r--drivers/net/hamradio/dmascc.c7
-rw-r--r--drivers/net/hamradio/scc.c4
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hippi/Kconfig8
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c11
-rw-r--r--drivers/net/ieee802154/at86rf230.c12
-rw-r--r--drivers/net/ieee802154/fakehard.c1
-rw-r--r--drivers/net/ifb.c2
-rw-r--r--drivers/net/irda/Kconfig38
-rw-r--r--drivers/net/irda/ali-ircc.c2
-rw-r--r--drivers/net/irda/irtty-sir.c2
-rw-r--r--drivers/net/loopback.c5
-rw-r--r--drivers/net/macvlan.c56
-rw-r--r--drivers/net/macvtap.c1
-rw-r--r--drivers/net/netconsole.c44
-rw-r--r--drivers/net/ntb_netdev.c408
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/icplus.c29
-rw-r--r--drivers/net/phy/marvell.c9
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c4
-rw-r--r--drivers/net/phy/mdio_bus.c2
-rw-r--r--drivers/net/phy/micrel.c64
-rw-r--r--drivers/net/phy/phy_device.c15
-rw-r--r--drivers/net/phy/realtek.c50
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/ppp/Kconfig23
-rw-r--r--drivers/net/ppp/ppp_generic.c11
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/rionet.c8
-rw-r--r--drivers/net/slip/Kconfig1
-rw-r--r--drivers/net/team/Kconfig3
-rw-r--r--drivers/net/team/team.c261
-rw-r--r--drivers/net/team/team_mode_activebackup.c13
-rw-r--r--drivers/net/tun.c126
-rw-r--r--drivers/net/usb/Kconfig20
-rw-r--r--drivers/net/usb/asix.h18
-rw-r--r--drivers/net/usb/asix_common.c94
-rw-r--r--drivers/net/usb/asix_devices.c51
-rw-r--r--drivers/net/usb/ax88172a.c19
-rw-r--r--drivers/net/usb/catc.c6
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/cdc_mbim.c19
-rw-r--r--drivers/net/usb/cdc_ncm.c51
-rw-r--r--drivers/net/usb/dm9601.c56
-rw-r--r--drivers/net/usb/hso.c57
-rw-r--r--drivers/net/usb/kalmia.c1
-rw-r--r--drivers/net/usb/pegasus.c19
-rw-r--r--drivers/net/usb/qmi_wwan.c23
-rw-r--r--drivers/net/usb/rndis_host.c1
-rw-r--r--drivers/net/usb/rtl8150.c6
-rw-r--r--drivers/net/usb/sierra_net.c14
-rw-r--r--drivers/net/usb/smsc75xx.c6
-rw-r--r--drivers/net/usb/smsc95xx.c441
-rw-r--r--drivers/net/usb/usbnet.c44
-rw-r--r--drivers/net/veth.c177
-rw-r--r--drivers/net/virtio_net.c233
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c234
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c15
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h3
-rw-r--r--drivers/net/vxlan.c18
-rw-r--r--drivers/net/wan/Kconfig60
-rw-r--r--drivers/net/wan/Makefile5
-rw-r--r--drivers/net/wan/cosa.c9
-rw-r--r--drivers/net/wan/cycx_drv.c569
-rw-r--r--drivers/net/wan/cycx_main.c346
-rw-r--r--drivers/net/wan/cycx_x25.c1602
-rw-r--r--drivers/net/wan/farsync.c6
-rw-r--r--drivers/net/wan/hdlc.c9
-rw-r--r--drivers/net/wan/x25_asy.c1
-rw-r--r--drivers/net/wimax/i2400m/fw.c1
-rw-r--r--drivers/net/wimax/i2400m/netdev.c41
-rw-r--r--drivers/net/wimax/i2400m/rx.c17
-rw-r--r--drivers/net/wimax/i2400m/usb-notif.c1
-rw-r--r--drivers/net/wimax/i2400m/usb.c6
-rw-r--r--drivers/net/wireless/Kconfig10
-rw-r--r--drivers/net/wireless/airo_cs.c5
-rw-r--r--drivers/net/wireless/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c5
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c133
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c26
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c36
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c30
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c66
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h180
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c145
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c128
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c122
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h100
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h146
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h132
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h76
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h169
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c117
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c305
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c55
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h59
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c123
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c165
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c205
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c150
-rw-r--r--drivers/net/wireless/ath/carl9170/Kconfig2
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h19
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c37
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c115
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c133
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h6
-rw-r--r--drivers/net/wireless/ath/regd.c37
-rw-r--r--drivers/net/wireless/ath/regd.h10
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig29
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile13
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c572
-rw-r--r--drivers/net/wireless/ath/wil6210/dbg_hexdump.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c603
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c490
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c410
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c132
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c223
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c824
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h362
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h363
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c1020
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h1116
-rw-r--r--drivers/net/wireless/atmel_cs.c5
-rw-r--r--drivers/net/wireless/b43/Kconfig12
-rw-r--r--drivers/net/wireless/b43/b43.h5
-rw-r--r--drivers/net/wireless/b43/dma.h2
-rw-r--r--drivers/net/wireless/b43/main.c54
-rw-r--r--drivers/net/wireless/b43/main.h5
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c30
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h96
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h38
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c56
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h35
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c395
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c54
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h66
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c2277
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.h183
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c43
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c1463
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h113
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c54
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c114
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/scb.h1
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/ipw2x00/Kconfig2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c40
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c14
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c98
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c105
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c3
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h3
-rw-r--r--drivers/net/wireless/iwlegacy/common.c103
-rw-r--r--drivers/net/wireless/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h31
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c183
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c55
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c19
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c42
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c10
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c109
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c98
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c229
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c346
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h80
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c514
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h82
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h164
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/binding.c197
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c955
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c378
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h282
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h369
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h140
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h312
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h561
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h380
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h580
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h952
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c640
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c134
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c992
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c1314
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h500
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c311
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c682
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c292
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c207
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c197
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c3080
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h393
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c356
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c442
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c1241
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h374
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c519
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h214
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c916
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c472
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/7000.c111
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h6
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c10
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h17
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c58
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c361
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c89
-rw-r--r--drivers/net/wireless/libertas/cfg.c45
-rw-r--r--drivers/net/wireless/libertas/cfg.h3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c178
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c261
-rw-r--r--drivers/net/wireless/mwifiex/11ac.h26
-rw-r--r--drivers/net/wireless/mwifiex/11n.c55
-rw-r--r--drivers/net/wireless/mwifiex/11n.h6
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c6
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c5
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/README1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c211
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c159
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c30
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/mwifiex/decl.h18
-rw-r--r--drivers/net/wireless/mwifiex/fw.h141
-rw-r--r--drivers/net/wireless/mwifiex/init.c12
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h8
-rw-r--r--drivers/net/wireless/mwifiex/join.c44
-rw-r--r--drivers/net/wireless/mwifiex/main.h30
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c1264
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h228
-rw-r--r--drivers/net/wireless/mwifiex/scan.c101
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c30
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c12
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c58
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c6
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c44
-rw-r--r--drivers/net/wireless/mwifiex/usb.c34
-rw-r--r--drivers/net/wireless/mwifiex/util.c2
-rw-r--r--drivers/net/wireless/mwifiex/util.h8
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c15
-rw-r--r--drivers/net/wireless/mwl8k.c362
-rw-r--r--drivers/net/wireless/orinoco/main.c17
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c11
-rw-r--r--drivers/net/wireless/orinoco/scan.c4
-rw-r--r--drivers/net/wireless/p54/Kconfig2
-rw-r--r--drivers/net/wireless/p54/p54pci.c7
-rw-r--r--drivers/net/wireless/p54/p54usb.c12
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c1
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c14
-rw-r--r--drivers/net/wireless/ray_cs.c19
-rw-r--r--drivers/net/wireless/rndis_wlan.c9
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig5
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c816
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c36
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c53
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h30
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c108
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c78
-rw-r--r--drivers/net/wireless/rtl818x/Kconfig2
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig50
-rw-r--r--drivers/net/wireless/rtlwifi/base.c14
-rw-r--r--drivers/net/wireless/rtlwifi/core.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c13
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c37
-rw-r--r--drivers/net/wireless/rtlwifi/regd.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.c8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c7
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c57
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h3
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/ti/Kconfig9
-rw-r--r--drivers/net/wireless/ti/Makefile4
-rw-r--r--drivers/net/wireless/ti/wilink_platform_data.c (renamed from drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/Kconfig2
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c6
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c24
-rw-r--r--drivers/net/wireless/ti/wl1251/ps.c3
-rw-r--r--drivers/net/wireless/ti/wl12xx/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.c37
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.h20
-rw-r--r--drivers/net/wireless/ti/wl12xx/event.c116
-rw-r--r--drivers/net/wireless/ti/wl12xx/event.h111
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c195
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.c501
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.h140
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h40
-rw-r--r--drivers/net/wireless/ti/wl18xx/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c87
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h55
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.c80
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.h52
-rw-r--r--drivers/net/wireless/ti/wl18xx/conf.h22
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c111
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.h77
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c272
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.c326
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.h127
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c54
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h50
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig5
-rw-r--r--drivers/net/wireless/ti/wlcore/Makefile3
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c77
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c423
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h81
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h110
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c326
-rw-r--r--drivers/net/wireless/ti/wlcore/event.h99
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h41
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c19
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h12
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c1596
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c33
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c696
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.h144
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c34
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c31
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c298
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h35
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h119
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h58
-rw-r--r--drivers/net/wireless/zd1211rw/Kconfig2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/net/xen-netback/interface.c28
-rw-r--r--drivers/net/xen-netback/netback.c123
-rw-r--r--drivers/nfc/Kconfig16
-rw-r--r--drivers/nfc/Makefile3
-rw-r--r--drivers/nfc/microread/Kconfig35
-rw-r--r--drivers/nfc/microread/Makefile10
-rw-r--r--drivers/nfc/microread/i2c.c340
-rw-r--r--drivers/nfc/microread/mei.c246
-rw-r--r--drivers/nfc/microread/microread.c728
-rw-r--r--drivers/nfc/microread/microread.h33
-rw-r--r--drivers/nfc/nfcwilink.c10
-rw-r--r--drivers/nfc/pn533.c1593
-rw-r--r--drivers/nfc/pn544/Kconfig23
-rw-r--r--drivers/nfc/pn544/Makefile5
-rw-r--r--drivers/nfc/pn544/i2c.c44
-rw-r--r--drivers/nfc/pn544/pn544.c65
-rw-r--r--drivers/ntb/Kconfig13
-rw-r--r--drivers/ntb/Makefile3
-rw-r--r--drivers/ntb/ntb_hw.c1141
-rw-r--r--drivers/ntb/ntb_hw.h181
-rw-r--r--drivers/ntb/ntb_regs.h139
-rw-r--r--drivers/ntb/ntb_transport.c1441
-rw-r--r--drivers/of/address.c2
-rw-r--r--drivers/of/base.c303
-rw-r--r--drivers/of/device.c13
-rw-r--r--drivers/of/of_mdio.c4
-rw-r--r--drivers/of/of_private.h36
-rw-r--r--drivers/of/platform.c1
-rw-r--r--drivers/of/selftest.c54
-rw-r--r--drivers/parisc/Kconfig1
-rw-r--r--drivers/parisc/dino.c13
-rw-r--r--drivers/parisc/hppb.c6
-rw-r--r--drivers/parisc/pdc_stable.c6
-rw-r--r--drivers/parisc/superio.c2
-rw-r--r--drivers/parport/Kconfig2
-rw-r--r--drivers/parport/parport_serial.c21
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c56
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c11
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c8
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c11
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c60
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c15
-rw-r--r--drivers/pci/hotplug/shpchp.h3
-rw-r--r--drivers/pci/hotplug/shpchp_core.c36
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c6
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/msi.c26
-rw-r--r--drivers/pci/pci-acpi.c56
-rw-r--r--drivers/pci/pci.c26
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c63
-rw-r--r--drivers/pci/pcie/aspm.c3
-rw-r--r--drivers/pci/probe.c1
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pcmcia/Kconfig4
-rw-r--r--drivers/pcmcia/cs.c37
-rw-r--r--drivers/pcmcia/i82092.c8
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c6
-rw-r--r--drivers/pcmcia/vrc4171_card.c1
-rw-r--r--drivers/pinctrl/Kconfig39
-rw-r--r--drivers/pinctrl/Makefile11
-rw-r--r--drivers/pinctrl/core.c118
-rw-r--r--drivers/pinctrl/core.h29
-rw-r--r--drivers/pinctrl/devicetree.c5
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c8
-rw-r--r--drivers/pinctrl/pinconf-generic.c4
-rw-r--r--drivers/pinctrl/pinconf.c207
-rw-r--r--drivers/pinctrl/pinctrl-ab8500.c484
-rw-r--r--drivers/pinctrl/pinctrl-ab8505.c380
-rw-r--r--drivers/pinctrl/pinctrl-ab8540.c407
-rw-r--r--drivers/pinctrl/pinctrl-ab9540.c485
-rw-r--r--drivers/pinctrl/pinctrl-abx500.c1012
-rw-r--r--drivers/pinctrl/pinctrl-abx500.h234
-rw-r--r--drivers/pinctrl/pinctrl-at91.c7
-rw-r--r--drivers/pinctrl/pinctrl-bcm2835.c6
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c8
-rw-r--r--drivers/pinctrl/pinctrl-exynos5440.c18
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c51
-rw-r--r--drivers/pinctrl/pinctrl-imx.c6
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.c56
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.h1
-rw-r--r--drivers/pinctrl/pinctrl-mxs.c9
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c398
-rw-r--r--drivers/pinctrl/pinctrl-pxa3xx.c7
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c13
-rw-r--r--drivers/pinctrl/pinctrl-single.c79
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c18
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.c1505
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.h478
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c14
-rw-r--r--drivers/pinctrl/pinctrl-tegra.h16
-rw-r--r--drivers/pinctrl/pinctrl-tegra114.c2769
-rw-r--r--drivers/pinctrl/pinctrl-tegra20.c6
-rw-r--r--drivers/pinctrl/pinctrl-tegra30.c4
-rw-r--r--drivers/pinctrl/pinctrl-u300.c6
-rw-r--r--drivers/pinctrl/pinctrl-xway.c69
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig116
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile21
-rw-r--r--drivers/pinctrl/sh-pfc/core.c (renamed from drivers/sh/pfc/core.c)355
-rw-r--r--drivers/pinctrl/sh-pfc/core.h72
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c (renamed from drivers/sh/pfc/gpio.c)114
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c2612
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c2624
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7203.c1592
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c2131
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c2834
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7372.c1658
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c2798
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7720.c1236
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7722.c1779
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7723.c1903
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7724.c2225
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c2475
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7757.c2282
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7785.c1304
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7786.c837
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-shx3.c582
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c (renamed from drivers/sh/pfc/pinctrl.c)164
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h195
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c8
-rw-r--r--drivers/platform/Kconfig4
-rw-r--r--drivers/platform/Makefile1
-rw-r--r--drivers/platform/goldfish/Kconfig5
-rw-r--r--drivers/platform/goldfish/Makefile5
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c612
-rw-r--r--drivers/platform/goldfish/pdev_bus.c240
-rw-r--r--drivers/platform/x86/Kconfig6
-rw-r--r--drivers/platform/x86/acer-wmi.c60
-rw-r--r--drivers/platform/x86/asus-laptop.c21
-rw-r--r--drivers/platform/x86/classmate-laptop.c10
-rw-r--r--drivers/platform/x86/eeepc-laptop.c4
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c4
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c2
-rw-r--r--drivers/platform/x86/hp_accel.c2
-rw-r--r--drivers/platform/x86/ibm_rtl.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2
-rw-r--r--drivers/platform/x86/intel_menlow.c2
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/samsung-laptop.c14
-rw-r--r--drivers/platform/x86/sony-laptop.c19
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c3
-rw-r--r--drivers/platform/x86/topstar-laptop.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c4
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c4
-rw-r--r--drivers/platform/x86/wmi.c4
-rw-r--r--drivers/platform/x86/xo15-ebook.c2
-rw-r--r--drivers/pnp/pnpacpi/core.c10
-rw-r--r--drivers/pnp/pnpbios/Kconfig4
-rw-r--r--drivers/power/88pm860x_battery.c13
-rw-r--r--drivers/power/Kconfig14
-rw-r--r--drivers/power/Makefile4
-rw-r--r--drivers/power/ab8500_bmdata.c524
-rw-r--r--drivers/power/ab8500_btemp.c192
-rw-r--r--drivers/power/ab8500_charger.c1067
-rw-r--r--drivers/power/ab8500_fg.c447
-rw-r--r--drivers/power/abx500_chargalg.c204
-rw-r--r--drivers/power/bq2415x_charger.c54
-rw-r--r--drivers/power/bq27x00_battery.c12
-rw-r--r--drivers/power/charger-manager.c310
-rw-r--r--drivers/power/da9030_battery.c1
-rw-r--r--drivers/power/da9052-battery.c2
-rw-r--r--drivers/power/ds2782_battery.c69
-rw-r--r--drivers/power/generic-adc-battery.c20
-rw-r--r--drivers/power/goldfish_battery.c236
-rw-r--r--drivers/power/jz4740-battery.c7
-rw-r--r--drivers/power/lp8727_charger.c8
-rw-r--r--drivers/power/lp8788-charger.c25
-rw-r--r--drivers/power/max17040_battery.c4
-rw-r--r--drivers/power/pm2301_charger.c1088
-rw-r--r--drivers/power/pm2301_charger.h513
-rw-r--r--drivers/power/power_supply_core.c4
-rw-r--r--drivers/power/power_supply_sysfs.c3
-rw-r--r--drivers/power/reset/Kconfig17
-rw-r--r--drivers/power/reset/Makefile2
-rw-r--r--drivers/power/reset/qnap-poweroff.c116
-rw-r--r--drivers/power/reset/restart-poweroff.c65
-rw-r--r--drivers/pps/clients/Kconfig2
-rw-r--r--drivers/pps/clients/pps-ldisc.c30
-rw-r--r--drivers/pps/pps.c47
-rw-r--r--drivers/pwm/pwm-imx.c6
-rw-r--r--drivers/pwm/pwm-lpc32xx.c6
-rw-r--r--drivers/pwm/pwm-mxs.c6
-rw-r--r--drivers/pwm/pwm-puv3.c6
-rw-r--r--drivers/pwm/pwm-pxa.c6
-rw-r--r--drivers/pwm/pwm-spear.c6
-rw-r--r--drivers/pwm/pwm-tegra.c6
-rw-r--r--drivers/pwm/pwm-tiecap.c6
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c6
-rw-r--r--drivers/pwm/pwm-tipwmss.c6
-rw-r--r--drivers/pwm/pwm-vt8500.c6
-rw-r--r--drivers/regulator/88pm8607.c40
-rw-r--r--drivers/regulator/Kconfig10
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/anatop-regulator.c41
-rw-r--r--drivers/regulator/arizona-micsupp.c78
-rw-r--r--drivers/regulator/as3711-regulator.c2
-rw-r--r--drivers/regulator/core.c58
-rw-r--r--drivers/regulator/da9052-regulator.c46
-rw-r--r--drivers/regulator/da9055-regulator.c3
-rw-r--r--drivers/regulator/dbx500-prcmu.c1
-rw-r--r--drivers/regulator/gpio-regulator.c7
-rw-r--r--drivers/regulator/lp3971.c22
-rw-r--r--drivers/regulator/lp3972.c22
-rw-r--r--drivers/regulator/lp872x.c36
-rw-r--r--drivers/regulator/lp8755.c566
-rw-r--r--drivers/regulator/lp8788-buck.c41
-rw-r--r--drivers/regulator/lp8788-ldo.c133
-rw-r--r--drivers/regulator/max77686.c29
-rw-r--r--drivers/regulator/max8907-regulator.c10
-rw-r--r--drivers/regulator/max8925-regulator.c3
-rw-r--r--drivers/regulator/max8997.c164
-rw-r--r--drivers/regulator/max8998.c58
-rw-r--r--drivers/regulator/mc13892-regulator.c111
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c25
-rw-r--r--drivers/regulator/mc13xxx.h4
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/palmas-regulator.c7
-rw-r--r--drivers/regulator/s2mps11.c4
-rw-r--r--drivers/regulator/s5m8767.c270
-rw-r--r--drivers/regulator/tps51632-regulator.c152
-rw-r--r--drivers/regulator/tps6507x-regulator.c92
-rw-r--r--drivers/regulator/tps65090-regulator.c106
-rw-r--r--drivers/regulator/tps65217-regulator.c4
-rw-r--r--drivers/regulator/tps6586x-regulator.c54
-rw-r--r--drivers/regulator/tps65910-regulator.c8
-rw-r--r--drivers/regulator/tps80031-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig7
-rw-r--r--drivers/rpmsg/Kconfig3
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c2
-rw-r--r--drivers/rtc/Kconfig72
-rw-r--r--drivers/rtc/Makefile7
-rw-r--r--drivers/rtc/class.c11
-rw-r--r--drivers/rtc/interface.c6
-rw-r--r--drivers/rtc/rtc-at91rm9200.c17
-rw-r--r--drivers/rtc/rtc-cmos.c15
-rw-r--r--drivers/rtc/rtc-coh901331.c7
-rw-r--r--drivers/rtc/rtc-da9052.c18
-rw-r--r--drivers/rtc/rtc-da9055.c2
-rw-r--r--drivers/rtc/rtc-davinci.c28
-rw-r--r--drivers/rtc/rtc-dev.c11
-rw-r--r--drivers/rtc/rtc-ds1305.c8
-rw-r--r--drivers/rtc/rtc-ds1307.c11
-rw-r--r--drivers/rtc/rtc-ds2404.c18
-rw-r--r--drivers/rtc/rtc-efi.c10
-rw-r--r--drivers/rtc/rtc-fm3130.c33
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c292
-rw-r--r--drivers/rtc/rtc-imxdi.c4
-rw-r--r--drivers/rtc/rtc-isl12022.c2
-rw-r--r--drivers/rtc/rtc-isl1208.c3
-rw-r--r--drivers/rtc/rtc-lp8788.c338
-rw-r--r--drivers/rtc/rtc-max77686.c641
-rw-r--r--drivers/rtc/rtc-max8907.c6
-rw-r--r--drivers/rtc/rtc-max8997.c552
-rw-r--r--drivers/rtc/rtc-mpc5121.c5
-rw-r--r--drivers/rtc/rtc-pcf8523.c31
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pcf8583.c4
-rw-r--r--drivers/rtc/rtc-pl031.c12
-rw-r--r--drivers/rtc/rtc-pxa.c23
-rw-r--r--drivers/rtc/rtc-rs5c313.c5
-rw-r--r--drivers/rtc/rtc-rs5c372.c10
-rw-r--r--drivers/rtc/rtc-rx4581.c314
-rw-r--r--drivers/rtc/rtc-s3c.c26
-rw-r--r--drivers/rtc/rtc-sa1100.c15
-rw-r--r--drivers/rtc/rtc-snvs.c8
-rw-r--r--drivers/rtc/rtc-spear.c8
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c3
-rw-r--r--drivers/rtc/rtc-sun4v.c10
-rw-r--r--drivers/rtc/rtc-tegra.c8
-rw-r--r--drivers/rtc/rtc-tps6586x.c4
-rw-r--r--drivers/rtc/rtc-tps65910.c44
-rw-r--r--drivers/rtc/rtc-tps80031.c349
-rw-r--r--drivers/rtc/rtc-twl.c6
-rw-r--r--drivers/rtc/rtc-vr41xx.c2
-rw-r--r--drivers/rtc/rtc-vt8500.c30
-rw-r--r--drivers/rtc/rtc-wm831x.c9
-rw-r--r--drivers/rtc/systohc.c44
-rw-r--r--drivers/s390/block/dasd.c23
-rw-r--r--drivers/s390/block/dasd_3990_erp.c8
-rw-r--r--drivers/s390/block/dasd_alias.c4
-rw-r--r--drivers/s390/block/dasd_diag.c12
-rw-r--r--drivers/s390/block/dasd_eckd.c32
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_erp.c4
-rw-r--r--drivers/s390/block/dasd_fba.c4
-rw-r--r--drivers/s390/block/scm_blk.h41
-rw-r--r--drivers/s390/char/Kconfig8
-rw-r--r--drivers/s390/char/con3215.c20
-rw-r--r--drivers/s390/char/fs3270.c29
-rw-r--r--drivers/s390/char/keyboard.h16
-rw-r--r--drivers/s390/char/raw3270.c613
-rw-r--r--drivers/s390/char/raw3270.h12
-rw-r--r--drivers/s390/char/sclp.c8
-rw-r--r--drivers/s390/char/sclp_cmd.c10
-rw-r--r--drivers/s390/char/sclp_tty.c14
-rw-r--r--drivers/s390/char/sclp_vt220.c12
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/tape_3590.c2
-rw-r--r--drivers/s390/char/tty3270.c191
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/char/zcore.c64
-rw-r--r--drivers/s390/cio/chsc.c81
-rw-r--r--drivers/s390/cio/chsc.h2
-rw-r--r--drivers/s390/cio/chsc_sch.c2
-rw-r--r--drivers/s390/cio/cio.c14
-rw-r--r--drivers/s390/cio/cmf.c6
-rw-r--r--drivers/s390/cio/css.c2
-rw-r--r--drivers/s390/cio/device.c22
-rw-r--r--drivers/s390/cio/device.h5
-rw-r--r--drivers/s390/cio/device_fsm.c2
-rw-r--r--drivers/s390/cio/device_pgid.c123
-rw-r--r--drivers/s390/cio/eadm_sch.c2
-rw-r--r--drivers/s390/cio/io_sch.h5
-rw-r--r--drivers/s390/cio/qdio_main.c12
-rw-r--r--drivers/s390/cio/qdio_thinint.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/kvm/Makefile2
-rw-r--r--drivers/s390/kvm/kvm_virtio.c40
-rw-r--r--drivers/s390/kvm/virtio_ccw.c926
-rw-r--r--drivers/s390/net/Kconfig4
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/qeth_core.h10
-rw-r--r--drivers/s390/net/qeth_core_main.c256
-rw-r--r--drivers/s390/net/qeth_core_mpc.c1
-rw-r--r--drivers/s390/net/qeth_core_mpc.h5
-rw-r--r--drivers/s390/net/qeth_core_sys.c3
-rw-r--r--drivers/s390/net/qeth_l2_main.c16
-rw-r--r--drivers/s390/net/qeth_l3_main.c35
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c2
-rw-r--r--drivers/sbus/char/Kconfig7
-rw-r--r--drivers/scsi/Kconfig20
-rw-r--r--drivers/scsi/arm/Kconfig10
-rw-r--r--drivers/scsi/device_handler/Kconfig4
-rw-r--r--drivers/scsi/hosts.c4
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/scsi/osd/osd_uld.c26
-rw-r--r--drivers/scsi/scsi_lib.c14
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4
-rw-r--r--drivers/scsi/sr.c46
-rw-r--r--drivers/scsi/storvsc_drv.c12
-rw-r--r--drivers/sh/Kconfig1
-rw-r--r--drivers/sh/Makefile1
-rw-r--r--drivers/sh/clk/cpg.c6
-rw-r--r--drivers/sh/pfc/Kconfig26
-rw-r--r--drivers/sh/pfc/Makefile3
-rw-r--r--drivers/spi/Kconfig32
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/spi-altera.c2
-rw-r--r--drivers/spi/spi-ath79.c115
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-au1550.c8
-rw-r--r--drivers/spi/spi-bcm63xx.c179
-rw-r--r--drivers/spi/spi-bfin-sport.c3
-rw-r--r--drivers/spi/spi-bfin5xx.c5
-rw-r--r--drivers/spi/spi-bitbang.c33
-rw-r--r--drivers/spi/spi-clps711x.c2
-rw-r--r--drivers/spi/spi-coldfire-qspi.c3
-rw-r--r--drivers/spi/spi-davinci.c119
-rw-r--r--drivers/spi/spi-ep93xx.c9
-rw-r--r--drivers/spi/spi-falcon.c3
-rw-r--r--drivers/spi/spi-fsl-spi.c4
-rw-r--r--drivers/spi/spi-gpio.c23
-rw-r--r--drivers/spi/spi-imx.c1
-rw-r--r--drivers/spi/spi-mpc512x-psc.c17
-rw-r--r--drivers/spi/spi-mxs.c11
-rw-r--r--drivers/spi/spi-oc-tiny.c8
-rw-r--r--drivers/spi/spi-omap-100k.c6
-rw-r--r--drivers/spi/spi-omap-uwire.c6
-rw-r--r--drivers/spi/spi-omap2-mcspi.c49
-rw-r--r--drivers/spi/spi-orion.c21
-rw-r--r--drivers/spi/spi-ppc4xx.c10
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c392
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c133
-rw-r--r--drivers/spi/spi-pxa2xx-pxadma.c490
-rw-r--r--drivers/spi/spi-pxa2xx.c1103
-rw-r--r--drivers/spi/spi-pxa2xx.h221
-rw-r--r--drivers/spi/spi-s3c64xx.c160
-rw-r--r--drivers/spi/spi-sh-msiof.c56
-rw-r--r--drivers/spi/spi-sirf.c17
-rw-r--r--drivers/spi/spi-tegra20-sflash.c26
-rw-r--r--drivers/spi/spi-tegra20-slink.c27
-rw-r--r--drivers/spi/spi-txx9.c12
-rw-r--r--drivers/spi/spi.c26
-rw-r--r--drivers/ssb/Kconfig5
-rw-r--r--drivers/ssb/Makefile1
-rw-r--r--drivers/ssb/driver_chipcommon_sflash.c140
-rw-r--r--drivers/ssb/driver_gpio.c34
-rw-r--r--drivers/ssb/driver_mipscore.c51
-rw-r--r--drivers/ssb/main.c17
-rw-r--r--drivers/ssb/ssb_private.h20
-rw-r--r--drivers/staging/Kconfig8
-rw-r--r--drivers/staging/Makefile4
-rw-r--r--drivers/staging/android/Kconfig29
-rw-r--r--drivers/staging/android/alarm-dev.c277
-rw-r--r--drivers/staging/android/android_alarm.h19
-rw-r--r--drivers/staging/android/binder.c6
-rw-r--r--drivers/staging/android/binder.h6
-rw-r--r--drivers/staging/asus_oled/asus_oled.c15
-rw-r--r--drivers/staging/bcm/Adapter.h20
-rw-r--r--drivers/staging/bcm/Bcmchar.c14
-rw-r--r--drivers/staging/bcm/Bcmnet.c4
-rw-r--r--drivers/staging/bcm/CmHost.c6
-rw-r--r--drivers/staging/bcm/CmHost.h12
-rw-r--r--drivers/staging/bcm/Debug.h356
-rw-r--r--drivers/staging/bcm/IPv6Protocol.c34
-rw-r--r--drivers/staging/bcm/IPv6ProtocolHdr.h149
-rw-r--r--drivers/staging/bcm/InterfaceDld.c6
-rw-r--r--drivers/staging/bcm/InterfaceInit.c14
-rw-r--r--drivers/staging/bcm/Ioctl.h6
-rw-r--r--drivers/staging/bcm/Macros.h25
-rw-r--r--drivers/staging/bcm/Misc.c10
-rw-r--r--drivers/staging/bcm/PHSDefines.h200
-rw-r--r--drivers/staging/bcm/PHSModule.c160
-rw-r--r--drivers/staging/bcm/PHSModule.h14
-rw-r--r--drivers/staging/bcm/Protocol.h177
-rw-r--r--drivers/staging/bcm/Prototypes.h6
-rw-r--r--drivers/staging/bcm/Qos.c46
-rw-r--r--drivers/staging/bcm/hostmibs.c12
-rw-r--r--drivers/staging/bcm/led_control.c8
-rw-r--r--drivers/staging/bcm/led_control.h138
-rw-r--r--drivers/staging/bcm/nvm.c87
-rw-r--r--drivers/staging/bcm/nvm.h665
-rw-r--r--drivers/staging/bcm/target_params.h128
-rw-r--r--drivers/staging/bcm/vendorspecificextn.c2
-rw-r--r--drivers/staging/bcm/vendorspecificextn.h2
-rw-r--r--drivers/staging/ccg/Kconfig2
-rw-r--r--drivers/staging/ccg/u_ether.c10
-rw-r--r--drivers/staging/ccg/u_serial.c13
-rw-r--r--drivers/staging/ced1401/ced_ioc.c18
-rw-r--r--drivers/staging/ced1401/usb1401.c14
-rw-r--r--drivers/staging/ced1401/usb1401.h2
-rw-r--r--drivers/staging/comedi/Kconfig47
-rw-r--r--drivers/staging/comedi/Makefile19
-rw-r--r--drivers/staging/comedi/comedi.h12
-rw-r--r--drivers/staging/comedi/comedi_buf.c415
-rw-r--r--drivers/staging/comedi/comedi_compat32.c4
-rw-r--r--drivers/staging/comedi/comedi_fops.c1008
-rw-r--r--drivers/staging/comedi/comedi_internal.h33
-rw-r--r--drivers/staging/comedi/comedi_pci.c140
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.c160
-rw-r--r--drivers/staging/comedi/comedi_usb.c108
-rw-r--r--drivers/staging/comedi/comedidev.h370
-rw-r--r--drivers/staging/comedi/drivers.c859
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c9
-rw-r--r--drivers/staging/comedi/drivers/Makefile4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c26
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci16xx.c807
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci2200.c263
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c589
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c10
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c114
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c257
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1710.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c282
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c187
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c482
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_watchdog.c172
-rw-r--r--drivers/staging/comedi/drivers/addi_watchdog.h10
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c9
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c17
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c39
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c12
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c13
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c8
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c9
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c12
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c20
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c8
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c9
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c8
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c12
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c132
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c11
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c12
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c9
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c12
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c9
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c5
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c77
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c9
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c14
-rw-r--r--drivers/staging/comedi/drivers/das08.c358
-rw-r--r--drivers/staging/comedi/drivers/das08.h2
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c152
-rw-r--r--drivers/staging/comedi/drivers/das08_isa.c217
-rw-r--r--drivers/staging/comedi/drivers/das08_pci.c121
-rw-r--r--drivers/staging/comedi/drivers/das16.c2
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c11
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c30
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c11
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c11
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c13
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c15
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c9
-rw-r--r--drivers/staging/comedi/drivers/me4000.c14
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c9
-rw-r--r--drivers/staging/comedi/drivers/mite.c5
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c10
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c10
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c119
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c303
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c55
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c257
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c399
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c27
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c1
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c19
-rw-r--r--drivers/staging/comedi/drivers/pcm_common.c63
-rw-r--r--drivers/staging/comedi/drivers/pcm_common.h8
-rw-r--r--drivers/staging/comedi/drivers/pcmda12.c3
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c62
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c72
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c589
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c10
-rw-r--r--drivers/staging/comedi/drivers/s626.c8
-rw-r--r--drivers/staging/comedi/drivers/skel.c12
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c6
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c29
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c8
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c33
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c1272
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c8
-rw-r--r--drivers/staging/comedi/proc.c9
-rw-r--r--drivers/staging/cptm1217/clearpad_tm1217.c5
-rw-r--r--drivers/staging/csr/bh.c2
-rw-r--r--drivers/staging/csr/drv.c6
-rw-r--r--drivers/staging/csr/sme_sys.c41
-rw-r--r--drivers/staging/csr/unifi_sme.c3
-rw-r--r--drivers/staging/cxt1e1/linux.c4
-rw-r--r--drivers/staging/dgrp/Kconfig2
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c13
-rw-r--r--drivers/staging/dgrp/dgrp_specproc.c81
-rw-r--r--drivers/staging/dgrp/dgrp_tty.c1
-rw-r--r--drivers/staging/echo/echo.c42
-rw-r--r--drivers/staging/et131x/README4
-rw-r--r--drivers/staging/et131x/et131x.c801
-rw-r--r--drivers/staging/et131x/et131x.h96
-rw-r--r--drivers/staging/frontier/alphatrack.c51
-rw-r--r--drivers/staging/frontier/tranzport.c26
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000.h33
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c30
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c10
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_debug.c153
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_download.c110
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c166
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_proc.c6
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c39
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.h84
-rw-r--r--drivers/staging/ft1000/ft1000.h35
-rw-r--r--drivers/staging/fwserial/Kconfig6
-rw-r--r--drivers/staging/fwserial/TODO29
-rw-r--r--drivers/staging/fwserial/fwserial.c412
-rw-r--r--drivers/staging/fwserial/fwserial.h25
-rw-r--r--drivers/staging/gdm72xx/gdm_sdio.c8
-rw-r--r--drivers/staging/gdm72xx/sdio_boot.c9
-rw-r--r--drivers/staging/gdm72xx/usb_boot.c8
-rw-r--r--drivers/staging/goldfish/Kconfig13
-rw-r--r--drivers/staging/goldfish/Makefile6
-rw-r--r--drivers/staging/goldfish/README12
-rw-r--r--drivers/staging/goldfish/goldfish_audio.c363
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c444
-rw-r--r--drivers/staging/goldfish/goldfish_nand_reg.h72
-rw-r--r--drivers/staging/iio/Kconfig14
-rw-r--r--drivers/staging/iio/Makefile3
-rw-r--r--drivers/staging/iio/accel/Kconfig30
-rw-r--r--drivers/staging/iio/accel/Makefile2
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h8
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c18
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c12
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c13
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c6
-rw-r--r--drivers/staging/iio/adc/Kconfig4
-rw-r--r--drivers/staging/iio/adc/ad7280a.c6
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c534
-rw-r--r--drivers/staging/iio/frequency/ad5930.c5
-rw-r--r--drivers/staging/iio/frequency/ad9850.c5
-rw-r--r--drivers/staging/iio/frequency/ad9852.c5
-rw-r--r--drivers/staging/iio/gyro/Kconfig21
-rw-r--r--drivers/staging/iio/gyro/Makefile9
-rw-r--r--drivers/staging/iio/gyro/adxrs450.h62
-rw-r--r--drivers/staging/iio/iio_hwmon.c83
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c4
-rw-r--r--drivers/staging/iio/iio_simple_dummy_buffer.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/Kconfig2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c8
-rw-r--r--drivers/staging/iio/imu/Kconfig17
-rw-r--r--drivers/staging/iio/imu/Makefile7
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c1320
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c204
-rw-r--r--drivers/staging/iio/imu/adis16400_trigger.c74
-rw-r--r--drivers/staging/iio/light/Kconfig10
-rw-r--r--drivers/staging/iio/light/Makefile1
-rw-r--r--drivers/staging/iio/light/tsl2563.h9
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c78
-rw-r--r--drivers/staging/iio/meter/Kconfig2
-rw-r--r--drivers/staging/iio/meter/ade7753.c6
-rw-r--r--drivers/staging/iio/meter/ade7754.c5
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c28
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c12
-rw-r--r--drivers/staging/iio/meter/ade7759.c5
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c44
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c5
-rw-r--r--drivers/staging/iio/ring_sw.c366
-rw-r--r--drivers/staging/iio/ring_sw.h30
-rw-r--r--drivers/staging/iio/trigger/Kconfig1
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c1
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c8
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-di.c2
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c6
-rw-r--r--drivers/staging/keucr/usb.c188
-rw-r--r--drivers/staging/line6/Kconfig10
-rw-r--r--drivers/staging/line6/capture.c10
-rw-r--r--drivers/staging/line6/driver.c86
-rw-r--r--drivers/staging/line6/driver.h13
-rw-r--r--drivers/staging/line6/midi.c2
-rw-r--r--drivers/staging/line6/midi.h4
-rw-r--r--drivers/staging/line6/midibuf.c25
-rw-r--r--drivers/staging/line6/midibuf.h22
-rw-r--r--drivers/staging/line6/pcm.c36
-rw-r--r--drivers/staging/line6/playback.c9
-rw-r--r--drivers/staging/line6/pod.c105
-rw-r--r--drivers/staging/line6/toneport.c6
-rw-r--r--drivers/staging/line6/variax.c14
-rw-r--r--drivers/staging/nvec/TODO4
-rw-r--r--drivers/staging/nvec/nvec.c95
-rw-r--r--drivers/staging/nvec/nvec.h5
-rw-r--r--drivers/staging/nvec/nvec_kbd.c42
-rw-r--r--drivers/staging/nvec/nvec_power.c8
-rw-r--r--drivers/staging/nvec/nvec_ps2.c37
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c6
-rw-r--r--drivers/staging/octeon/ethernet.c8
-rw-r--r--drivers/staging/omap-thermal/omap-bandgap.c13
-rw-r--r--drivers/staging/omap-thermal/omap-thermal-common.c4
-rw-r--r--drivers/staging/omapdrm/Kconfig2
-rw-r--r--drivers/staging/omapdrm/Makefile1
-rw-r--r--drivers/staging/omapdrm/TODO3
-rw-r--r--drivers/staging/omapdrm/omap_connector.c115
-rw-r--r--drivers/staging/omapdrm/omap_crtc.c510
-rw-r--r--drivers/staging/omapdrm/omap_dmm_priv.h5
-rw-r--r--drivers/staging/omapdrm/omap_dmm_tiler.c172
-rw-r--r--drivers/staging/omapdrm/omap_drv.c457
-rw-r--r--drivers/staging/omapdrm/omap_drv.h144
-rw-r--r--drivers/staging/omapdrm/omap_encoder.c136
-rw-r--r--drivers/staging/omapdrm/omap_fb.c1
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c4
-rw-r--r--drivers/staging/omapdrm/omap_gem.c42
-rw-r--r--drivers/staging/omapdrm/omap_gem_dmabuf.c8
-rw-r--r--drivers/staging/omapdrm/omap_irq.c322
-rw-r--r--drivers/staging/omapdrm/omap_plane.c456
-rw-r--r--drivers/staging/omapdrm/tcm.h2
-rw-r--r--drivers/staging/ozwpan/TODO3
-rw-r--r--drivers/staging/ozwpan/ozcdev.c52
-rw-r--r--drivers/staging/ozwpan/ozcdev.h1
-rw-r--r--drivers/staging/ozwpan/ozeltbuf.c18
-rw-r--r--drivers/staging/ozwpan/ozevent.c8
-rw-r--r--drivers/staging/ozwpan/ozhcd.c152
-rw-r--r--drivers/staging/ozwpan/ozmain.c2
-rw-r--r--drivers/staging/ozwpan/ozpd.c88
-rw-r--r--drivers/staging/ozwpan/ozpd.h4
-rw-r--r--drivers/staging/ozwpan/ozproto.c84
-rw-r--r--drivers/staging/ozwpan/ozproto.h2
-rw-r--r--drivers/staging/ozwpan/ozusbif.h8
-rw-r--r--drivers/staging/ozwpan/ozusbsvc.c22
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c26
-rw-r--r--drivers/staging/panel/panel.c31
-rw-r--r--drivers/staging/quickstart/quickstart.c2
-rw-r--r--drivers/staging/ramster/Kconfig31
-rw-r--r--drivers/staging/ramster/Makefile6
-rw-r--r--drivers/staging/ramster/tmem.c894
-rw-r--r--drivers/staging/ramster/tmem.h259
-rw-r--r--drivers/staging/ramster/zcache-main.c1820
-rw-r--r--drivers/staging/rtl8187se/ieee80211/dot11d.c71
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_module.c4
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c6
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c408
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c3
-rw-r--r--drivers/staging/rtl8187se/r8185b_init.c301
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c11
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c6
-rw-r--r--drivers/staging/rtl8192u/changes1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/Makefile1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/aes.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/arc4.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/crypto_compat.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h102
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c394
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c280
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c31
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c54
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/internal.h1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c63
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h13
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c124
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h83
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c18
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl_crypto.h93
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.c2
-rw-r--r--drivers/staging/rtl8192u/r8180_pm.h2
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c13
-rw-r--r--drivers/staging/rtl8192u/r8192U.h299
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c589
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c197
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h24
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h16
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c37
-rw-r--r--drivers/staging/rtl8192u/r819xU_HTGen.h1
-rw-r--r--drivers/staging/rtl8192u/r819xU_HTType.h9
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c38
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.h44
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c109
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.h1
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c127
-rw-r--r--drivers/staging/rtl8192u/r819xU_phyreg.h1044
-rw-r--r--drivers/staging/rtl8712/ethernet.h7
-rw-r--r--drivers/staging/rtl8712/hal_init.c17
-rw-r--r--drivers/staging/rtl8712/ieee80211.h2
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c2
-rw-r--r--drivers/staging/rtl8712/os_intfs.c3
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c14
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c1
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c66
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.c181
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c9
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.h56
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c393
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.h6
-rw-r--r--drivers/staging/rtl8712/sta_info.h2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c35
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c15
-rw-r--r--drivers/staging/rtl8712/wifi.h171
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c3
-rw-r--r--drivers/staging/sb105x/Kconfig2
-rw-r--r--drivers/staging/sb105x/sb_mp_register.h2
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c20
-rw-r--r--drivers/staging/sbe-2t3e3/dc.c9
-rw-r--r--drivers/staging/sbe-2t3e3/module.c7
-rw-r--r--drivers/staging/sep/sep_crypto.c10
-rw-r--r--drivers/staging/sep/sep_main.c44
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c41
-rw-r--r--drivers/staging/slicoss/slic.h504
-rw-r--r--drivers/staging/slicoss/slichw.h6
-rw-r--r--drivers/staging/slicoss/slicoss.c35
-rw-r--r--drivers/staging/speakup/Kconfig2
-rw-r--r--drivers/staging/speakup/buffers.c14
-rw-r--r--drivers/staging/speakup/fakekey.c2
-rw-r--r--drivers/staging/speakup/i18n.c12
-rw-r--r--drivers/staging/speakup/i18n.h12
-rw-r--r--drivers/staging/speakup/keyhelp.c39
-rw-r--r--drivers/staging/speakup/kobjects.c84
-rw-r--r--drivers/staging/speakup/main.c370
-rw-r--r--drivers/staging/speakup/selection.c16
-rw-r--r--drivers/staging/speakup/serialio.c6
-rw-r--r--drivers/staging/speakup/speakup.h72
-rw-r--r--drivers/staging/speakup/speakup_acntpc.c6
-rw-r--r--drivers/staging/speakup/speakup_acntsa.c2
-rw-r--r--drivers/staging/speakup/speakup_apollo.c8
-rw-r--r--drivers/staging/speakup/speakup_audptr.c2
-rw-r--r--drivers/staging/speakup/speakup_bns.c2
-rw-r--r--drivers/staging/speakup/speakup_decext.c6
-rw-r--r--drivers/staging/speakup/speakup_decpc.c4
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c6
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c4
-rw-r--r--drivers/staging/speakup/speakup_dummy.c2
-rw-r--r--drivers/staging/speakup/speakup_keypc.c6
-rw-r--r--drivers/staging/speakup/speakup_ltlk.c2
-rw-r--r--drivers/staging/speakup/speakup_spkout.c2
-rw-r--r--drivers/staging/speakup/speakup_txprt.c2
-rw-r--r--drivers/staging/speakup/spk_priv.h8
-rw-r--r--drivers/staging/speakup/synth.c42
-rw-r--r--drivers/staging/speakup/thread.c4
-rw-r--r--drivers/staging/speakup/varhandlers.c66
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c18
-rw-r--r--drivers/staging/tidspbridge/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/core/_tiomap.h2
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c13
-rw-r--r--drivers/staging/tidspbridge/core/msg_sm.c3
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c34
-rw-r--r--drivers/staging/tidspbridge/core/wdt.c12
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/proc.h2
-rw-r--r--drivers/staging/tidspbridge/pmgr/cod.c2
-rw-r--r--drivers/staging/tidspbridge/pmgr/dbll.c2
-rw-r--r--drivers/staging/tidspbridge/pmgr/dspapi.c11
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c3
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c1
-rw-r--r--drivers/staging/tidspbridge/rmgr/nldr.c6
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c12
-rw-r--r--drivers/staging/tidspbridge/rmgr/proc.c23
-rw-r--r--drivers/staging/usbip/Kconfig2
-rw-r--r--drivers/staging/usbip/stub_dev.c42
-rw-r--r--drivers/staging/usbip/stub_rx.c5
-rw-r--r--drivers/staging/usbip/stub_tx.c1
-rw-r--r--drivers/staging/usbip/usbip_common.c3
-rw-r--r--drivers/staging/usbip/usbip_event.c6
-rw-r--r--drivers/staging/usbip/userspace/.gitignore28
-rw-r--r--drivers/staging/usbip/userspace/Makefile.am2
-rw-r--r--drivers/staging/usbip/userspace/README2
-rw-r--r--drivers/staging/usbip/userspace/configure.ac20
-rw-r--r--drivers/staging/usbip/userspace/src/Makefile.am4
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_attach.c15
-rw-r--r--drivers/staging/usbip/userspace/src/usbipd.c96
-rw-r--r--drivers/staging/usbip/vhci_hcd.c80
-rw-r--r--drivers/staging/usbip/vhci_rx.c10
-rw-r--r--drivers/staging/usbip/vhci_tx.c14
-rw-r--r--drivers/staging/vme/devices/Kconfig2
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c15
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c4
-rw-r--r--drivers/staging/vme/devices/vme_user.c2
-rw-r--r--drivers/staging/vt6655/channel.c11
-rw-r--r--drivers/staging/vt6655/device.h24
-rw-r--r--drivers/staging/vt6655/rxtx.c6
-rw-r--r--drivers/staging/vt6655/wcmd.c2
-rw-r--r--drivers/staging/vt6655/wmgr.c2
-rw-r--r--drivers/staging/vt6656/80211mgr.c36
-rw-r--r--drivers/staging/vt6656/80211mgr.h169
-rw-r--r--drivers/staging/vt6656/aes_ccmp.c10
-rw-r--r--drivers/staging/vt6656/aes_ccmp.h2
-rw-r--r--drivers/staging/vt6656/baseband.c91
-rw-r--r--drivers/staging/vt6656/baseband.h70
-rw-r--r--drivers/staging/vt6656/bssdb.c435
-rw-r--r--drivers/staging/vt6656/bssdb.h173
-rw-r--r--drivers/staging/vt6656/card.c366
-rw-r--r--drivers/staging/vt6656/card.h47
-rw-r--r--drivers/staging/vt6656/channel.c157
-rw-r--r--drivers/staging/vt6656/channel.h9
-rw-r--r--drivers/staging/vt6656/control.c55
-rw-r--r--drivers/staging/vt6656/control.h26
-rw-r--r--drivers/staging/vt6656/datarate.c109
-rw-r--r--drivers/staging/vt6656/datarate.h43
-rw-r--r--drivers/staging/vt6656/desc.h195
-rw-r--r--drivers/staging/vt6656/device.h879
-rw-r--r--drivers/staging/vt6656/device_cfg.h12
-rw-r--r--drivers/staging/vt6656/dpc.c470
-rw-r--r--drivers/staging/vt6656/dpc.h16
-rw-r--r--drivers/staging/vt6656/firmware.c33
-rw-r--r--drivers/staging/vt6656/firmware.h17
-rw-r--r--drivers/staging/vt6656/hostap.c147
-rw-r--r--drivers/staging/vt6656/hostap.h4
-rw-r--r--drivers/staging/vt6656/int.c22
-rw-r--r--drivers/staging/vt6656/int.h8
-rw-r--r--drivers/staging/vt6656/iocmd.h55
-rw-r--r--drivers/staging/vt6656/iowpa.h8
-rw-r--r--drivers/staging/vt6656/iwctl.c184
-rw-r--r--drivers/staging/vt6656/key.c364
-rw-r--r--drivers/staging/vt6656/key.h104
-rw-r--r--drivers/staging/vt6656/mac.c92
-rw-r--r--drivers/staging/vt6656/mac.h35
-rw-r--r--drivers/staging/vt6656/main_usb.c530
-rw-r--r--drivers/staging/vt6656/power.c104
-rw-r--r--drivers/staging/vt6656/power.h16
-rw-r--r--drivers/staging/vt6656/rf.c178
-rw-r--r--drivers/staging/vt6656/rf.h26
-rw-r--r--drivers/staging/vt6656/rxtx.c965
-rw-r--r--drivers/staging/vt6656/rxtx.h31
-rw-r--r--drivers/staging/vt6656/tether.c8
-rw-r--r--drivers/staging/vt6656/tether.h2
-rw-r--r--drivers/staging/vt6656/ttype.h23
-rw-r--r--drivers/staging/vt6656/usbpipe.c216
-rw-r--r--drivers/staging/vt6656/usbpipe.h40
-rw-r--r--drivers/staging/vt6656/wcmd.c323
-rw-r--r--drivers/staging/vt6656/wcmd.h18
-rw-r--r--drivers/staging/vt6656/wctl.c50
-rw-r--r--drivers/staging/vt6656/wctl.h10
-rw-r--r--drivers/staging/vt6656/wmgr.c1406
-rw-r--r--drivers/staging/vt6656/wmgr.h394
-rw-r--r--drivers/staging/vt6656/wpa.c30
-rw-r--r--drivers/staging/vt6656/wpa.h4
-rw-r--r--drivers/staging/vt6656/wpa2.c41
-rw-r--r--drivers/staging/vt6656/wpactl.c34
-rw-r--r--drivers/staging/vt6656/wpactl.h2
-rw-r--r--drivers/staging/winbond/Kconfig2
-rw-r--r--drivers/staging/wlags49_h2/ap_h2.c8
-rw-r--r--drivers/staging/wlags49_h2/ap_h25.c78
-rw-r--r--drivers/staging/wlags49_h2/sta_h2.c80
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.c128
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c14
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.h94
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c1113
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.h58
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.h12
-rw-r--r--drivers/staging/wlags49_h2/wl_util.h38
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c2
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c9
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h31
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c42
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c84
-rw-r--r--drivers/staging/xgifb/XGIfb.h2
-rw-r--r--drivers/staging/xgifb/vb_init.c119
-rw-r--r--drivers/staging/xgifb/vb_init.h1
-rw-r--r--drivers/staging/xgifb/vb_setmode.c684
-rw-r--r--drivers/staging/xgifb/vb_struct.h5
-rw-r--r--drivers/staging/xgifb/vb_table.h168
-rw-r--r--drivers/staging/zcache/Kconfig34
-rw-r--r--drivers/staging/zcache/Makefile5
-rw-r--r--drivers/staging/zcache/TODO69
-rw-r--r--drivers/staging/zcache/ramster.h (renamed from drivers/staging/ramster/ramster.h)0
-rw-r--r--drivers/staging/zcache/ramster/heartbeat.c (renamed from drivers/staging/ramster/ramster/heartbeat.c)0
-rw-r--r--drivers/staging/zcache/ramster/heartbeat.h (renamed from drivers/staging/ramster/ramster/heartbeat.h)0
-rw-r--r--drivers/staging/zcache/ramster/masklog.c (renamed from drivers/staging/ramster/ramster/masklog.c)0
-rw-r--r--drivers/staging/zcache/ramster/masklog.h (renamed from drivers/staging/ramster/ramster/masklog.h)0
-rw-r--r--drivers/staging/zcache/ramster/nodemanager.c (renamed from drivers/staging/ramster/ramster/nodemanager.c)0
-rw-r--r--drivers/staging/zcache/ramster/nodemanager.h (renamed from drivers/staging/ramster/ramster/nodemanager.h)0
-rw-r--r--drivers/staging/zcache/ramster/r2net.c (renamed from drivers/staging/ramster/ramster/r2net.c)0
-rw-r--r--drivers/staging/zcache/ramster/ramster.c (renamed from drivers/staging/ramster/ramster/ramster.c)34
-rw-r--r--drivers/staging/zcache/ramster/ramster.h (renamed from drivers/staging/ramster/ramster/ramster.h)0
-rw-r--r--drivers/staging/zcache/ramster/ramster_nodemanager.h (renamed from drivers/staging/ramster/ramster/ramster_nodemanager.h)0
-rw-r--r--drivers/staging/zcache/ramster/tcp.c (renamed from drivers/staging/ramster/ramster/tcp.c)0
-rw-r--r--drivers/staging/zcache/ramster/tcp.h (renamed from drivers/staging/ramster/ramster/tcp.h)0
-rw-r--r--drivers/staging/zcache/ramster/tcp_internal.h (renamed from drivers/staging/ramster/ramster/tcp_internal.h)0
-rw-r--r--drivers/staging/zcache/tmem.c327
-rw-r--r--drivers/staging/zcache/tmem.h83
-rw-r--r--drivers/staging/zcache/zbud.c (renamed from drivers/staging/ramster/zbud.c)43
-rw-r--r--drivers/staging/zcache/zbud.h (renamed from drivers/staging/ramster/zbud.h)0
-rw-r--r--drivers/staging/zcache/zcache-main.c2558
-rw-r--r--drivers/staging/zcache/zcache.h (renamed from drivers/staging/ramster/zcache.h)0
-rw-r--r--drivers/staging/zram/Kconfig2
-rw-r--r--drivers/staging/zram/zram.txt27
-rw-r--r--drivers/staging/zram/zram_drv.c323
-rw-r--r--drivers/staging/zram/zram_drv.h17
-rw-r--r--drivers/staging/zram/zram_sysfs.c16
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c31
-rw-r--r--drivers/staging/zsmalloc/zsmalloc.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/sbp/sbp_target.c2
-rw-r--r--drivers/target/target_core_alua.c2
-rw-r--r--drivers/target/target_core_device.c8
-rw-r--r--drivers/target/target_core_fabric_configfs.c5
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_sbc.c18
-rw-r--r--drivers/target/target_core_spc.c44
-rw-r--r--drivers/target/target_core_transport.c20
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c12
-rw-r--r--drivers/thermal/exynos_thermal.c8
-rw-r--r--drivers/tty/Kconfig32
-rw-r--r--drivers/tty/Makefile4
-rw-r--r--drivers/tty/amiserial.c18
-rw-r--r--drivers/tty/bfin_jtag_comm.c22
-rw-r--r--drivers/tty/cyclades.c297
-rw-r--r--drivers/tty/ehv_bytechan.c13
-rw-r--r--drivers/tty/goldfish.c328
-rw-r--r--drivers/tty/hvc/Kconfig3
-rw-r--r--drivers/tty/hvc/hvc_console.c6
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/hvc/hvcs.c6
-rw-r--r--drivers/tty/hvc/hvsi.c32
-rw-r--r--drivers/tty/ipwireless/hardware.c4
-rw-r--r--drivers/tty/ipwireless/tty.c12
-rw-r--r--drivers/tty/isicom.c12
-rw-r--r--drivers/tty/metag_da.c677
-rw-r--r--drivers/tty/moxa.c10
-rw-r--r--drivers/tty/mxser.c50
-rw-r--r--drivers/tty/n_gsm.c120
-rw-r--r--drivers/tty/n_tty.c59
-rw-r--r--drivers/tty/nozomi.c37
-rw-r--r--drivers/tty/pty.c30
-rw-r--r--drivers/tty/rocket.c60
-rw-r--r--drivers/tty/serial/21285.c3
-rw-r--r--drivers/tty/serial/68328serial.c19
-rw-r--r--drivers/tty/serial/8250/8250.c143
-rw-r--r--drivers/tty/serial/8250/8250.h51
-rw-r--r--drivers/tty/serial/8250/8250_dma.c216
-rw-r--r--drivers/tty/serial/8250/8250_dw.c259
-rw-r--r--drivers/tty/serial/8250/8250_early.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c384
-rw-r--r--drivers/tty/serial/8250/Kconfig27
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig41
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/altera_jtaguart.c8
-rw-r--r--drivers/tty/serial/altera_uart.c8
-rw-r--r--drivers/tty/serial/amba-pl010.c3
-rw-r--r--drivers/tty/serial/amba-pl011.c11
-rw-r--r--drivers/tty/serial/apbuart.c3
-rw-r--r--drivers/tty/serial/ar933x_uart.c15
-rw-r--r--drivers/tty/serial/arc_uart.c104
-rw-r--r--drivers/tty/serial/atmel_serial.c9
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c9
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c12
-rw-r--r--drivers/tty/serial/bfin_uart.c10
-rw-r--r--drivers/tty/serial/clps711x.c8
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c10
-rw-r--r--drivers/tty/serial/crisv10.c35
-rw-r--r--drivers/tty/serial/dz.c4
-rw-r--r--drivers/tty/serial/efm32-uart.c52
-rw-r--r--drivers/tty/serial/icom.c10
-rw-r--r--drivers/tty/serial/ifx6x60.c15
-rw-r--r--drivers/tty/serial/imx.c282
-rw-r--r--drivers/tty/serial/ioc3_serial.c11
-rw-r--r--drivers/tty/serial/ioc4_serial.c13
-rw-r--r--drivers/tty/serial/ip22zilog.c30
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c18
-rw-r--r--drivers/tty/serial/kgdb_nmi.c13
-rw-r--r--drivers/tty/serial/lantiq.c20
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c30
-rw-r--r--drivers/tty/serial/m32r_sio.c8
-rw-r--r--drivers/tty/serial/max3100.c13
-rw-r--r--drivers/tty/serial/max310x.c8
-rw-r--r--drivers/tty/serial/mcf.c73
-rw-r--r--drivers/tty/serial/mfd.c15
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c8
-rw-r--r--drivers/tty/serial/mpsc.c15
-rw-r--r--drivers/tty/serial/mrst_max3110.c19
-rw-r--r--drivers/tty/serial/msm_serial.c16
-rw-r--r--drivers/tty/serial/msm_serial_hs.c19
-rw-r--r--drivers/tty/serial/msm_smd_tty.c4
-rw-r--r--drivers/tty/serial/mux.c9
-rw-r--r--drivers/tty/serial/mxs-auart.c17
-rw-r--r--drivers/tty/serial/netx-serial.c4
-rw-r--r--drivers/tty/serial/nwpserial.c6
-rw-r--r--drivers/tty/serial/of_serial.c7
-rw-r--r--drivers/tty/serial/omap-serial.c51
-rw-r--r--drivers/tty/serial/pch_uart.c90
-rw-r--r--drivers/tty/serial/pmac_zilog.c36
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c3
-rw-r--r--drivers/tty/serial/pxa.c17
-rw-r--r--drivers/tty/serial/rp2.c885
-rw-r--r--drivers/tty/serial/sa1100.c3
-rw-r--r--drivers/tty/serial/samsung.c16
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/sc26xx.c29
-rw-r--r--drivers/tty/serial/sccnxp.c179
-rw-r--r--drivers/tty/serial/serial-tegra.c1401
-rw-r--r--drivers/tty/serial/serial_core.c94
-rw-r--r--drivers/tty/serial/serial_ks8695.c3
-rw-r--r--drivers/tty/serial/serial_txx9.c3
-rw-r--r--drivers/tty/serial/sh-sci.c52
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c55
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h3
-rw-r--r--drivers/tty/serial/sn_console.c16
-rw-r--r--drivers/tty/serial/sunhv.c33
-rw-r--r--drivers/tty/serial/sunsab.c28
-rw-r--r--drivers/tty/serial/sunsu.c18
-rw-r--r--drivers/tty/serial/sunzilog.c39
-rw-r--r--drivers/tty/serial/timbuart.c6
-rw-r--r--drivers/tty/serial/uartlite.c119
-rw-r--r--drivers/tty/serial/ucc_uart.c10
-rw-r--r--drivers/tty/serial/vr41xx_siu.c4
-rw-r--r--drivers/tty/serial/vt8500_serial.c54
-rw-r--r--drivers/tty/serial/xilinx_uartps.c50
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/synclink.c50
-rw-r--r--drivers/tty/synclink_gt.c61
-rw-r--r--drivers/tty/synclinkmp.c103
-rw-r--r--drivers/tty/sysrq.c277
-rw-r--r--drivers/tty/tty_buffer.c131
-rw-r--r--drivers/tty/tty_io.c15
-rw-r--r--drivers/tty/tty_ioctl.c12
-rw-r--r--drivers/tty/tty_ldisc.c14
-rw-r--r--drivers/tty/vt/Makefile4
-rw-r--r--drivers/tty/vt/keyboard.c25
-rw-r--r--drivers/tty/vt/vt.c18
-rw-r--r--drivers/uio/Kconfig1
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/c67x00/c67x00-ll-hpi.c2
-rw-r--r--drivers/usb/chipidea/ci13xxx_imx.h2
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/debug.c2
-rw-r--r--drivers/usb/chipidea/host.c3
-rw-r--r--drivers/usb/chipidea/usbmisc_imx6q.c6
-rw-r--r--drivers/usb/class/Kconfig2
-rw-r--r--drivers/usb/class/cdc-acm.c16
-rw-r--r--drivers/usb/core/Makefile1
-rw-r--r--drivers/usb/core/devices.c13
-rw-r--r--drivers/usb/core/devio.c3
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd.c49
-rw-r--r--drivers/usb/core/hub.c787
-rw-r--r--drivers/usb/core/hub.h122
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/core/port.c202
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/sysfs.c31
-rw-r--r--drivers/usb/core/usb.h12
-rw-r--r--drivers/usb/dwc3/Kconfig31
-rw-r--r--drivers/usb/dwc3/Makefile10
-rw-r--r--drivers/usb/dwc3/core.c31
-rw-r--r--drivers/usb/dwc3/core.h24
-rw-r--r--drivers/usb/dwc3/debugfs.c40
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c57
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c152
-rw-r--r--drivers/usb/dwc3/gadget.c293
-rw-r--r--drivers/usb/dwc3/host.c2
-rw-r--r--drivers/usb/gadget/Kconfig30
-rw-r--r--drivers/usb/gadget/Makefile8
-rw-r--r--drivers/usb/gadget/acm_ms.c42
-rw-r--r--drivers/usb/gadget/amd5536udc.c63
-rw-r--r--drivers/usb/gadget/amd5536udc.h2
-rw-r--r--drivers/usb/gadget/at91_udc.c17
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c12
-rw-r--r--drivers/usb/gadget/bcm63xx_udc.c13
-rw-r--r--drivers/usb/gadget/cdc2.c36
-rw-r--r--drivers/usb/gadget/composite.c326
-rw-r--r--drivers/usb/gadget/dbgp.c14
-rw-r--r--drivers/usb/gadget/dummy_hcd.c9
-rw-r--r--drivers/usb/gadget/f_acm.c153
-rw-r--r--drivers/usb/gadget/f_fs.c11
-rw-r--r--drivers/usb/gadget/f_loopback.c103
-rw-r--r--drivers/usb/gadget/f_mass_storage.c37
-rw-r--r--drivers/usb/gadget/f_ncm.c18
-rw-r--r--drivers/usb/gadget/f_obex.c4
-rw-r--r--drivers/usb/gadget/f_serial.c4
-rw-r--r--drivers/usb/gadget/f_sourcesink.c200
-rw-r--r--drivers/usb/gadget/f_uac2.c9
-rw-r--r--drivers/usb/gadget/f_uvc.c3
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c40
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c102
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h5
-rw-r--r--drivers/usb/gadget/functions.c116
-rw-r--r--drivers/usb/gadget/fusb300_udc.c80
-rw-r--r--drivers/usb/gadget/fusb300_udc.h2
-rw-r--r--drivers/usb/gadget/g_zero.h35
-rw-r--r--drivers/usb/gadget/gmidi.c2
-rw-r--r--drivers/usb/gadget/goku_udc.c70
-rw-r--r--drivers/usb/gadget/goku_udc.h1
-rw-r--r--drivers/usb/gadget/imx_udc.c12
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c12
-rw-r--r--drivers/usb/gadget/m66592-udc.c84
-rw-r--r--drivers/usb/gadget/m66592-udc.h1
-rw-r--r--drivers/usb/gadget/multi.c71
-rw-r--r--drivers/usb/gadget/mv_udc_core.c250
-rw-r--r--drivers/usb/gadget/net2280.c15
-rw-r--r--drivers/usb/gadget/nokia.c43
-rw-r--r--drivers/usb/gadget/omap_udc.c51
-rw-r--r--drivers/usb/gadget/pch_udc.c67
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c77
-rw-r--r--drivers/usb/gadget/pxa25x_udc.h1
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c61
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h1
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c17
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c56
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c20
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c65
-rw-r--r--drivers/usb/gadget/s3c2410_udc.h1
-rw-r--r--drivers/usb/gadget/serial.c118
-rw-r--r--drivers/usb/gadget/storage_common.c61
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c3
-rw-r--r--drivers/usb/gadget/u_ether.c10
-rw-r--r--drivers/usb/gadget/u_serial.c330
-rw-r--r--drivers/usb/gadget/u_serial.h13
-rw-r--r--drivers/usb/gadget/udc-core.c157
-rw-r--r--drivers/usb/gadget/webcam.c2
-rw-r--r--drivers/usb/gadget/zero.c233
-rw-r--r--drivers/usb/host/Kconfig4
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-atmel.c7
-rw-r--r--drivers/usb/host/ehci-fsl.c9
-rw-r--r--drivers/usb/host/ehci-grlib.c9
-rw-r--r--drivers/usb/host/ehci-hcd.c13
-rw-r--r--drivers/usb/host/ehci-hub.c9
-rw-r--r--drivers/usb/host/ehci-mv.c5
-rw-r--r--drivers/usb/host/ehci-mxc.c147
-rw-r--r--drivers/usb/host/ehci-omap.c10
-rw-r--r--drivers/usb/host/ehci-pci.c39
-rw-r--r--drivers/usb/host/ehci-platform.c7
-rw-r--r--drivers/usb/host/ehci-ppc-of.c8
-rw-r--r--drivers/usb/host/ehci-q.c50
-rw-r--r--drivers/usb/host/ehci-s5p.c83
-rw-r--r--drivers/usb/host/ehci-sched.c9
-rw-r--r--drivers/usb/host/ehci-sead3.c8
-rw-r--r--drivers/usb/host/ehci-sh.c7
-rw-r--r--drivers/usb/host/ehci-tegra.c97
-rw-r--r--drivers/usb/host/ehci-timer.c29
-rw-r--r--drivers/usb/host/ehci-vt8500.c8
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c8
-rw-r--r--drivers/usb/host/ehci.h7
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c3
-rw-r--r--drivers/usb/host/imx21-hcd.c1
-rw-r--r--drivers/usb/host/isp1760-hcd.c4
-rw-r--r--drivers/usb/host/ohci-exynos.c87
-rw-r--r--drivers/usb/host/ohci-nxp.c7
-rw-r--r--drivers/usb/host/ohci-platform.c7
-rw-r--r--drivers/usb/host/ohci-q.c1
-rw-r--r--drivers/usb/host/ohci-s3c2410.c7
-rw-r--r--drivers/usb/host/ohci-tmio.c3
-rw-r--r--drivers/usb/host/pci-quirks.c1
-rw-r--r--drivers/usb/host/uhci-debug.c178
-rw-r--r--drivers/usb/host/uhci-hcd.c46
-rw-r--r--drivers/usb/host/uhci-hcd.h4
-rw-r--r--drivers/usb/host/uhci-hub.c7
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/host/xhci-hub.c38
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-ring.c24
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/misc/Kconfig6
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/usb3503.c325
-rw-r--r--drivers/usb/misc/usbtest.c15
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/am35x.c2
-rw-r--r--drivers/usb/musb/blackfin.c2
-rw-r--r--drivers/usb/musb/cppi_dma.c4
-rw-r--r--drivers/usb/musb/da8xx.c7
-rw-r--r--drivers/usb/musb/davinci.c7
-rw-r--r--drivers/usb/musb/musb_core.c6
-rw-r--r--drivers/usb/musb/musb_dsps.c15
-rw-r--r--drivers/usb/musb/musb_gadget.c22
-rw-r--r--drivers/usb/musb/musb_host.c44
-rw-r--r--drivers/usb/musb/omap2430.c91
-rw-r--r--drivers/usb/musb/omap2430.h9
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/musb/ux500.c12
-rw-r--r--drivers/usb/otg/Kconfig2
-rw-r--r--drivers/usb/otg/gpio_vbus.c12
-rw-r--r--drivers/usb/otg/msm_otg.c13
-rw-r--r--drivers/usb/otg/mv_otg.c88
-rw-r--r--drivers/usb/otg/mxs-phy.c26
-rw-r--r--drivers/usb/otg/otg.c235
-rw-r--r--drivers/usb/otg/twl4030-usb.c3
-rw-r--r--drivers/usb/phy/Kconfig28
-rw-r--r--drivers/usb/phy/Makefile3
-rw-r--r--drivers/usb/phy/mv_u3d_phy.c8
-rw-r--r--drivers/usb/phy/omap-control-usb.c295
-rw-r--r--drivers/usb/phy/omap-usb2.c72
-rw-r--r--drivers/usb/phy/omap-usb3.c355
-rw-r--r--drivers/usb/phy/samsung-usbphy.c930
-rw-r--r--drivers/usb/phy/tegra_usb_phy.c132
-rw-r--r--drivers/usb/renesas_usbhs/Kconfig2
-rw-r--r--drivers/usb/renesas_usbhs/common.c9
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c24
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c3
-rw-r--r--drivers/usb/serial/Kconfig14
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/aircable.c17
-rw-r--r--drivers/usb/serial/ark3116.c12
-rw-r--r--drivers/usb/serial/belkin_sa.c12
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/cyberjack.c11
-rw-r--r--drivers/usb/serial/cypress_m8.c6
-rw-r--r--drivers/usb/serial/digi_acceleport.c14
-rw-r--r--drivers/usb/serial/f81232.c15
-rw-r--r--drivers/usb/serial/ftdi_sio.c45
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h15
-rw-r--r--drivers/usb/serial/garmin_gps.c9
-rw-r--r--drivers/usb/serial/generic.c12
-rw-r--r--drivers/usb/serial/io_edgeport.c39
-rw-r--r--drivers/usb/serial/io_ti.c118
-rw-r--r--drivers/usb/serial/ir-usb.c9
-rw-r--r--drivers/usb/serial/iuu_phoenix.c9
-rw-r--r--drivers/usb/serial/keyspan.c64
-rw-r--r--drivers/usb/serial/keyspan_pda.c9
-rw-r--r--drivers/usb/serial/kl5kusb105.c10
-rw-r--r--drivers/usb/serial/kobil_sct.c9
-rw-r--r--drivers/usb/serial/mct_u232.c33
-rw-r--r--drivers/usb/serial/metro-usb.c9
-rw-r--r--drivers/usb/serial/mos7720.c9
-rw-r--r--drivers/usb/serial/mos7840.c10
-rw-r--r--drivers/usb/serial/navman.c9
-rw-r--r--drivers/usb/serial/omninet.c10
-rw-r--r--drivers/usb/serial/opticon.c11
-rw-r--r--drivers/usb/serial/option.c56
-rw-r--r--drivers/usb/serial/oti6858.c9
-rw-r--r--drivers/usb/serial/pl2303.c15
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/quatech2.c47
-rw-r--r--drivers/usb/serial/safe_serial.c15
-rw-r--r--drivers/usb/serial/sierra.c25
-rw-r--r--drivers/usb/serial/spcp8x5.c24
-rw-r--r--drivers/usb/serial/ssu100.c50
-rw-r--r--drivers/usb/serial/symbolserial.c9
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c44
-rw-r--r--drivers/usb/serial/usb-serial.c28
-rw-r--r--drivers/usb/serial/usb_wwan.c25
-rw-r--r--drivers/usb/serial/xsens_mt.c86
-rw-r--r--drivers/usb/storage/initializers.c76
-rw-r--r--drivers/usb/storage/initializers.h4
-rw-r--r--drivers/usb/storage/uas.c124
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/unusual_devs.h329
-rw-r--r--drivers/usb/storage/usb.c15
-rw-r--r--drivers/usb/storage/usual-tables.c15
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c6
-rw-r--r--drivers/uwb/lc-rc.c21
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c4
-rw-r--r--drivers/vhost/Kconfig4
-rw-r--r--drivers/vhost/Kconfig.tcm4
-rw-r--r--drivers/vhost/net.c41
-rw-r--r--drivers/vhost/tcm_vhost.c4
-rw-r--r--drivers/vhost/vhost.c18
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/Kconfig41
-rw-r--r--drivers/video/Makefile2
-rw-r--r--drivers/video/auo_k190x.c1
-rw-r--r--drivers/video/backlight/88pm860x_bl.c5
-rw-r--r--drivers/video/backlight/Kconfig24
-rw-r--r--drivers/video/backlight/Makefile89
-rw-r--r--drivers/video/backlight/aat2870_bl.c2
-rw-r--r--drivers/video/backlight/adp8860_bl.c2
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/ams369fg06.c104
-rw-r--r--drivers/video/backlight/apple_bl.c2
-rw-r--r--drivers/video/backlight/as3711_bl.c380
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/corgi_lcd.c18
-rw-r--r--drivers/video/backlight/hx8357.c497
-rw-r--r--drivers/video/backlight/l4f00242t03.c36
-rw-r--r--drivers/video/backlight/ld9040.c109
-rw-r--r--drivers/video/backlight/lm3630_bl.c4
-rw-r--r--drivers/video/backlight/lm3639_bl.c5
-rw-r--r--drivers/video/backlight/lms283gf05.c4
-rw-r--r--drivers/video/backlight/lms501kf03.c441
-rw-r--r--drivers/video/backlight/lp855x_bl.c169
-rw-r--r--drivers/video/backlight/ltv350qv.c10
-rw-r--r--drivers/video/backlight/omap1_bl.c10
-rw-r--r--drivers/video/backlight/ot200_bl.c1
-rw-r--r--drivers/video/backlight/pwm_bl.c8
-rw-r--r--drivers/video/backlight/s6e63m0.c153
-rw-r--r--drivers/video/backlight/tdo24m.c10
-rw-r--r--drivers/video/backlight/tosa_bl.c2
-rw-r--r--drivers/video/backlight/tosa_lcd.c12
-rw-r--r--drivers/video/backlight/vgg2432a4.c9
-rw-r--r--drivers/video/clps711xfb.c1
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/fbcon.c10
-rw-r--r--drivers/video/exynos/exynos_dp_core.c37
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c70
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_common.c1
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_lowlevel.c1
-rw-r--r--drivers/video/exynos/s6e8ax0.c14
-rw-r--r--drivers/video/fsl-diu-fb.c64
-rw-r--r--drivers/video/geode/Kconfig14
-rw-r--r--drivers/video/goldfishfb.c318
-rw-r--r--drivers/video/imxfb.c13
-rw-r--r--drivers/video/jz4740_fb.c6
-rw-r--r--drivers/video/mmp/Kconfig11
-rw-r--r--drivers/video/mmp/Makefile1
-rw-r--r--drivers/video/mmp/core.c258
-rw-r--r--drivers/video/mmp/fb/Kconfig13
-rw-r--r--drivers/video/mmp/fb/Makefile1
-rw-r--r--drivers/video/mmp/fb/mmpfb.c685
-rw-r--r--drivers/video/mmp/fb/mmpfb.h54
-rw-r--r--drivers/video/mmp/hw/Kconfig20
-rw-r--r--drivers/video/mmp/hw/Makefile2
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.c591
-rw-r--r--drivers/video/mmp/hw/mmp_ctrl.h1974
-rw-r--r--drivers/video/mmp/hw/mmp_spi.c180
-rw-r--r--drivers/video/mmp/panel/Kconfig6
-rw-r--r--drivers/video/mmp/panel/Makefile1
-rw-r--r--drivers/video/mmp/panel/tpo_tj032md01bw.c186
-rw-r--r--drivers/video/mx3fb.c2
-rw-r--r--drivers/video/omap/Kconfig2
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c24
-rw-r--r--drivers/video/omap2/dss/dss_features.c1
-rw-r--r--drivers/video/omap2/dss/hdmi.c8
-rw-r--r--drivers/video/omap2/vrfb.c9
-rw-r--r--drivers/video/s3c-fb.c7
-rw-r--r--drivers/video/ssd1307fb.c4
-rw-r--r--drivers/virtio/Kconfig8
-rw-r--r--drivers/vlynq/Kconfig2
-rw-r--r--drivers/vme/vme.c1
-rw-r--r--drivers/w1/masters/ds1wm.c52
-rw-r--r--drivers/w1/masters/ds2482.c51
-rw-r--r--drivers/w1/masters/mxc_w1.c49
-rw-r--r--drivers/w1/masters/omap_hdq.c8
-rw-r--r--drivers/w1/masters/w1-gpio.c2
-rw-r--r--drivers/w1/slaves/w1_therm.c36
-rw-r--r--drivers/watchdog/ar7_wdt.c8
-rw-r--r--drivers/watchdog/at32ap700x_wdt.c12
-rw-r--r--drivers/watchdog/at91sam9_wdt.c13
-rw-r--r--drivers/watchdog/coh901327_wdt.c12
-rw-r--r--drivers/watchdog/dw_wdt.c6
-rw-r--r--drivers/watchdog/imx2_wdt.c20
-rw-r--r--drivers/watchdog/jz4740_wdt.c6
-rw-r--r--drivers/watchdog/lantiq_wdt.c8
-rw-r--r--drivers/watchdog/max63xx_wdt.c7
-rw-r--r--drivers/watchdog/pnx4008_wdt.c6
-rw-r--r--drivers/watchdog/txx9wdt.c19
-rw-r--r--drivers/xen/Kconfig34
-rw-r--r--drivers/xen/Makefile3
-rw-r--r--drivers/xen/cpu_hotplug.c4
-rw-r--r--drivers/xen/events.c126
-rw-r--r--drivers/xen/evtchn.c14
-rw-r--r--drivers/xen/gntdev.c130
-rw-r--r--drivers/xen/grant-table.c50
-rw-r--r--drivers/xen/pcpu.c38
-rw-r--r--drivers/xen/privcmd.c89
-rw-r--r--drivers/xen/swiotlb-xen.c4
-rw-r--r--drivers/xen/tmem.c2
-rw-r--r--drivers/xen/xen-acpi-cpuhotplug.c471
-rw-r--r--drivers/xen/xen-acpi-memhotplug.c483
-rw-r--r--drivers/xen/xen-acpi-pad.c3
-rw-r--r--drivers/xen/xen-pciback/pciback.h2
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c14
-rw-r--r--drivers/xen/xen-stub.c101
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c2
3261 files changed, 238694 insertions, 108208 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index f5fb0722a63a..202fa6d051b9 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -134,6 +134,8 @@ source "drivers/hwspinlock/Kconfig"
source "drivers/clocksource/Kconfig"
+source "drivers/mailbox/Kconfig"
+
source "drivers/iommu/Kconfig"
source "drivers/remoteproc/Kconfig"
@@ -150,6 +152,8 @@ source "drivers/memory/Kconfig"
source "drivers/iio/Kconfig"
+source "drivers/ntb/Kconfig"
+
source "drivers/vme/Kconfig"
source "drivers/pwm/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 7863b9fee50b..dce39a95fa71 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_PNP) += pnp/
obj-y += amba/
# Many drivers will want to use DMA so this has to be made available
# really early.
-obj-$(CONFIG_DMA_ENGINE) += dma/
+obj-$(CONFIG_DMADEVICES) += dma/
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_XEN) += xen/
@@ -130,6 +130,7 @@ obj-y += platform/
#common clk code
obj-y += clk/
+obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_NFC) += nfc/
obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
@@ -146,3 +147,4 @@ obj-$(CONFIG_MEMORY) += memory/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_IPACK_BUS) += ipack/
+obj-$(CONFIG_NTB) += ntb/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 38c5078da11d..1a4ed64586a7 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -176,7 +176,6 @@ config ACPI_FAN
config ACPI_DOCK
bool "Dock"
- depends on EXPERIMENTAL
help
This driver supports ACPI-controlled docking stations and removable
drive bays such as the IBM Ultrabay and the Dell Module Bay.
@@ -202,7 +201,7 @@ config ACPI_PROCESSOR
the module will be called processor.
config ACPI_IPMI
tristate "IPMI"
- depends on EXPERIMENTAL && IPMI_SI && IPMI_HANDLER
+ depends on IPMI_SI && IPMI_HANDLER
default n
help
This driver enables the ACPI to access the BMC controller. And it
@@ -214,14 +213,13 @@ config ACPI_IPMI
config ACPI_HOTPLUG_CPU
bool
- depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU
+ depends on ACPI_PROCESSOR && HOTPLUG_CPU
select ACPI_CONTAINER
default y
config ACPI_PROCESSOR_AGGREGATOR
tristate "Processor Aggregator"
depends on ACPI_PROCESSOR
- depends on EXPERIMENTAL
depends on X86
help
ACPI 4.0 defines processor Aggregator, which enables OS to perform
@@ -337,8 +335,7 @@ config X86_PM_TIMER
systems require this timer.
config ACPI_CONTAINER
- tristate "Container and Module Devices (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "Container and Module Devices"
default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU || ACPI_HOTPLUG_IO)
help
This driver supports ACPI Container and Module devices (IDs
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 2a4502becd13..474fcfeba66c 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -37,7 +37,8 @@ acpi-y += resource.o
acpi-y += processor_core.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
-acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
+acpi-y += pci_root.o pci_link.o pci_irq.o
+acpi-y += csrt.o
acpi-y += acpi_platform.o
acpi-y += power.o
acpi-y += event.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index d5fdd36190cc..6d5bf649196d 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -60,7 +60,7 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
#endif
static int acpi_ac_add(struct acpi_device *device);
-static int acpi_ac_remove(struct acpi_device *device, int type);
+static int acpi_ac_remove(struct acpi_device *device);
static void acpi_ac_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id ac_device_ids[] = {
@@ -337,7 +337,7 @@ static int acpi_ac_resume(struct device *dev)
}
#endif
-static int acpi_ac_remove(struct acpi_device *device, int type)
+static int acpi_ac_remove(struct acpi_device *device)
{
struct acpi_ac *ac = NULL;
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index b679bf8478f7..da1f82b445e0 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("GPL");
#define MEMORY_POWER_OFF_STATE 2
static int acpi_memory_device_add(struct acpi_device *device);
-static int acpi_memory_device_remove(struct acpi_device *device, int type);
+static int acpi_memory_device_remove(struct acpi_device *device);
static const struct acpi_device_id memory_device_ids[] = {
{ACPI_MEMORY_DEVICE_HID, 0},
@@ -153,51 +153,46 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
return 0;
}
-static int
-acpi_memory_get_device(acpi_handle handle,
- struct acpi_memory_device **mem_device)
+static int acpi_memory_get_device(acpi_handle handle,
+ struct acpi_memory_device **mem_device)
{
- acpi_status status;
- acpi_handle phandle;
struct acpi_device *device = NULL;
- struct acpi_device *pdevice = NULL;
- int result;
+ int result = 0;
+ acpi_scan_lock_acquire();
- if (!acpi_bus_get_device(handle, &device) && device)
+ acpi_bus_get_device(handle, &device);
+ if (device)
goto end;
- status = acpi_get_parent(handle, &phandle);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Cannot find acpi parent"));
- return -EINVAL;
- }
-
- /* Get the parent device */
- result = acpi_bus_get_device(phandle, &pdevice);
- if (result) {
- acpi_handle_warn(phandle, "Cannot get acpi bus device\n");
- return -EINVAL;
- }
-
/*
* Now add the notified device. This creates the acpi_device
* and invokes .add function
*/
- result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE);
+ result = acpi_bus_scan(handle);
if (result) {
- acpi_handle_warn(handle, "Cannot add acpi bus\n");
- return -EINVAL;
+ acpi_handle_warn(handle, "ACPI namespace scan failed\n");
+ result = -EINVAL;
+ goto out;
+ }
+ result = acpi_bus_get_device(handle, &device);
+ if (result) {
+ acpi_handle_warn(handle, "Missing device object\n");
+ result = -EINVAL;
+ goto out;
}
- end:
+ end:
*mem_device = acpi_driver_data(device);
if (!(*mem_device)) {
dev_err(&device->dev, "driver data not found\n");
- return -ENODEV;
+ result = -ENODEV;
+ goto out;
}
- return 0;
+ out:
+ acpi_scan_lock_release();
+ return result;
}
static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
@@ -285,9 +280,11 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
{
- int result = 0;
+ int result = 0, nid;
struct acpi_memory_info *info, *n;
+ nid = acpi_get_node(mem_device->device->handle);
+
list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
if (info->failed)
/* The kernel does not use this memory block */
@@ -300,7 +297,9 @@ static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
*/
return -EBUSY;
- result = remove_memory(info->start_addr, info->length);
+ if (nid < 0)
+ nid = memory_add_physaddr_to_nid(info->start_addr);
+ result = remove_memory(nid, info->start_addr, info->length);
if (result)
return result;
@@ -317,6 +316,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
struct acpi_device *device;
struct acpi_eject_event *ej_event = NULL;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
+ acpi_status status;
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
@@ -339,29 +339,40 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"\nReceived EJECT REQUEST notification for device\n"));
+ status = AE_ERROR;
+ acpi_scan_lock_acquire();
+
if (acpi_bus_get_device(handle, &device)) {
acpi_handle_err(handle, "Device doesn't exist\n");
- break;
+ goto unlock;
}
mem_device = acpi_driver_data(device);
if (!mem_device) {
acpi_handle_err(handle, "Driver Data is NULL\n");
- break;
+ goto unlock;
}
ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
if (!ej_event) {
pr_err(PREFIX "No memory, dropping EJECT\n");
- break;
+ goto unlock;
}
- ej_event->handle = handle;
+ get_device(&device->dev);
+ ej_event->device = device;
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
- acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
- (void *)ej_event);
+ /* The eject is carried out asynchronously. */
+ status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
+ ej_event);
+ if (ACPI_FAILURE(status)) {
+ put_device(&device->dev);
+ kfree(ej_event);
+ }
- /* eject is performed asynchronously */
- return;
+ unlock:
+ acpi_scan_lock_release();
+ if (ACPI_SUCCESS(status))
+ return;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
@@ -372,7 +383,6 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
/* Inform firmware that the hotplug operation has completed */
(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
- return;
}
static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
@@ -427,7 +437,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
return result;
}
-static int acpi_memory_device_remove(struct acpi_device *device, int type)
+static int acpi_memory_device_remove(struct acpi_device *device)
{
struct acpi_memory_device *mem_device = NULL;
int result;
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 16fa979f7180..31de1043eea0 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -482,8 +482,7 @@ static int acpi_pad_add(struct acpi_device *device)
return 0;
}
-static int acpi_pad_remove(struct acpi_device *device,
- int type)
+static int acpi_pad_remove(struct acpi_device *device)
{
mutex_lock(&isolated_cpus_lock);
acpi_pad_idle_cpus(0);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index db129b9f52cb..26fce4b8a632 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -13,6 +13,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -21,18 +22,59 @@
ACPI_MODULE_NAME("platform");
+/* Flags for acpi_create_platform_device */
+#define ACPI_PLATFORM_CLK BIT(0)
+
+/*
+ * The following ACPI IDs are known to be suitable for representing as
+ * platform devices.
+ */
+static const struct acpi_device_id acpi_platform_device_ids[] = {
+
+ { "PNP0D40" },
+
+ /* Haswell LPSS devices */
+ { "INT33C0", ACPI_PLATFORM_CLK },
+ { "INT33C1", ACPI_PLATFORM_CLK },
+ { "INT33C2", ACPI_PLATFORM_CLK },
+ { "INT33C3", ACPI_PLATFORM_CLK },
+ { "INT33C4", ACPI_PLATFORM_CLK },
+ { "INT33C5", ACPI_PLATFORM_CLK },
+ { "INT33C6", ACPI_PLATFORM_CLK },
+ { "INT33C7", ACPI_PLATFORM_CLK },
+
+ { }
+};
+
+static int acpi_create_platform_clks(struct acpi_device *adev)
+{
+ static struct platform_device *pdev;
+
+ /* Create Lynxpoint LPSS clocks */
+ if (!pdev && !strncmp(acpi_device_hid(adev), "INT33C", 6)) {
+ pdev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+
/**
* acpi_create_platform_device - Create platform device for ACPI device node
* @adev: ACPI device node to create a platform device for.
+ * @id: ACPI device ID used to match @adev.
*
* Check if the given @adev can be represented as a platform device and, if
* that's the case, create and register a platform device, populate its common
* resources and returns a pointer to it. Otherwise, return %NULL.
*
- * The platform device's name will be taken from the @adev's _HID and _UID.
+ * Name of the platform device will be the same as @adev's.
*/
-struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+static int acpi_create_platform_device(struct acpi_device *adev,
+ const struct acpi_device_id *id)
{
+ unsigned long flags = id->driver_data;
struct platform_device *pdev = NULL;
struct acpi_device *acpi_parent;
struct platform_device_info pdevinfo;
@@ -41,20 +83,28 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
struct resource *resources;
int count;
+ if (flags & ACPI_PLATFORM_CLK) {
+ int ret = acpi_create_platform_clks(adev);
+ if (ret) {
+ dev_err(&adev->dev, "failed to create clocks\n");
+ return ret;
+ }
+ }
+
/* If the ACPI node already has a physical device attached, skip it. */
if (adev->physical_node_count)
- return NULL;
+ return 0;
INIT_LIST_HEAD(&resource_list);
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (count <= 0)
- return NULL;
+ return 0;
resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL);
if (!resources) {
dev_err(&adev->dev, "No memory for resources\n");
acpi_dev_free_resource_list(&resource_list);
- return NULL;
+ return -ENOMEM;
}
count = 0;
list_for_each_entry(rentry, &resource_list, node)
@@ -100,5 +150,15 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
}
kfree(resources);
- return pdev;
+ return 1;
+}
+
+static struct acpi_scan_handler platform_handler = {
+ .ids = acpi_platform_device_ids,
+ .attach = acpi_create_platform_device,
+};
+
+void __init acpi_platform_init(void)
+{
+ acpi_scan_add_handler(&platform_handler);
}
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index bc7a03ded064..a1b9bf5085a2 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -31,6 +31,7 @@ acpi-y += \
evgpeinit.o \
evgpeutil.o \
evglock.o \
+ evhandler.o \
evmisc.o \
evregion.o \
evrgnini.o \
@@ -90,6 +91,7 @@ acpi-y += \
nsobject.o \
nsparse.o \
nspredef.o \
+ nsprepkg.o \
nsrepair.o \
nsrepair2.o \
nssearch.o \
@@ -104,7 +106,9 @@ acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
acpi-y += \
psargs.o \
psloop.o \
+ psobject.o \
psopcode.o \
+ psopinfo.o \
psparse.o \
psscope.o \
pstree.o \
@@ -126,7 +130,7 @@ acpi-y += \
rsutils.o \
rsxface.o
-acpi-$(ACPI_FUTURE_USAGE) += rsdump.o
+acpi-$(ACPI_FUTURE_USAGE) += rsdump.o rsdumpinfo.o
acpi-y += \
tbfadt.o \
@@ -155,8 +159,10 @@ acpi-y += \
utmutex.o \
utobject.o \
utosi.o \
+ utownerid.o \
utresrc.o \
utstate.o \
+ utstring.o \
utxface.o \
utxfinit.o \
utxferror.o \
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 8a7d51bfb3b3..8a6c4a0d22db 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,7 @@
*
* Note: The order of these include files is important.
*/
+#include <acpi/acconfig.h> /* Global configuration constants */
#include "acmacros.h" /* C macros */
#include "aclocal.h" /* Internal data types */
#include "acobject.h" /* ACPI internal object */
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 432a318c9ed1..9feba08c29fe 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -115,6 +115,21 @@ ACPI_HW_DEPENDENT_RETURN_VOID(void
char *block_arg))
/*
+ * dbconvert - miscellaneous conversion routines
+ */
+ acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
+
+acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object);
+
+acpi_status
+acpi_db_convert_to_object(acpi_object_type type,
+ char *string, union acpi_object *object);
+
+u8 *acpi_db_encode_pld_buffer(struct acpi_pld_info *pld_info);
+
+void acpi_db_dump_pld_buffer(union acpi_object *obj_desc);
+
+/*
* dbmethod - control method commands
*/
void
@@ -191,6 +206,8 @@ void
acpi_db_create_execution_threads(char *num_threads_arg,
char *num_loops_arg, char *method_name_arg);
+void acpi_db_delete_objects(u32 count, union acpi_object *objects);
+
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
u32 acpi_db_get_cache_info(struct acpi_memory_list *cache);
#endif
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index ed33ebcdaebe..427db72a6302 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index e975c6720448..ab0e97710381 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -158,10 +158,23 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
void *context);
/*
- * evregion - Address Space handling
+ * evhandler - Address space handling
*/
+u8
+acpi_ev_has_default_handler(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id);
+
acpi_status acpi_ev_install_region_handlers(void);
+acpi_status
+acpi_ev_install_space_handler(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup, void *context);
+
+/*
+ * evregion - Operation region support
+ */
acpi_status acpi_ev_initialize_op_regions(void);
acpi_status
@@ -180,12 +193,6 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
u8 acpi_ns_is_locked);
acpi_status
-acpi_ev_install_space_handler(struct acpi_namespace_node *node,
- acpi_adr_space_type space_id,
- acpi_adr_space_handler handler,
- acpi_adr_space_setup setup, void *context);
-
-acpi_status
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_adr_space_type space_id);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 64472e4ec329..ecb49927b817 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -192,14 +192,6 @@ ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
-/* Mutex for _OSI support */
-
-ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
-
-/* Reader/Writer lock is used for namespace walk and dynamic table unload */
-
-ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
-
/*****************************************************************************
*
* Mutual exclusion within ACPICA subsystem
@@ -233,6 +225,14 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
+/* Mutex for _OSI support */
+
+ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
+
+/* Reader/Writer lock is used for namespace walk and dynamic table unload */
+
+ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
+
/*****************************************************************************
*
* Miscellaneous globals
@@ -252,7 +252,7 @@ ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
ACPI_EXTERN struct acpi_global_notify_handler acpi_gbl_global_notify[2];
ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
-ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler;
+ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
ACPI_EXTERN void *acpi_gbl_table_handler_context;
ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
@@ -304,6 +304,7 @@ extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
+ACPI_EXTERN u8 acpi_gbl_disable_mem_tracking;
#endif
/*****************************************************************************
@@ -365,19 +366,18 @@ ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
*
****************************************************************************/
-extern struct acpi_fixed_event_info
- acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
-ACPI_EXTERN struct acpi_fixed_event_handler
- acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
-ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
-ACPI_EXTERN struct acpi_gpe_block_info
-*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-
#if (!ACPI_REDUCED_HARDWARE)
ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
+ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
+ACPI_EXTERN struct acpi_gpe_block_info
+ *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler;
ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
+ACPI_EXTERN struct acpi_fixed_event_handler
+ acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
+extern struct acpi_fixed_event_info
+ acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
#endif /* !ACPI_REDUCED_HARDWARE */
@@ -405,7 +405,7 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
/*****************************************************************************
*
- * Debugger globals
+ * Debugger and Disassembler globals
*
****************************************************************************/
@@ -413,8 +413,12 @@ ACPI_EXTERN u8 acpi_gbl_db_output_flags;
#ifdef ACPI_DISASSEMBLER
+u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
+
ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
+ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list;
+ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list;
#endif
#ifdef ACPI_DEBUGGER
@@ -426,6 +430,7 @@ extern u8 acpi_gbl_db_terminate_threads;
ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
+ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index d902d31abc6c..6357e932bfd9 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index eb308635da72..8af8c9bdeb35 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -458,7 +458,7 @@ void acpi_ex_reacquire_interpreter(void);
void acpi_ex_relinquish_interpreter(void);
-void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
+u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
void acpi_ex_acquire_global_lock(u32 rule);
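For context, acpi_ex_truncate_for32bit_table() now reports whether truncation actually occurred. A minimal usage sketch follows (not part of this patch; it assumes an already-initialized integer operand object and simply mirrors the dsobject.c hunk later in this diff):

static void example_truncate_and_warn(union acpi_operand_object *obj_desc)
{
	u64 original_value = obj_desc->integer.value;

	/* Returns TRUE only if the value was actually truncated to 32 bits */
	if (acpi_ex_truncate_for32bit_table(obj_desc)) {
		ACPI_WARNING((AE_INFO,
			      "Truncated 64-bit constant: %8.8X%8.8X => %8.8X",
			      ACPI_FORMAT_UINT64(original_value),
			      (u32)obj_desc->integer.value));
	}
}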
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index ff8bd0061e8b..805f419086ab 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -189,11 +189,10 @@ struct acpi_namespace_node {
#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */
#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */
-#define ANOBJ_IS_EXTERNAL 0x08 /* i_aSL only: This object created via External() */
-#define ANOBJ_METHOD_NO_RETVAL 0x10 /* i_aSL only: Method has no return value */
-#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* i_aSL only: Method has at least one return value */
-#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */
-#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
+#define ANOBJ_IS_EXTERNAL 0x08 /* iASL only: This object created via External() */
+#define ANOBJ_METHOD_NO_RETVAL 0x10 /* iASL only: Method has no return value */
+#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* iASL only: Method has at least one return value */
+#define ANOBJ_IS_REFERENCED 0x80 /* iASL only: Object was referenced */
/* Internal ACPI table management - master table list */
@@ -411,11 +410,10 @@ struct acpi_gpe_notify_info {
struct acpi_gpe_notify_info *next;
};
-struct acpi_gpe_notify_object {
- struct acpi_namespace_node *node;
- struct acpi_gpe_notify_object *next;
-};
-
+/*
+ * GPE dispatch info. At any time, the GPE can have at most one type
+ * of dispatch - Method, Handler, or Implicit Notify.
+ */
union acpi_gpe_dispatch_info {
struct acpi_namespace_node *method_node; /* Method node for this GPE level */
struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
@@ -679,6 +677,8 @@ struct acpi_opcode_info {
u8 type; /* Opcode type */
};
+/* Value associated with the parse object */
+
union acpi_parse_value {
u64 integer; /* Integer constant (Up to 64 bits) */
u32 size; /* bytelist or field size */
@@ -1025,6 +1025,31 @@ struct acpi_port_info {
/*****************************************************************************
*
+ * Disassembler
+ *
+ ****************************************************************************/
+
+struct acpi_external_list {
+ char *path;
+ char *internal_path;
+ struct acpi_external_list *next;
+ u32 value;
+ u16 length;
+ u8 type;
+ u8 flags;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_IPATH_ALLOCATED 0x01
+
+struct acpi_external_file {
+ char *path;
+ struct acpi_external_file *next;
+};
+
+/*****************************************************************************
+ *
* Debugger
*
****************************************************************************/
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 5efad99f2169..ed7943b9044f 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,14 +49,18 @@
 * get into potential alignment issues -- see the STORE macros below.
* Use with care.
*/
-#define ACPI_GET8(ptr) *ACPI_CAST_PTR (u8, ptr)
-#define ACPI_GET16(ptr) *ACPI_CAST_PTR (u16, ptr)
-#define ACPI_GET32(ptr) *ACPI_CAST_PTR (u32, ptr)
-#define ACPI_GET64(ptr) *ACPI_CAST_PTR (u64, ptr)
-#define ACPI_SET8(ptr) *ACPI_CAST_PTR (u8, ptr)
-#define ACPI_SET16(ptr) *ACPI_CAST_PTR (u16, ptr)
-#define ACPI_SET32(ptr) *ACPI_CAST_PTR (u32, ptr)
-#define ACPI_SET64(ptr) *ACPI_CAST_PTR (u64, ptr)
+#define ACPI_CAST8(ptr) ACPI_CAST_PTR (u8, (ptr))
+#define ACPI_CAST16(ptr) ACPI_CAST_PTR (u16, (ptr))
+#define ACPI_CAST32(ptr) ACPI_CAST_PTR (u32, (ptr))
+#define ACPI_CAST64(ptr) ACPI_CAST_PTR (u64, (ptr))
+#define ACPI_GET8(ptr) (*ACPI_CAST8 (ptr))
+#define ACPI_GET16(ptr) (*ACPI_CAST16 (ptr))
+#define ACPI_GET32(ptr) (*ACPI_CAST32 (ptr))
+#define ACPI_GET64(ptr) (*ACPI_CAST64 (ptr))
+#define ACPI_SET8(ptr, val) (*ACPI_CAST8 (ptr) = (u8) (val))
+#define ACPI_SET16(ptr, val) (*ACPI_CAST16 (ptr) = (u16) (val))
+#define ACPI_SET32(ptr, val) (*ACPI_CAST32 (ptr) = (u32) (val))
+#define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))
/*
* printf() format helpers
@@ -293,6 +297,26 @@
#define ACPI_16BIT_MASK 0x0000FFFF
#define ACPI_24BIT_MASK 0x00FFFFFF
+/* Macros to extract flag bits from position zero */
+
+#define ACPI_GET_1BIT_FLAG(value) ((value) & ACPI_1BIT_MASK)
+#define ACPI_GET_2BIT_FLAG(value) ((value) & ACPI_2BIT_MASK)
+#define ACPI_GET_3BIT_FLAG(value) ((value) & ACPI_3BIT_MASK)
+#define ACPI_GET_4BIT_FLAG(value) ((value) & ACPI_4BIT_MASK)
+
+/* Macros to extract flag bits from position one and above */
+
+#define ACPI_EXTRACT_1BIT_FLAG(field, position) (ACPI_GET_1BIT_FLAG ((field) >> position))
+#define ACPI_EXTRACT_2BIT_FLAG(field, position) (ACPI_GET_2BIT_FLAG ((field) >> position))
+#define ACPI_EXTRACT_3BIT_FLAG(field, position) (ACPI_GET_3BIT_FLAG ((field) >> position))
+#define ACPI_EXTRACT_4BIT_FLAG(field, position) (ACPI_GET_4BIT_FLAG ((field) >> position))
+
+/* ACPI Pathname helpers */
+
+#define ACPI_IS_ROOT_PREFIX(c) ((c) == (u8) 0x5C) /* Backslash */
+#define ACPI_IS_PARENT_PREFIX(c) ((c) == (u8) 0x5E) /* Carat */
+#define ACPI_IS_PATH_SEPARATOR(c) ((c) == (u8) 0x2E) /* Period (dot) */
+
/*
* An object of type struct acpi_namespace_node can appear in some contexts
* where a pointer to an object of type union acpi_operand_object can also
@@ -364,137 +388,6 @@
#endif /* ACPI_NO_ERROR_MESSAGES */
-/*
- * Debug macros that are conditionally compiled
- */
-#ifdef ACPI_DEBUG_OUTPUT
-/*
- * Function entry tracing
- */
-#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
-#define ACPI_FUNCTION_TRACE_PTR(a, b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_ptr(ACPI_DEBUG_PARAMETERS, (void *)b)
-#define ACPI_FUNCTION_TRACE_U32(a, b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_u32(ACPI_DEBUG_PARAMETERS, (u32)b)
-#define ACPI_FUNCTION_TRACE_STR(a, b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_str(ACPI_DEBUG_PARAMETERS, (char *)b)
-
-#define ACPI_FUNCTION_ENTRY() acpi_ut_track_stack_ptr()
-
-/*
- * Function exit tracing.
- * WARNING: These macros include a return statement. This is usually considered
- * bad form, but having a separate exit macro is very ugly and difficult to maintain.
- * One of the FUNCTION_TRACE macros above must be used in conjunction with these macros
- * so that "_AcpiFunctionName" is defined.
- *
- * Note: the DO_WHILE0 macro is used to prevent some compilers from complaining
- * about these constructs.
- */
-#ifdef ACPI_USE_DO_WHILE_0
-#define ACPI_DO_WHILE0(a) do a while(0)
-#else
-#define ACPI_DO_WHILE0(a) a
-#endif
-
-#define return_VOID ACPI_DO_WHILE0 ({ \
- acpi_ut_exit (ACPI_DEBUG_PARAMETERS); \
- return;})
-/*
- * There are two versions of most of the return macros. The default version is
- * safer, since it avoids side-effects by guaranteeing that the argument will
- * not be evaluated twice.
- *
- * A less-safe version of the macros is provided for optional use if the
- * compiler uses excessive CPU stack (for example, this may happen in the
- * debug case if code optimzation is disabled.)
- */
-#ifndef ACPI_SIMPLE_RETURN_MACROS
-
-#define return_ACPI_STATUS(s) ACPI_DO_WHILE0 ({ \
- register acpi_status _s = (s); \
- acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, _s); \
- return (_s); })
-#define return_PTR(s) ACPI_DO_WHILE0 ({ \
- register void *_s = (void *) (s); \
- acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) _s); \
- return (_s); })
-#define return_VALUE(s) ACPI_DO_WHILE0 ({ \
- register u64 _s = (s); \
- acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, _s); \
- return (_s); })
-#define return_UINT8(s) ACPI_DO_WHILE0 ({ \
- register u8 _s = (u8) (s); \
- acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (u64) _s); \
- return (_s); })
-#define return_UINT32(s) ACPI_DO_WHILE0 ({ \
- register u32 _s = (u32) (s); \
- acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (u64) _s); \
- return (_s); })
-#else /* Use original less-safe macros */
-
-#define return_ACPI_STATUS(s) ACPI_DO_WHILE0 ({ \
- acpi_ut_status_exit (ACPI_DEBUG_PARAMETERS, (s)); \
- return((s)); })
-#define return_PTR(s) ACPI_DO_WHILE0 ({ \
- acpi_ut_ptr_exit (ACPI_DEBUG_PARAMETERS, (u8 *) (s)); \
- return((s)); })
-#define return_VALUE(s) ACPI_DO_WHILE0 ({ \
- acpi_ut_value_exit (ACPI_DEBUG_PARAMETERS, (u64) (s)); \
- return((s)); })
-#define return_UINT8(s) return_VALUE(s)
-#define return_UINT32(s) return_VALUE(s)
-
-#endif /* ACPI_SIMPLE_RETURN_MACROS */
-
-/* Conditional execution */
-
-#define ACPI_DEBUG_EXEC(a) a
-#define ACPI_DEBUG_ONLY_MEMBERS(a) a;
-#define _VERBOSE_STRUCTURES
-
-/* Various object display routines for debug */
-
-#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0)
-#define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c)
-#define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b)
-#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d)
-#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
-
-#else
-/*
- * This is the non-debug case -- make everything go away,
- * leaving no executable debug code!
- */
-#define ACPI_DEBUG_EXEC(a)
-#define ACPI_DEBUG_ONLY_MEMBERS(a)
-#define ACPI_FUNCTION_TRACE(a)
-#define ACPI_FUNCTION_TRACE_PTR(a, b)
-#define ACPI_FUNCTION_TRACE_U32(a, b)
-#define ACPI_FUNCTION_TRACE_STR(a, b)
-#define ACPI_FUNCTION_EXIT
-#define ACPI_FUNCTION_STATUS_EXIT(s)
-#define ACPI_FUNCTION_VALUE_EXIT(s)
-#define ACPI_FUNCTION_ENTRY()
-#define ACPI_DUMP_STACK_ENTRY(a)
-#define ACPI_DUMP_OPERANDS(a, b, c)
-#define ACPI_DUMP_ENTRY(a, b)
-#define ACPI_DUMP_TABLES(a, b)
-#define ACPI_DUMP_PATHNAME(a, b, c, d)
-#define ACPI_DUMP_BUFFER(a, b)
-#define ACPI_DEBUG_PRINT(pl)
-#define ACPI_DEBUG_PRINT_RAW(pl)
-
-#define return_VOID return
-#define return_ACPI_STATUS(s) return(s)
-#define return_VALUE(s) return(s)
-#define return_UINT8(s) return(s)
-#define return_UINT32(s) return(s)
-#define return_PTR(s) return(s)
-
-#endif /* ACPI_DEBUG_OUTPUT */
-
#if (!ACPI_REDUCED_HARDWARE)
#define ACPI_HW_OPTIONAL_FUNCTION(addr) addr
#else
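The GET/SET helpers above now take the value as an explicit second argument, and the new CAST and flag-extraction macros centralize the pointer casts and bit decoding. A small usage sketch, given only as an illustration under those assumptions (not code from this patch):

static u32 example_flags_from_aml(u8 *aml)
{
	u16 flags;

	ACPI_SET16(aml, 0x0006);	/* old form was: *ACPI_CAST_PTR (u16, aml) = 0x0006 */
	flags = ACPI_GET16(aml);	/* possibly unaligned access, as the comment above warns */

	return (ACPI_EXTRACT_1BIT_FLAG(flags, 1));	/* single-bit flag at bit position 1 */
}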
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9b19d4b86424..02cd5482ff8b 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -218,6 +218,18 @@ acpi_ns_check_parameter_count(char *pathname,
u32 user_param_count,
const union acpi_predefined_info *info);
+acpi_status
+acpi_ns_check_object_type(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr,
+ u32 expected_btypes, u32 package_index);
+
+/*
+ * nsprepkg - Validation of predefined name packages
+ */
+acpi_status
+acpi_ns_check_package(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr);
+
/*
* nsnames - Name and Scope manipulation
*/
@@ -333,8 +345,6 @@ acpi_ns_install_node(struct acpi_walk_state *walk_state,
/*
* nsutils - Utility functions
*/
-u8 acpi_ns_valid_root_prefix(char prefix);
-
acpi_object_type acpi_ns_get_type(struct acpi_namespace_node *node);
u32 acpi_ns_local(acpi_object_type type);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 24eb9eac9514..cc7ab6dd724e 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -307,7 +307,7 @@ struct acpi_object_addr_handler {
struct acpi_namespace_node *node; /* Parent device */
void *context;
acpi_adr_space_setup setup;
- union acpi_operand_object *region_list; /* regions using this handler */
+ union acpi_operand_object *region_list; /* Regions using this handler */
union acpi_operand_object *next;
};
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index d786a5128b78..3fc9ca7e8aa3 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index eefcf47a61a0..aed319318835 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -105,7 +105,28 @@ union acpi_parse_object *acpi_ps_find_name(union acpi_parse_object *scope,
union acpi_parse_object *acpi_ps_get_parent(union acpi_parse_object *op);
/*
- * psopcode - AML Opcode information
+ * psobject - support for parse object processing
+ */
+acpi_status
+acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
+ u8 *aml_op_start,
+ union acpi_parse_object *unnamed_op,
+ union acpi_parse_object **op);
+
+acpi_status
+acpi_ps_create_op(struct acpi_walk_state *walk_state,
+ u8 *aml_op_start, union acpi_parse_object **new_op);
+
+acpi_status
+acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **op, acpi_status status);
+
+acpi_status
+acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op, acpi_status status);
+
+/*
+ * psopinfo - AML Opcode information
*/
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode);
@@ -211,8 +232,6 @@ void acpi_ps_free_op(union acpi_parse_object *op);
u8 acpi_ps_is_leading_char(u32 c);
-u8 acpi_ps_is_prefix_char(u32 c);
-
#ifdef ACPI_FUTURE_USAGE
u32 acpi_ps_get_name(union acpi_parse_object *op);
#endif /* ACPI_FUTURE_USAGE */
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 9dfa1c83bd4e..752cc40cdc1e 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -1,12 +1,11 @@
/******************************************************************************
*
* Name: acpredef - Information table for ACPI predefined methods and objects
- * $Revision: 1.1 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,13 +50,13 @@
*
* 1) PTYPE1 packages do not contain sub-packages.
*
- * ACPI_PTYPE1_FIXED: Fixed length, 1 or 2 object types:
+ * ACPI_PTYPE1_FIXED: Fixed-length, 1 or 2 object types:
* object type
* count
* object type
* count
*
- * ACPI_PTYPE1_VAR: Variable length:
+ * ACPI_PTYPE1_VAR: Variable-length:
* object type (Int/Buf/Ref)
*
* ACPI_PTYPE1_OPTION: Package has some required and some optional elements
@@ -85,10 +84,10 @@
* count
* (Used for _CST)
*
- * ACPI_PTYPE2_FIXED: Each subpackage is of fixed length
+ * ACPI_PTYPE2_FIXED: Each subpackage is Fixed-length
* (Used for _PRT)
*
- * ACPI_PTYPE2_MIN: Each subpackage has a variable but minimum length
+ * ACPI_PTYPE2_MIN: Each subpackage is Variable-length but has a minimum length
* (Used for _HPX)
*
* ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
@@ -124,7 +123,8 @@ enum acpi_return_package_types {
* These are the names that can actually be evaluated via acpi_evaluate_object.
* Not present in this table are the following:
*
- * 1) Predefined/Reserved names that are never evaluated via acpi_evaluate_object:
+ * 1) Predefined/Reserved names that are never evaluated via
+ * acpi_evaluate_object:
* _Lxx and _Exx GPE methods
* _Qxx EC methods
* _T_x compiler temporary variables
@@ -149,6 +149,8 @@ enum acpi_return_package_types {
* information about the expected structure of the package. This information
* is saved here (rather than in a separate table) in order to minimize the
* overall size of the stored data.
+ *
+ * Note: The additional braces are intended to promote portability.
*/
static const union acpi_predefined_info predefined_names[] = {
{{"_AC0", 0, ACPI_RTYPE_INTEGER}},
@@ -212,9 +214,8 @@ static const union acpi_predefined_info predefined_names[] = {
{{"_BCT", 1, ACPI_RTYPE_INTEGER}},
{{"_BDN", 0, ACPI_RTYPE_INTEGER}},
{{"_BFS", 1, 0}},
- {{"_BIF", 0, ACPI_RTYPE_PACKAGE} }, /* Fixed-length (9 Int),(4 Str/Buf) */
- {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9,
- ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}, 4, 0} },
+ {{"_BIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (9 Int),(4 Str) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, ACPI_RTYPE_STRING}, 4, 0}},
{{"_BIX", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int),(4 Str) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4,
@@ -236,7 +237,8 @@ static const union acpi_predefined_info predefined_names[] = {
{{"_CBA", 0, ACPI_RTYPE_INTEGER}}, /* See PCI firmware spec 3.0 */
{{"_CDM", 0, ACPI_RTYPE_INTEGER}},
{{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
- {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}},
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0, 0}, 0,
+ 0}},
{{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
@@ -251,7 +253,8 @@ static const union acpi_predefined_info predefined_names[] = {
{{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
{{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
- {{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}},
+ {{{ACPI_PTYPE2_PKG_COUNT, ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,
+ 0}},
{{"_CWS", 1, ACPI_RTYPE_INTEGER}},
{{"_DCK", 1, ACPI_RTYPE_INTEGER}},
@@ -342,8 +345,8 @@ static const union acpi_predefined_info predefined_names[] = {
{{"_MBM", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (8 Int) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 8, 0}, 0, 0}},
- {{"_MLS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (2 Str) */
- {{{ACPI_PTYPE2, ACPI_RTYPE_STRING, 2,0}, 0,0}},
+ {{"_MLS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Str/1 Buf) */
+ {{{ACPI_PTYPE2, ACPI_RTYPE_STRING, 1, ACPI_RTYPE_BUFFER}, 1, 0}},
{{"_MSG", 1, 0}},
{{"_MSM", 4, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 0347d0993497..f691d0e4d9fa 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -347,18 +347,21 @@ extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[];
/*
- * rsdump
+ * rsdumpinfo
*/
extern struct acpi_rsdump_info acpi_rs_dump_irq[];
+extern struct acpi_rsdump_info acpi_rs_dump_prt[];
extern struct acpi_rsdump_info acpi_rs_dump_dma[];
extern struct acpi_rsdump_info acpi_rs_dump_start_dpf[];
extern struct acpi_rsdump_info acpi_rs_dump_end_dpf[];
extern struct acpi_rsdump_info acpi_rs_dump_io[];
+extern struct acpi_rsdump_info acpi_rs_dump_io_flags[];
extern struct acpi_rsdump_info acpi_rs_dump_fixed_io[];
extern struct acpi_rsdump_info acpi_rs_dump_vendor[];
extern struct acpi_rsdump_info acpi_rs_dump_end_tag[];
extern struct acpi_rsdump_info acpi_rs_dump_memory24[];
extern struct acpi_rsdump_info acpi_rs_dump_memory32[];
+extern struct acpi_rsdump_info acpi_rs_dump_memory_flags[];
extern struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[];
extern struct acpi_rsdump_info acpi_rs_dump_address16[];
extern struct acpi_rsdump_info acpi_rs_dump_address32[];
@@ -372,6 +375,7 @@ extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_general_flags[];
#endif
#endif /* __ACRESRC_H__ */
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 937e66c65d1e..7896d85876ca 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 6712965ba8ae..7755e915a007 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index b0f5f92b674a..0082fa0a6139 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -483,39 +483,17 @@ acpi_ut_short_divide(u64 in_dividend,
/*
* utmisc
*/
-void ut_convert_backslashes(char *pathname);
-
const char *acpi_ut_validate_exception(acpi_status status);
u8 acpi_ut_is_pci_root_bridge(char *id);
u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
-
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
-
acpi_status
acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
void *target_object,
acpi_pkg_callback walk_callback, void *context);
-void acpi_ut_strupr(char *src_string);
-
-void acpi_ut_strlwr(char *src_string);
-
-int acpi_ut_stricmp(char *string1, char *string2);
-
-void acpi_ut_print_string(char *string, u8 max_length);
-
-u8 acpi_ut_valid_acpi_name(u32 name);
-
-void acpi_ut_repair_name(char *name);
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position);
-
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
-
/* Values for Base above (16=Hex, 10=Decimal) */
#define ACPI_ANY_BASE 0
@@ -532,15 +510,25 @@ acpi_ut_display_init_pathname(u8 type,
#endif
/*
+ * utownerid - Support for Table/Method Owner IDs
+ */
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
+
+void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
+
+/*
* utresrc
*/
acpi_status
-acpi_ut_walk_aml_resources(u8 *aml,
+acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
+ u8 *aml,
acpi_size aml_length,
acpi_walk_aml_callback user_function,
void **context);
-acpi_status acpi_ut_validate_resource(void *aml, u8 *return_index);
+acpi_status
+acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
+ void *aml, u8 *return_index);
u32 acpi_ut_get_descriptor_length(void *aml);
@@ -554,6 +542,27 @@ acpi_status
acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag);
/*
+ * utstring - String and character utilities
+ */
+void acpi_ut_strupr(char *src_string);
+
+void acpi_ut_strlwr(char *src_string);
+
+int acpi_ut_stricmp(char *string1, char *string2);
+
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
+
+void acpi_ut_print_string(char *string, u8 max_length);
+
+void ut_convert_backslashes(char *pathname);
+
+u8 acpi_ut_valid_acpi_name(u32 name);
+
+u8 acpi_ut_valid_acpi_char(char character, u32 position);
+
+void acpi_ut_repair_name(char *name);
+
+/*
* utmutex - mutex support
*/
acpi_status acpi_ut_mutex_initialize(void);
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index c26f8ff6c3b9..48a3e331b72d 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 968449685e06..87c26366d1df 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -199,6 +199,12 @@ struct aml_resource_fixed_dma {
struct aml_resource_large_header {
AML_RESOURCE_LARGE_HEADER_COMMON};
+/* General Flags for address space resource descriptors */
+
+#define ACPI_RESOURCE_FLAG_DEC 2
+#define ACPI_RESOURCE_FLAG_MIF 4
+#define ACPI_RESOURCE_FLAG_MAF 8
+
struct aml_resource_memory24 {
AML_RESOURCE_LARGE_HEADER_COMMON u8 flags;
u16 minimum;
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index c8b5e2565b98..fb09b08d7080 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 57895db3231a..7ea0f162f11c 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index b5b904ee815f..feadeed1012d 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 87eff701ecfa..bc8e63f7784b 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 52eb4e01622a..a9ffd44c18fe 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
#include "acinterp.h"
#include "acnamesp.h"
#ifdef ACPI_DISASSEMBLER
-#include <acpi/acdisasm.h>
+#include "acdisasm.h"
#endif
#define _COMPONENT ACPI_DISPATCHER
@@ -151,6 +151,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
if (ACPI_FAILURE(status)) {
+ acpi_ut_delete_object_desc(mutex_desc);
return_ACPI_STATUS(status);
}
@@ -378,7 +379,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
*/
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
- return_ACPI_STATUS(AE_NO_MEMORY);
+ status = AE_NO_MEMORY;
+ goto cleanup;
}
info->parameters = &this_walk_state->operands[0];
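Both dsmethod.c hunks above are error-path fixes: the half-created mutex object is now freed when acpi_os_create_mutex() fails, and an allocation failure in acpi_ds_call_control_method() is routed through the common cleanup label instead of returning directly. The same pattern in a generic, self-contained sketch (hypothetical names, not ACPICA code):

#include <linux/slab.h>

struct example_object {				/* hypothetical object type */
	int id;
};

static int example_backend_init(struct example_object *obj);	/* hypothetical helper */

static int example_create(struct example_object **out)
{
	struct example_object *obj;
	int status;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	status = example_backend_init(obj);
	if (status) {
		kfree(obj);		/* free the half-built object, as the mutex fix does */
		return status;
	}

	*out = obj;
	return 0;
}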
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 9a83b7e0f3ba..3da80460ce38 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index c9f15d3a3686..e20e9f84eee8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -388,7 +388,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *parent;
union acpi_operand_object *obj_desc = NULL;
acpi_status status = AE_OK;
- unsigned i;
+ u32 i;
u16 index;
u16 reference_count;
@@ -525,7 +525,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
}
ACPI_INFO((AE_INFO,
- "Actual Package length (%u) is larger than NumElements field (%u), truncated\n",
+ "Actual Package length (%u) is larger than NumElements field (%u), truncated",
i, element_count));
} else if (i < element_count) {
/*
@@ -703,7 +703,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
/* Truncate value if we are executing from a 32-bit ACPI table */
#ifndef ACPI_NO_METHOD_EXECUTION
- acpi_ex_truncate_for32bit_table(obj_desc);
+ (void)acpi_ex_truncate_for32bit_table(obj_desc);
#endif
break;
@@ -725,8 +725,18 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
case AML_TYPE_LITERAL:
obj_desc->integer.value = op->common.value.integer;
+
#ifndef ACPI_NO_METHOD_EXECUTION
- acpi_ex_truncate_for32bit_table(obj_desc);
+ if (acpi_ex_truncate_for32bit_table(obj_desc)) {
+
+ /* Warn if we found a 64-bit constant in a 32-bit table */
+
+ ACPI_WARNING((AE_INFO,
+ "Truncated 64-bit constant found in 32-bit table: %8.8X%8.8X => %8.8X",
+ ACPI_FORMAT_UINT64(op->common.
+ value.integer),
+ (u32)obj_desc->integer.value));
+ }
#endif
break;
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index d09c6b4bab2c..ee6367b8eaf7 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -486,18 +486,18 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
/*
- * This is where we evaluate the signature_string and oem_iDString
- * and oem_table_iDString of the data_table_region declaration
+ * This is where we evaluate the Signature string, oem_id string,
+ * and oem_table_id string of the Data Table Region declaration
*/
node = op->common.node;
- /* next_op points to signature_string op */
+ /* next_op points to Signature string op */
next_op = op->common.value.arg;
/*
- * Evaluate/create the signature_string and oem_iDString
- * and oem_table_iDString operands
+ * Evaluate/create the Signature string, oem_id string,
+ * and oem_table_id string operands
*/
status = acpi_ds_create_operands(walk_state, next_op);
if (ACPI_FAILURE(status)) {
@@ -505,8 +505,8 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
}
/*
- * Resolve the signature_string and oem_iDString
- * and oem_table_iDString operands
+ * Resolve the Signature string, oem_id string,
+ * and oem_table_id string operands
*/
status = acpi_ex_resolve_operands(op->common.aml_opcode,
ACPI_WALK_OPERANDS, walk_state);
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index afeb99f49482..4d8c992a51d8 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -178,7 +178,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
if (!op) {
ACPI_ERROR((AE_INFO, "Null Op"));
- return_UINT8(TRUE);
+ return_VALUE(TRUE);
}
/*
@@ -210,7 +210,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
"At Method level, result of [%s] not used\n",
acpi_ps_get_opcode_name(op->common.
aml_opcode)));
- return_UINT8(FALSE);
+ return_VALUE(FALSE);
}
/* Get info on the parent. The root_op is AML_SCOPE */
@@ -219,7 +219,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
if (parent_info->class == AML_CLASS_UNKNOWN) {
ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
- return_UINT8(FALSE);
+ return_VALUE(FALSE);
}
/*
@@ -307,7 +307,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
- return_UINT8(TRUE);
+ return_VALUE(TRUE);
result_not_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
@@ -316,7 +316,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
- return_UINT8(FALSE);
+ return_VALUE(FALSE);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 58593931be96..44f8325c2bae 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -149,7 +149,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
/* Truncate the predicate to 32-bits if necessary */
- acpi_ex_truncate_for32bit_table(local_obj_desc);
+ (void)acpi_ex_truncate_for32bit_table(local_obj_desc);
/*
* Save the result of the predicate evaluation on
@@ -706,7 +706,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
* ACPI 2.0 support for 64-bit integers: Truncate numeric
* result value if we are executing from a 32-bit ACPI table
*/
- acpi_ex_truncate_for32bit_table(walk_state->result_obj);
+ (void)acpi_ex_truncate_for32bit_table(walk_state->result_obj);
/*
* Check if we just completed the evaluation of a
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 557510084c7a..6e17c0e24e63 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,7 +50,7 @@
#include "acnamesp.h"
#ifdef ACPI_ASL_COMPILER
-#include <acpi/acdisasm.h>
+#include "acdisasm.h"
#endif
#define _COMPONENT ACPI_DISPATCHER
@@ -178,7 +178,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* Target of Scope() not found. Generate an External for it, and
* insert the name into the namespace.
*/
- acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0);
+ acpi_dm_add_to_external_list(op, path, ACPI_TYPE_DEVICE,
+ 0);
status =
acpi_ns_lookup(walk_state->scope_info, path,
object_type, ACPI_IMODE_LOAD_PASS1,
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 379835748357..4407ff2377d5 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -222,7 +222,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
*/
ACPI_WARNING((AE_INFO,
"Type override - [%4.4s] had invalid type (%s) "
- "for Scope operator, changed to type ANY\n",
+ "for Scope operator, changed to type ANY",
acpi_ut_get_node_name(node),
acpi_ut_get_type_name(node->type)));
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index f6c4295470ae..d67891de1b54 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 3e65a15a735f..ecb12e2137ff 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index d4acfbbe5b29..b8ea0b26cde3 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index af14a7137632..a621481c6cf2 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 36d120574423..b9adb9a7ed85 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -561,8 +561,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
status = AE_NO_MEMORY;
} else {
/*
- * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
- * control method that corresponds to this GPE
+ * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
+ * _Lxx/_Exx control method that corresponds to this GPE
*/
info->prefix_node =
local_gpe_event_info->dispatch.method_node;
@@ -707,7 +707,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to clear GPE%02X", gpe_number));
- return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
}
}
@@ -724,7 +724,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE%02X", gpe_number));
- return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
}
/*
@@ -765,7 +765,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to queue handler for GPE%2X - event disabled",
+ "Unable to queue handler for GPE%02X - event disabled",
gpe_number));
}
break;
@@ -784,7 +784,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
break;
}
- return_UINT32(ACPI_INTERRUPT_HANDLED);
+ return_VALUE(ACPI_INTERRUPT_HANDLED);
}
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 1571a61a7833..a2d688bbac02 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -405,13 +405,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
(*return_gpe_block) = gpe_block;
}
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
- (u32) gpe_block->block_base_number,
- (u32) (gpe_block->block_base_number +
- (gpe_block->gpe_count - 1)),
- gpe_device->name.ascii, gpe_block->register_count,
- interrupt_number));
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ " Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X\n",
+ (u32)gpe_block->block_base_number,
+ (u32)(gpe_block->block_base_number +
+ (gpe_block->gpe_count - 1)),
+ gpe_device->name.ascii, gpe_block->register_count,
+ interrupt_number));
/* Update global count of currently available GPEs */
@@ -496,9 +496,11 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
}
if (gpe_enabled_count) {
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Enabled %u GPEs in this block\n",
- gpe_enabled_count));
+ ACPI_INFO((AE_INFO,
+ "Enabled %u GPEs in block %02X to %02X",
+ gpe_enabled_count, (u32)gpe_block->block_base_number,
+ (u32)(gpe_block->block_base_number +
+ (gpe_block->gpe_count - 1))));
}
gpe_block->initialized = TRUE;
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index da0add858f81..72b8f6b3f4ca 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -86,6 +86,9 @@ acpi_status acpi_ev_gpe_initialize(void)
ACPI_FUNCTION_TRACE(ev_gpe_initialize);
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "Initializing General Purpose Events (GPEs):\n"));
+
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 228a0c3b1d49..b24dbb80fab8 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
new file mode 100644
index 000000000000..d4f83112c2e2
--- /dev/null
+++ b/drivers/acpi/acpica/evhandler.c
@@ -0,0 +1,529 @@
+/******************************************************************************
+ *
+ * Module Name: evhandler - Support for Address Space handlers
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evhandler")
+
+/* Local prototypes */
+static acpi_status
+acpi_ev_install_handler(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+/* These are the address spaces that will get default handlers */
+
+u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
+ ACPI_ADR_SPACE_SYSTEM_MEMORY,
+ ACPI_ADR_SPACE_SYSTEM_IO,
+ ACPI_ADR_SPACE_PCI_CONFIG,
+ ACPI_ADR_SPACE_DATA_TABLE
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_region_handlers
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Installs the core subsystem default address space handlers.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_install_region_handlers(void)
+{
+ acpi_status status;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ev_install_region_handlers);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * All address spaces (PCI Config, EC, SMBus) are scope dependent and
+ * registration must occur for a specific device.
+ *
+ * In the case of the system memory and IO address spaces there is
+ * currently no device associated with the address space. For these we
+ * use the root.
+ *
+ * We install the default PCI config space handler at the root so that
+ * this space is immediately available even though we have not
+ * enumerated all the PCI Root Buses yet. This is to conform to the ACPI
+ * specification which states that the PCI config space must be always
+ * available -- even though we are nowhere near ready to find the PCI root
+ * buses at this point.
+ *
+ * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler
+ * has already been installed (via acpi_install_address_space_handler).
+ * Similar for AE_SAME_HANDLER.
+ */
+ for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+ status = acpi_ev_install_space_handler(acpi_gbl_root_node,
+ acpi_gbl_default_address_spaces
+ [i],
+ ACPI_DEFAULT_HANDLER,
+ NULL, NULL);
+ switch (status) {
+ case AE_OK:
+ case AE_SAME_HANDLER:
+ case AE_ALREADY_EXISTS:
+
+ /* These exceptions are all OK */
+
+ status = AE_OK;
+ break;
+
+ default:
+
+ goto unlock_and_exit;
+ }
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_has_default_handler
+ *
+ * PARAMETERS: node - Namespace node for the device
+ * space_id - The address space ID
+ *
+ * RETURN: TRUE if default handler is installed, FALSE otherwise
+ *
+ * DESCRIPTION: Check if the default handler is installed for the requested
+ * space ID.
+ *
+ ******************************************************************************/
+
+u8
+acpi_ev_has_default_handler(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *handler_obj;
+
+ /* Must have an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+ handler_obj = obj_desc->device.handler;
+
+ /* Walk the linked list of handlers for this object */
+
+ while (handler_obj) {
+ if (handler_obj->address_space.space_id == space_id) {
+ if (handler_obj->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
+ return (TRUE);
+ }
+ }
+
+ handler_obj = handler_obj->address_space.next;
+ }
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_handler
+ *
+ * PARAMETERS: walk_namespace callback
+ *
+ * DESCRIPTION: This routine installs an address handler into objects that are
+ * of type Region or Device.
+ *
+ * If the Object is a Device, and the device has a handler of
+ * the same type then the search is terminated in that branch.
+ *
+ * This is because the existing handler is closer in proximity
+ * to any more regions than the one we are trying to install.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_install_handler(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *next_handler_obj;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_NAME(ev_install_handler);
+
+ handler_obj = (union acpi_operand_object *)context;
+
+ /* Parameter validation */
+
+ if (!handler_obj) {
+ return (AE_OK);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_validate_handle(obj_handle);
+ if (!node) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * We only care about regions and objects that are allowed to have
+ * address space handlers
+ */
+ if ((node->type != ACPI_TYPE_DEVICE) &&
+ (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
+ return (AE_OK);
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+
+ /* No object, just exit */
+
+ return (AE_OK);
+ }
+
+ /* Devices are handled differently than regions */
+
+ if (obj_desc->common.type == ACPI_TYPE_DEVICE) {
+
+ /* Check if this Device already has a handler for this address space */
+
+ next_handler_obj = obj_desc->device.handler;
+ while (next_handler_obj) {
+
+ /* Found a handler, is it for the same address space? */
+
+ if (next_handler_obj->address_space.space_id ==
+ handler_obj->address_space.space_id) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Found handler for region [%s] in device %p(%p) "
+ "handler %p\n",
+ acpi_ut_get_region_name
+ (handler_obj->address_space.
+ space_id), obj_desc,
+ next_handler_obj,
+ handler_obj));
+
+ /*
+ * Since the object we found it on was a device, then it
+ * means that someone has already installed a handler for
+ * the branch of the namespace from this device on. Just
+ * bail out telling the walk routine to not traverse this
+ * branch. This preserves the scoping rule for handlers.
+ */
+ return (AE_CTRL_DEPTH);
+ }
+
+ /* Walk the linked list of handlers attached to this device */
+
+ next_handler_obj = next_handler_obj->address_space.next;
+ }
+
+ /*
+ * As long as the device didn't have a handler for this space we
+ * don't care about it. We just ignore it and proceed.
+ */
+ return (AE_OK);
+ }
+
+ /* Object is a Region */
+
+ if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
+
+ /* This region is for a different address space, just ignore it */
+
+ return (AE_OK);
+ }
+
+ /*
+ * Now we have a region and it is for the handler's address space type.
+ *
+ * First disconnect region for any previous handler (if any)
+ */
+ acpi_ev_detach_region(obj_desc, FALSE);
+
+ /* Connect the region to the new handler */
+
+ status = acpi_ev_attach_region(handler_obj, obj_desc, FALSE);
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_space_handler
+ *
+ * PARAMETERS: node - Namespace node for the device
+ * space_id - The address space ID
+ * handler - Address of the handler
+ * setup - Address of the setup function
+ * context - Value passed to the handler on each access
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for all op_regions of a given space_id.
+ * Assumes namespace is locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_install_space_handler(struct acpi_namespace_node * node,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup, void *context)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *handler_obj;
+ acpi_status status;
+ acpi_object_type type;
+ u8 flags = 0;
+
+ ACPI_FUNCTION_TRACE(ev_install_space_handler);
+
+ /*
+ * This registration is valid for only the types below and the root. This
+ * is where the default handlers get placed.
+ */
+ if ((node->type != ACPI_TYPE_DEVICE) &&
+ (node->type != ACPI_TYPE_PROCESSOR) &&
+ (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ if (handler == ACPI_DEFAULT_HANDLER) {
+ flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;
+
+ switch (space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ handler = acpi_ex_system_memory_space_handler;
+ setup = acpi_ev_system_memory_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ handler = acpi_ex_system_io_space_handler;
+ setup = acpi_ev_io_space_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ handler = acpi_ex_pci_config_space_handler;
+ setup = acpi_ev_pci_config_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_CMOS:
+ handler = acpi_ex_cmos_space_handler;
+ setup = acpi_ev_cmos_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_PCI_BAR_TARGET:
+ handler = acpi_ex_pci_bar_space_handler;
+ setup = acpi_ev_pci_bar_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_DATA_TABLE:
+ handler = acpi_ex_data_table_space_handler;
+ setup = NULL;
+ break;
+
+ default:
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+ }
+
+ /* If the caller hasn't specified a setup routine, use the default */
+
+ if (!setup) {
+ setup = acpi_ev_default_region_setup;
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+ /*
+ * The attached device object already exists. Make sure the handler
+ * is not already installed.
+ */
+ handler_obj = obj_desc->device.handler;
+
+ /* Walk the handler list for this device */
+
+ while (handler_obj) {
+
+ /* Same space_id indicates a handler already installed */
+
+ if (handler_obj->address_space.space_id == space_id) {
+ if (handler_obj->address_space.handler ==
+ handler) {
+ /*
+ * It is (relatively) OK to attempt to install the SAME
+ * handler twice. This can easily happen with the
+ * PCI_Config space.
+ */
+ status = AE_SAME_HANDLER;
+ goto unlock_and_exit;
+ } else {
+ /* A handler is already installed */
+
+ status = AE_ALREADY_EXISTS;
+ }
+ goto unlock_and_exit;
+ }
+
+ /* Walk the linked list of handlers */
+
+ handler_obj = handler_obj->address_space.next;
+ }
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Creating object on Device %p while installing handler\n",
+ node));
+
+ /* obj_desc does not exist, create one */
+
+ if (node->type == ACPI_TYPE_ANY) {
+ type = ACPI_TYPE_DEVICE;
+ } else {
+ type = node->type;
+ }
+
+ obj_desc = acpi_ut_create_internal_object(type);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Init new descriptor */
+
+ obj_desc->common.type = (u8)type;
+
+ /* Attach the new object to the Node */
+
+ status = acpi_ns_attach_object(node, obj_desc, type);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
+ acpi_ut_get_region_name(space_id), space_id,
+ acpi_ut_get_node_name(node), node, obj_desc));
+
+ /*
+ * Install the handler
+ *
+ * At this point there is no existing handler. Just allocate the object
+ * for the handler and link it into the list.
+ */
+ handler_obj =
+ acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
+ if (!handler_obj) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Init handler obj */
+
+ handler_obj->address_space.space_id = (u8)space_id;
+ handler_obj->address_space.handler_flags = flags;
+ handler_obj->address_space.region_list = NULL;
+ handler_obj->address_space.node = node;
+ handler_obj->address_space.handler = handler;
+ handler_obj->address_space.context = context;
+ handler_obj->address_space.setup = setup;
+
+ /* Install at head of Device.address_space list */
+
+ handler_obj->address_space.next = obj_desc->device.handler;
+
+ /*
+ * The Device object is the first reference on the handler_obj.
+ * Each region that uses the handler adds a reference.
+ */
+ obj_desc->device.handler = handler_obj;
+
+ /*
+ * Walk the namespace finding all of the regions this
+ * handler will manage.
+ *
+ * Start at the device and search the branch toward
+ * the leaf nodes until either the leaf is encountered or
+ * a device is detected that has an address handler of the
+ * same type.
+ *
+ * In either case, back up and search down the remainder
+ * of the branch
+ */
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+ ACPI_NS_WALK_UNLOCK,
+ acpi_ev_install_handler, NULL,
+ handler_obj, NULL);
+
+ unlock_and_exit:
+ return_ACPI_STATUS(status);
+}
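
For illustration, a minimal sketch of how a caller reaches this path through the
public ACPICA interface, acpi_install_address_space_handler(), which takes the
same space_id/handler/setup/context arguments. The handler body, the device
handle and the choice of the EC address space are assumptions made for the
example only.

#include <acpi/acpi.h>

static acpi_status
example_region_handler(u32 function, acpi_physical_address address,
		       u32 bit_width, u64 *value,
		       void *handler_context, void *region_context)
{
	/* Perform the access for this address space */
	if (function == ACPI_READ)
		*value = 0;

	return (AE_OK);
}

static acpi_status example_install(acpi_handle device)
{
	/*
	 * A NULL setup routine is replaced internally with
	 * acpi_ev_default_region_setup, as shown above.
	 */
	return acpi_install_address_space_handler(device,
						  ACPI_ADR_SPACE_EC,
						  example_region_handler,
						  NULL, NULL);
}
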
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 51f537937c1f..c986b2336b81 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 0cc6a16fedc7..6555e350fc1f 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -1,11 +1,11 @@
/******************************************************************************
*
- * Module Name: evregion - ACPI address_space (op_region) handler dispatch
+ * Module Name: evregion - Operation Region support
*
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,10 +50,9 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evregion")
+extern u8 acpi_gbl_default_address_spaces[];
+
/* Local prototypes */
-static u8
-acpi_ev_has_default_handler(struct acpi_namespace_node *node,
- acpi_adr_space_type space_id);
static void acpi_ev_orphan_ec_reg_method(void);
@@ -61,135 +60,6 @@ static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
-static acpi_status
-acpi_ev_install_handler(acpi_handle obj_handle,
- u32 level, void *context, void **return_value);
-
-/* These are the address spaces that will get default handlers */
-
-#define ACPI_NUM_DEFAULT_SPACES 4
-
-static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
- ACPI_ADR_SPACE_SYSTEM_MEMORY,
- ACPI_ADR_SPACE_SYSTEM_IO,
- ACPI_ADR_SPACE_PCI_CONFIG,
- ACPI_ADR_SPACE_DATA_TABLE
-};
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_install_region_handlers
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Installs the core subsystem default address space handlers.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_install_region_handlers(void)
-{
- acpi_status status;
- u32 i;
-
- ACPI_FUNCTION_TRACE(ev_install_region_handlers);
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * All address spaces (PCI Config, EC, SMBus) are scope dependent and
- * registration must occur for a specific device.
- *
- * In the case of the system memory and IO address spaces there is
- * currently no device associated with the address space. For these we
- * use the root.
- *
- * We install the default PCI config space handler at the root so that
- * this space is immediately available even though the we have not
- * enumerated all the PCI Root Buses yet. This is to conform to the ACPI
- * specification which states that the PCI config space must be always
- * available -- even though we are nowhere near ready to find the PCI root
- * buses at this point.
- *
- * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler
- * has already been installed (via acpi_install_address_space_handler).
- * Similar for AE_SAME_HANDLER.
- */
- for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
- status = acpi_ev_install_space_handler(acpi_gbl_root_node,
- acpi_gbl_default_address_spaces
- [i],
- ACPI_DEFAULT_HANDLER,
- NULL, NULL);
- switch (status) {
- case AE_OK:
- case AE_SAME_HANDLER:
- case AE_ALREADY_EXISTS:
-
- /* These exceptions are all OK */
-
- status = AE_OK;
- break;
-
- default:
-
- goto unlock_and_exit;
- }
- }
-
- unlock_and_exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_has_default_handler
- *
- * PARAMETERS: node - Namespace node for the device
- * space_id - The address space ID
- *
- * RETURN: TRUE if default handler is installed, FALSE otherwise
- *
- * DESCRIPTION: Check if the default handler is installed for the requested
- * space ID.
- *
- ******************************************************************************/
-
-static u8
-acpi_ev_has_default_handler(struct acpi_namespace_node *node,
- acpi_adr_space_type space_id)
-{
- union acpi_operand_object *obj_desc;
- union acpi_operand_object *handler_obj;
-
- /* Must have an existing internal object */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (obj_desc) {
- handler_obj = obj_desc->device.handler;
-
- /* Walk the linked list of handlers for this object */
-
- while (handler_obj) {
- if (handler_obj->address_space.space_id == space_id) {
- if (handler_obj->address_space.handler_flags &
- ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
- return (TRUE);
- }
- }
-
- handler_obj = handler_obj->address_space.next;
- }
- }
-
- return (FALSE);
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_ev_initialize_op_regions
@@ -241,91 +111,6 @@ acpi_status acpi_ev_initialize_op_regions(void)
/*******************************************************************************
*
- * FUNCTION: acpi_ev_execute_reg_method
- *
- * PARAMETERS: region_obj - Region object
- * function - Passed to _REG: On (1) or Off (0)
- *
- * RETURN: Status
- *
- * DESCRIPTION: Execute _REG method for a region
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
-{
- struct acpi_evaluate_info *info;
- union acpi_operand_object *args[3];
- union acpi_operand_object *region_obj2;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_execute_reg_method);
-
- region_obj2 = acpi_ns_get_secondary_object(region_obj);
- if (!region_obj2) {
- return_ACPI_STATUS(AE_NOT_EXIST);
- }
-
- if (region_obj2->extra.method_REG == NULL) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Allocate and initialize the evaluation information block */
-
- info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
- if (!info) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- info->prefix_node = region_obj2->extra.method_REG;
- info->pathname = NULL;
- info->parameters = args;
- info->flags = ACPI_IGNORE_RETURN_VALUE;
-
- /*
- * The _REG method has two arguments:
- *
- * arg0 - Integer:
- * Operation region space ID Same value as region_obj->Region.space_id
- *
- * arg1 - Integer:
- * connection status 1 for connecting the handler, 0 for disconnecting
- * the handler (Passed as a parameter)
- */
- args[0] =
- acpi_ut_create_integer_object((u64) region_obj->region.space_id);
- if (!args[0]) {
- status = AE_NO_MEMORY;
- goto cleanup1;
- }
-
- args[1] = acpi_ut_create_integer_object((u64) function);
- if (!args[1]) {
- status = AE_NO_MEMORY;
- goto cleanup2;
- }
-
- args[2] = NULL; /* Terminate list */
-
- /* Execute the method, no return value */
-
- ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
- (ACPI_TYPE_METHOD, info->prefix_node, NULL));
-
- status = acpi_ns_evaluate(info);
- acpi_ut_remove_reference(args[1]);
-
- cleanup2:
- acpi_ut_remove_reference(args[0]);
-
- cleanup1:
- ACPI_FREE(info);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ev_address_space_dispatch
*
* PARAMETERS: region_obj - Internal region object
@@ -709,351 +494,86 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj,
/*******************************************************************************
*
- * FUNCTION: acpi_ev_install_handler
- *
- * PARAMETERS: walk_namespace callback
- *
- * DESCRIPTION: This routine installs an address handler into objects that are
- * of type Region or Device.
- *
- * If the Object is a Device, and the device has a handler of
- * the same type then the search is terminated in that branch.
- *
- * This is because the existing handler is closer in proximity
- * to any more regions than the one we are trying to install.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_install_handler(acpi_handle obj_handle,
- u32 level, void *context, void **return_value)
-{
- union acpi_operand_object *handler_obj;
- union acpi_operand_object *next_handler_obj;
- union acpi_operand_object *obj_desc;
- struct acpi_namespace_node *node;
- acpi_status status;
-
- ACPI_FUNCTION_NAME(ev_install_handler);
-
- handler_obj = (union acpi_operand_object *)context;
-
- /* Parameter validation */
-
- if (!handler_obj) {
- return (AE_OK);
- }
-
- /* Convert and validate the device handle */
-
- node = acpi_ns_validate_handle(obj_handle);
- if (!node) {
- return (AE_BAD_PARAMETER);
- }
-
- /*
- * We only care about regions and objects that are allowed to have
- * address space handlers
- */
- if ((node->type != ACPI_TYPE_DEVICE) &&
- (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
- return (AE_OK);
- }
-
- /* Check for an existing internal object */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (!obj_desc) {
-
- /* No object, just exit */
-
- return (AE_OK);
- }
-
- /* Devices are handled different than regions */
-
- if (obj_desc->common.type == ACPI_TYPE_DEVICE) {
-
- /* Check if this Device already has a handler for this address space */
-
- next_handler_obj = obj_desc->device.handler;
- while (next_handler_obj) {
-
- /* Found a handler, is it for the same address space? */
-
- if (next_handler_obj->address_space.space_id ==
- handler_obj->address_space.space_id) {
- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Found handler for region [%s] in device %p(%p) "
- "handler %p\n",
- acpi_ut_get_region_name
- (handler_obj->address_space.
- space_id), obj_desc,
- next_handler_obj,
- handler_obj));
-
- /*
- * Since the object we found it on was a device, then it
- * means that someone has already installed a handler for
- * the branch of the namespace from this device on. Just
- * bail out telling the walk routine to not traverse this
- * branch. This preserves the scoping rule for handlers.
- */
- return (AE_CTRL_DEPTH);
- }
-
- /* Walk the linked list of handlers attached to this device */
-
- next_handler_obj = next_handler_obj->address_space.next;
- }
-
- /*
- * As long as the device didn't have a handler for this space we
- * don't care about it. We just ignore it and proceed.
- */
- return (AE_OK);
- }
-
- /* Object is a Region */
-
- if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
-
- /* This region is for a different address space, just ignore it */
-
- return (AE_OK);
- }
-
- /*
- * Now we have a region and it is for the handler's address space type.
- *
- * First disconnect region for any previous handler (if any)
- */
- acpi_ev_detach_region(obj_desc, FALSE);
-
- /* Connect the region to the new handler */
-
- status = acpi_ev_attach_region(handler_obj, obj_desc, FALSE);
- return (status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_install_space_handler
+ * FUNCTION: acpi_ev_execute_reg_method
*
- * PARAMETERS: node - Namespace node for the device
- * space_id - The address space ID
- * handler - Address of the handler
- * setup - Address of the setup function
- * context - Value passed to the handler on each access
+ * PARAMETERS: region_obj - Region object
+ * function - Passed to _REG: On (1) or Off (0)
*
* RETURN: Status
*
- * DESCRIPTION: Install a handler for all op_regions of a given space_id.
- * Assumes namespace is locked
+ * DESCRIPTION: Execute _REG method for a region
*
******************************************************************************/
acpi_status
-acpi_ev_install_space_handler(struct acpi_namespace_node * node,
- acpi_adr_space_type space_id,
- acpi_adr_space_handler handler,
- acpi_adr_space_setup setup, void *context)
+acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
{
- union acpi_operand_object *obj_desc;
- union acpi_operand_object *handler_obj;
+ struct acpi_evaluate_info *info;
+ union acpi_operand_object *args[3];
+ union acpi_operand_object *region_obj2;
acpi_status status;
- acpi_object_type type;
- u8 flags = 0;
- ACPI_FUNCTION_TRACE(ev_install_space_handler);
-
- /*
- * This registration is valid for only the types below and the root. This
- * is where the default handlers get placed.
- */
- if ((node->type != ACPI_TYPE_DEVICE) &&
- (node->type != ACPI_TYPE_PROCESSOR) &&
- (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
+ ACPI_FUNCTION_TRACE(ev_execute_reg_method);
- if (handler == ACPI_DEFAULT_HANDLER) {
- flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;
-
- switch (space_id) {
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- handler = acpi_ex_system_memory_space_handler;
- setup = acpi_ev_system_memory_region_setup;
- break;
-
- case ACPI_ADR_SPACE_SYSTEM_IO:
- handler = acpi_ex_system_io_space_handler;
- setup = acpi_ev_io_space_region_setup;
- break;
-
- case ACPI_ADR_SPACE_PCI_CONFIG:
- handler = acpi_ex_pci_config_space_handler;
- setup = acpi_ev_pci_config_region_setup;
- break;
-
- case ACPI_ADR_SPACE_CMOS:
- handler = acpi_ex_cmos_space_handler;
- setup = acpi_ev_cmos_region_setup;
- break;
-
- case ACPI_ADR_SPACE_PCI_BAR_TARGET:
- handler = acpi_ex_pci_bar_space_handler;
- setup = acpi_ev_pci_bar_region_setup;
- break;
-
- case ACPI_ADR_SPACE_DATA_TABLE:
- handler = acpi_ex_data_table_space_handler;
- setup = NULL;
- break;
-
- default:
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
}
- /* If the caller hasn't specified a setup routine, use the default */
-
- if (!setup) {
- setup = acpi_ev_default_region_setup;
+ if (region_obj2->extra.method_REG == NULL) {
+ return_ACPI_STATUS(AE_OK);
}
- /* Check for an existing internal object */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (obj_desc) {
- /*
- * The attached device object already exists. Make sure the handler
- * is not already installed.
- */
- handler_obj = obj_desc->device.handler;
-
- /* Walk the handler list for this device */
-
- while (handler_obj) {
-
- /* Same space_id indicates a handler already installed */
-
- if (handler_obj->address_space.space_id == space_id) {
- if (handler_obj->address_space.handler ==
- handler) {
- /*
- * It is (relatively) OK to attempt to install the SAME
- * handler twice. This can easily happen with the
- * PCI_Config space.
- */
- status = AE_SAME_HANDLER;
- goto unlock_and_exit;
- } else {
- /* A handler is already installed */
-
- status = AE_ALREADY_EXISTS;
- }
- goto unlock_and_exit;
- }
-
- /* Walk the linked list of handlers */
-
- handler_obj = handler_obj->address_space.next;
- }
- } else {
- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Creating object on Device %p while installing handler\n",
- node));
-
- /* obj_desc does not exist, create one */
-
- if (node->type == ACPI_TYPE_ANY) {
- type = ACPI_TYPE_DEVICE;
- } else {
- type = node->type;
- }
-
- obj_desc = acpi_ut_create_internal_object(type);
- if (!obj_desc) {
- status = AE_NO_MEMORY;
- goto unlock_and_exit;
- }
-
- /* Init new descriptor */
-
- obj_desc->common.type = (u8) type;
-
- /* Attach the new object to the Node */
-
- status = acpi_ns_attach_object(node, obj_desc, type);
-
- /* Remove local reference to the object */
-
- acpi_ut_remove_reference(obj_desc);
+ /* Allocate and initialize the evaluation information block */
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
- ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
- acpi_ut_get_region_name(space_id), space_id,
- acpi_ut_get_node_name(node), node, obj_desc));
+ info->prefix_node = region_obj2->extra.method_REG;
+ info->pathname = NULL;
+ info->parameters = args;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
/*
- * Install the handler
+ * The _REG method has two arguments:
+ *
+ * arg0 - Integer:
+	 *   Operation region space ID. Same value as region_obj->Region.space_id
*
- * At this point there is no existing handler. Just allocate the object
- * for the handler and link it into the list.
+ * arg1 - Integer:
+	 *   Connection status: 1 for connecting the handler, 0 for
+	 *   disconnecting the handler (passed as a parameter)
*/
- handler_obj =
- acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
- if (!handler_obj) {
+ args[0] =
+ acpi_ut_create_integer_object((u64)region_obj->region.space_id);
+ if (!args[0]) {
status = AE_NO_MEMORY;
- goto unlock_and_exit;
+ goto cleanup1;
}
- /* Init handler obj */
+ args[1] = acpi_ut_create_integer_object((u64)function);
+ if (!args[1]) {
+ status = AE_NO_MEMORY;
+ goto cleanup2;
+ }
- handler_obj->address_space.space_id = (u8) space_id;
- handler_obj->address_space.handler_flags = flags;
- handler_obj->address_space.region_list = NULL;
- handler_obj->address_space.node = node;
- handler_obj->address_space.handler = handler;
- handler_obj->address_space.context = context;
- handler_obj->address_space.setup = setup;
+ args[2] = NULL; /* Terminate list */
- /* Install at head of Device.address_space list */
+ /* Execute the method, no return value */
- handler_obj->address_space.next = obj_desc->device.handler;
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, info->prefix_node, NULL));
- /*
- * The Device object is the first reference on the handler_obj.
- * Each region that uses the handler adds a reference.
- */
- obj_desc->device.handler = handler_obj;
+ status = acpi_ns_evaluate(info);
+ acpi_ut_remove_reference(args[1]);
- /*
- * Walk the namespace finding all of the regions this
- * handler will manage.
- *
- * Start at the device and search the branch toward
- * the leaf nodes until either the leaf is encountered or
- * a device is detected that has an address handler of the
- * same type.
- *
- * In either case, back up and search down the remainder
- * of the branch
- */
- status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
- ACPI_NS_WALK_UNLOCK,
- acpi_ev_install_handler, NULL,
- handler_obj, NULL);
+ cleanup2:
+ acpi_ut_remove_reference(args[0]);
- unlock_and_exit:
+ cleanup1:
+ ACPI_FREE(info);
return_ACPI_STATUS(status);
}
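
The two-argument _REG convention built above (arg0 = address space ID, arg1 = 1
for connect / 0 for disconnect) can also be expressed through the public
acpi_evaluate_object() interface. This is a sketch only; ACPICA normally runs
_REG itself, and the handle and space ID below are assumptions.

#include <acpi/acpi.h>

static acpi_status example_run_reg(acpi_handle region_parent)
{
	union acpi_object args[2];
	struct acpi_object_list arg_list;

	args[0].type = ACPI_TYPE_INTEGER;
	args[0].integer.value = ACPI_ADR_SPACE_EC;	/* arg0: space ID */
	args[1].type = ACPI_TYPE_INTEGER;
	args[1].integer.value = 1;			/* arg1: connect */

	arg_list.count = 2;
	arg_list.pointer = args;

	return acpi_evaluate_object(region_parent, "_REG", &arg_list, NULL);
}
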
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 1474241bfc7e..3bb616794b3b 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index f9661e2b46a9..f4b43bede015 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,7 +89,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
- return_UINT32(interrupt_handled);
+ return_VALUE(interrupt_handled);
}
/*******************************************************************************
@@ -120,7 +120,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
- return_UINT32(interrupt_handled);
+ return_VALUE(interrupt_handled);
}
/******************************************************************************
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index ae668f32cf16..ddffd6847914 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -56,13 +56,13 @@ ACPI_MODULE_NAME("evxface")
*
* FUNCTION: acpi_install_notify_handler
*
- * PARAMETERS: Device - The device for which notifies will be handled
+ * PARAMETERS: device - The device for which notifies will be handled
* handler_type - The type of handler:
* ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
* ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
* ACPI_ALL_NOTIFY: Both System and Device
- * Handler - Address of the handler
- * Context - Value passed to the handler on each GPE
+ * handler - Address of the handler
+ * context - Value passed to the handler on each GPE
*
* RETURN: Status
*
@@ -217,12 +217,12 @@ ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
*
* FUNCTION: acpi_remove_notify_handler
*
- * PARAMETERS: Device - The device for which the handler is installed
+ * PARAMETERS: device - The device for which the handler is installed
* handler_type - The type of handler:
* ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
* ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
* ACPI_ALL_NOTIFY: Both System and Device
- * Handler - Address of the handler
+ * handler - Address of the handler
*
* RETURN: Status
*
@@ -249,7 +249,8 @@ acpi_remove_notify_handler(acpi_handle device,
(handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Make sure all deferred tasks are completed */
+
+ /* Make sure all deferred notify tasks are completed */
acpi_os_wait_events_complete();
@@ -596,7 +597,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
return_ACPI_STATUS(status);
}
- /* Allocate memory for the handler object */
+ /* Allocate and init handler object (before lock) */
handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info));
if (!handler) {
@@ -622,16 +623,15 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
goto free_and_exit;
}
- /* Allocate and init handler object */
-
handler->address = address;
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
- handler->original_flags = gpe_event_info->flags &
- (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
+ handler->original_flags = (u8)(gpe_event_info->flags &
+ (ACPI_GPE_XRUPT_TYPE_MASK |
+ ACPI_GPE_DISPATCH_MASK));
/*
- * If the GPE is associated with a method, it might have been enabled
+ * If the GPE is associated with a method, it may have been enabled
* automatically during initialization, in which case it has to be
* disabled now to avoid spurious execution of the handler.
*/
@@ -646,7 +646,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
gpe_event_info->dispatch.handler = handler;
- /* Setup up dispatch flags to indicate handler (vs. method) */
+	/* Set up dispatch flags to indicate handler (vs. method/notify) */
gpe_event_info->flags &=
~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
@@ -697,7 +697,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Make sure all deferred tasks are completed */
+ /* Make sure all deferred GPE tasks are completed */
acpi_os_wait_events_complete();
@@ -747,10 +747,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
* enabled, it should be enabled at this point to restore the
* post-initialization configuration.
*/
-
- if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
- && handler->originally_enabled)
+ if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) &&
+ handler->originally_enabled) {
(void)acpi_ev_add_gpe_reference(gpe_event_info);
+ }
/* Now we can free the handler object */
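
The hunks above touch acpi_install_gpe_handler() and acpi_remove_gpe_handler().
A minimal sketch of the corresponding usage follows; the GPE number and trigger
type are assumptions, and a real driver would take them from its hardware
description.

#include <acpi/acpi.h>

static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
			       void *context)
{
	/* Service the event, then tell ACPICA it was handled */
	return (ACPI_INTERRUPT_HANDLED);
}

static acpi_status example_install_gpe(void)
{
	/* NULL gpe_device selects the FADT-defined GPE0/GPE1 blocks */
	return acpi_install_gpe_handler(NULL, 0x16,
					ACPI_GPE_LEVEL_TRIGGERED,
					example_gpe_handler, NULL);
}
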
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 35520c6eeefb..d6e4e42316db 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,7 +61,6 @@ ACPI_MODULE_NAME("evxfevnt")
* DESCRIPTION: Transfers the system into ACPI mode.
*
******************************************************************************/
-
acpi_status acpi_enable(void)
{
acpi_status status;
@@ -210,8 +209,8 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
*
* FUNCTION: acpi_disable_event
*
- * PARAMETERS: Event - The fixed eventto be enabled
- * Flags - Reserved
+ * PARAMETERS: event - The fixed event to be disabled
+ * flags - Reserved
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 3f30e753b652..aff4cc261211 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,7 +51,7 @@
ACPI_MODULE_NAME("evxfgpe")
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
-/******************************************************************************
+/*******************************************************************************
*
* FUNCTION: acpi_update_all_gpes
*
@@ -172,6 +172,7 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
+
ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
@@ -225,7 +226,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
}
- /* Validate WakeDevice is of type Device */
+ /* Validate wake_device is of type Device */
if (device_node->type != ACPI_TYPE_DEVICE) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
@@ -432,8 +433,8 @@ ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
*
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * event_status - Where the current status of the event will
- * be returned
+ * event_status - Where the current status of the event
+ * will be returned
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 96b412d03950..96c9e5f355ae 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 16219bde48da..d93b70be60ad 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,7 @@
#include "actables.h"
#include "acdispat.h"
#include "acevents.h"
+#include "amlcode.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exconfig")
@@ -120,8 +121,11 @@ acpi_ex_add_table(u32 table_index,
acpi_ns_exec_module_code_list();
acpi_ex_enter_interpreter();
- /* Update GPEs for any new _Lxx/_Exx methods. Ignore errors */
-
+ /*
+ * Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is
+ * responsible for discovering any new wake GPEs by running _PRW methods
+ * that may have been loaded by this table.
+ */
status = acpi_tb_get_owner_id(table_index, &owner_id);
if (ACPI_SUCCESS(status)) {
acpi_ev_update_gpes(owner_id);
@@ -158,12 +162,12 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE(ex_load_table_op);
- /* Validate lengths for the signature_string, OEMIDString, OEMtable_iD */
+ /* Validate lengths for the Signature, oem_id, and oem_table_id strings */
if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
(operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
(operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
+ return_ACPI_STATUS(AE_AML_STRING_LIMIT);
}
/* Find the ACPI table in the RSDT/XSDT */
@@ -210,8 +214,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
/* parameter_path (optional parameter) */
if (operand[4]->string.length > 0) {
- if ((operand[4]->string.pointer[0] != '\\') &&
- (operand[4]->string.pointer[0] != '^')) {
+ if ((operand[4]->string.pointer[0] != AML_ROOT_PREFIX) &&
+ (operand[4]->string.pointer[0] != AML_PARENT_PREFIX)) {
/*
* Path is not absolute, so it will be relative to the node
* referenced by the root_path_string (or the NS root if omitted)
@@ -301,7 +305,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
region_offset, 8, &value);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
*buffer = (u8)value;
@@ -309,7 +313,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
region_offset++;
}
- return AE_OK;
+ return (AE_OK);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 4492a4e03022..d2b9613bbf01 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -176,7 +176,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
/* Save the Result */
- acpi_ex_truncate_for32bit_table(return_desc);
+ (void)acpi_ex_truncate_for32bit_table(return_desc);
*result_desc = return_desc;
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 66554bc6f9a8..26a13f67977e 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index d7c9f51608a7..7eb853cd279f 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 858b43a7dcf6..e5a3c249f7fa 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -464,9 +464,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
ACPI_FUNCTION_NAME(ex_dump_operand)
- if (!
- ((ACPI_LV_EXEC & acpi_dbg_level)
- && (_COMPONENT & acpi_dbg_layer))) {
+ /* Check if debug output enabled */
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
return;
}
@@ -811,9 +810,10 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
ACPI_FUNCTION_ENTRY();
if (!flags) {
- if (!
- ((ACPI_LV_OBJECTS & acpi_dbg_level)
- && (_COMPONENT & acpi_dbg_layer))) {
+
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_OBJECTS, _COMPONENT)) {
return;
}
}
@@ -999,9 +999,10 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
}
if (!flags) {
- if (!
- ((ACPI_LV_OBJECTS & acpi_dbg_level)
- && (_COMPONENT & acpi_dbg_layer))) {
+
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_OBJECTS, _COMPONENT)) {
return_VOID;
}
}
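
The ACPI_IS_DEBUG_ENABLED() helper used above simply wraps the open-coded test
it replaces. A sketch of the equivalent expansion, based on the removed lines:

#define ACPI_IS_DEBUG_ENABLED(level, component) \
	(((level) & acpi_dbg_level) && ((component) & acpi_dbg_layer))
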
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index ebc55fbf3ff7..7d4bae71e8c6 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index aa2ccfb7cb61..ec7f5690031b 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -329,7 +329,6 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
static u8
acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
{
- ACPI_FUNCTION_NAME(ex_register_overflow);
if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
/*
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 84058705ed12..72a2a13b6d36 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index d1f449d93dcf..7be0205ad067 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -377,7 +377,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
}
- /* Must have a valid thread. */
+ /* Must have a valid thread ID */
+
if (!walk_state->thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], null thread info",
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 2ff578a16adc..14689dec4960 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index bbf01e9bf057..b60c877f5906 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -948,13 +948,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
*/
return_desc =
acpi_ut_create_integer_object((u64)
- temp_desc->
- buffer.
- pointer
- [operand
- [0]->
- reference.
- value]);
+ temp_desc->buffer.pointer[operand[0]->reference.value]);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index ee5634a074c4..e491e46f17df 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 2c89b4651f08..2d7491f3126e 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 3e08695c3b30..b76b97002dff 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index ba9db4de7c89..d6eab81f54fb 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -276,7 +276,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
/* Invalid field access type */
ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
- return_UINT32(0);
+ return_VALUE(0);
}
if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) {
@@ -289,7 +289,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
}
*return_byte_alignment = byte_alignment;
- return_UINT32(bit_length);
+ return_VALUE(bit_length);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 1db2c0bfde0b..182abaf045e1 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -142,9 +142,9 @@ acpi_ex_system_memory_space_handler(u32 function,
}
/*
- * Attempt to map from the requested address to the end of the region.
- * However, we will never map more than one page, nor will we cross
- * a page boundary.
+ * October 2009: Attempt to map from the requested address to the
+ * end of the region. However, we will never map more than one
+ * page, nor will we cross a page boundary.
*/
map_length = (acpi_size)
((mem_info->address + mem_info->length) - address);
@@ -154,12 +154,15 @@ acpi_ex_system_memory_space_handler(u32 function,
* a page boundary, just map up to the page boundary, do not cross.
* On some systems, crossing a page boundary while mapping regions
* can cause warnings if the pages have different attributes
- * due to resource management
+ * due to resource management.
+ *
+ * This has the added benefit of constraining a single mapping to
+ * one page, which is similar to the original code that used a 4k
+ * maximum window.
*/
page_boundary_map_length =
ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address;
-
- if (!page_boundary_map_length) {
+ if (page_boundary_map_length == 0) {
page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
}
@@ -236,19 +239,19 @@ acpi_ex_system_memory_space_handler(u32 function,
switch (bit_width) {
case 8:
- ACPI_SET8(logical_addr_ptr) = (u8) * value;
+ ACPI_SET8(logical_addr_ptr, *value);
break;
case 16:
- ACPI_SET16(logical_addr_ptr) = (u16) * value;
+ ACPI_SET16(logical_addr_ptr, *value);
break;
case 32:
- ACPI_SET32(logical_addr_ptr) = (u32) * value;
+ ACPI_SET32(logical_addr_ptr, *value);
break;
case 64:
- ACPI_SET64(logical_addr_ptr) = (u64) * value;
+ ACPI_SET64(logical_addr_ptr, *value);
break;
default:
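
A short sketch of the page-boundary clipping described in the comment above:
the mapping window is first limited to the end of the region, then further
limited so it never crosses a page boundary. The example values in the comments
are assumptions.

static acpi_size example_clip_to_page(acpi_physical_address address,
				      acpi_size map_length)
{
	acpi_size page_boundary_map_length =
	    ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address;

	/* Address already on a page boundary: a full page is available */
	if (page_boundary_map_length == 0)
		page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;

	/* e.g. address 0x1FF0, length 0x100 -> clipped to 0x10 bytes */
	if (map_length > page_boundary_map_length)
		map_length = page_boundary_map_length;

	return (map_length);
}
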
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 6239956786eb..8565b6bd12bb 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index cc176b245e22..e4f9dfbb2a13 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index b9ebff2f6a09..9fb9f5e9a4da 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 90431f12f831..93c6049c2d75 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -487,14 +487,33 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
default:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Storing %s (%p) directly into node (%p) with no implicit conversion\n",
+ "Storing [%s] (%p) directly into node [%s] (%p)"
+ " with no implicit conversion\n",
acpi_ut_get_object_type_name(source_desc),
- source_desc, node));
+ source_desc,
+ acpi_ut_get_object_type_name(target_desc),
+ node));
- /* No conversions for all other types. Just attach the source object */
+ /*
+ * No conversions for all other types. Directly store a copy of
+ * the source object. NOTE: This is a departure from the ACPI
+ * spec, which states "If conversion is impossible, abort the
+ * running control method".
+ *
+ * This code implements "If conversion is impossible, treat the
+ * Store operation as a CopyObject".
+ */
+ status =
+ acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
- status = acpi_ns_attach_object(node, source_desc,
- source_desc->common.type);
+ status =
+ acpi_ns_attach_object(node, new_desc,
+ new_desc->common.type);
+ acpi_ut_remove_reference(new_desc);
break;
}
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 87153bbc4b43..1cefe777068e 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -253,7 +253,7 @@ acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
/* Truncate value if we are executing from a 32-bit ACPI table */
- acpi_ex_truncate_for32bit_table(dest_desc);
+ (void)acpi_ex_truncate_for32bit_table(dest_desc);
break;
case ACPI_TYPE_STRING:
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index b5f339cb1305..26e371073b1a 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index c8a0ad5c1f55..6578dee2e51b 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 264d22d8018c..b205cbb4b50c 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -202,35 +202,39 @@ void acpi_ex_relinquish_interpreter(void)
*
* PARAMETERS: obj_desc - Object to be truncated
*
- * RETURN: none
+ * RETURN: TRUE if a truncation was performed, FALSE otherwise.
*
* DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
* 32-bit, as determined by the revision of the DSDT.
*
******************************************************************************/
-void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
+u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
{
ACPI_FUNCTION_ENTRY();
/*
* Object must be a valid number and we must be executing
- * a control method. NS node could be there for AML_INT_NAMEPATH_OP.
+ * a control method. Object could be NS node for AML_INT_NAMEPATH_OP.
*/
if ((!obj_desc) ||
(ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) ||
(obj_desc->common.type != ACPI_TYPE_INTEGER)) {
- return;
+ return (FALSE);
}
- if (acpi_gbl_integer_byte_width == 4) {
+ if ((acpi_gbl_integer_byte_width == 4) &&
+ (obj_desc->integer.value > (u64)ACPI_UINT32_MAX)) {
/*
- * We are running a method that exists in a 32-bit ACPI table.
+ * We are executing in a 32-bit ACPI table.
* Truncate the value to 32 bits by zeroing out the upper 32-bit field
*/
- obj_desc->integer.value &= (u64) ACPI_UINT32_MAX;
+ obj_desc->integer.value &= (u64)ACPI_UINT32_MAX;
+ return (TRUE);
}
+
+ return (FALSE);
}
/*******************************************************************************
@@ -336,7 +340,7 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
/* u64 is unsigned, so we don't worry about a '-' prefix */
if (value == 0) {
- return_UINT32(1);
+ return_VALUE(1);
}
current_value = value;
@@ -350,7 +354,7 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
num_digits++;
}
- return_UINT32(num_digits);
+ return_VALUE(num_digits);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 90a9aea1cee9..deb3f61e2bd1 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,8 +108,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
* enable bits to default
*/
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
- (u32) acpi_gbl_FADT.acpi_disable,
- 8);
+ (u32)acpi_gbl_FADT.acpi_disable, 8);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Attempting to enable Legacy (non-ACPI) mode\n"));
break;
@@ -152,18 +151,18 @@ u32 acpi_hw_get_mode(void)
* system does not support mode transition.
*/
if (!acpi_gbl_FADT.smi_command) {
- return_UINT32(ACPI_SYS_MODE_ACPI);
+ return_VALUE(ACPI_SYS_MODE_ACPI);
}
status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value);
if (ACPI_FAILURE(status)) {
- return_UINT32(ACPI_SYS_MODE_LEGACY);
+ return_VALUE(ACPI_SYS_MODE_LEGACY);
}
if (value) {
- return_UINT32(ACPI_SYS_MODE_ACPI);
+ return_VALUE(ACPI_SYS_MODE_ACPI);
} else {
- return_UINT32(ACPI_SYS_MODE_LEGACY);
+ return_VALUE(ACPI_SYS_MODE_LEGACY);
}
}
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 94996f9ae3ad..5e5f76230f5e 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -200,7 +200,6 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
* FUNCTION: acpi_hw_extended_wake
*
* PARAMETERS: sleep_state - Which sleep state we just exited
- * flags - Reserved, set to zero
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 64560045052d..20d02e93c990 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,8 +69,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
{
- return (u32)1 << (gpe_event_info->gpe_number -
- gpe_event_info->register_info->base_gpe_number);
+
+ return ((u32)1 <<
+ (gpe_event_info->gpe_number -
+ gpe_event_info->register_info->base_gpe_number));
}
/******************************************************************************
@@ -133,7 +135,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action));
+ ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u", action));
return (AE_BAD_PARAMETER);
}
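The hwgpe.c change to acpi_hw_get_gpe_register_bit() only reparenthesizes the return expression; the computed mask is unchanged. A worked example of the same shift, assuming a GPE block whose base GPE number is 0x10:

#include <stdint.h>

/* GPE 0x13 in a block based at GPE 0x10 maps to bit 3, i.e. mask 0x00000008 */
static uint32_t gpe_register_bit(uint32_t gpe_number, uint32_t base_gpe_number)
{
	return (uint32_t)1 << (gpe_number - base_gpe_number);
}
/* gpe_register_bit(0x13, 0x10) == 0x00000008 */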
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 65bc3453a29c..0889a629505f 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index f4e57503576b..083d6551f0e2 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,6 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "acnamesp.h"
#include "acevents.h"
#define _COMPONENT ACPI_HARDWARE
@@ -364,8 +363,7 @@ acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control)
* DESCRIPTION: Read from the specified ACPI register
*
******************************************************************************/
-acpi_status
-acpi_hw_register_read(u32 register_id, u32 * return_value)
+acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
{
u32 value = 0;
acpi_status status;
@@ -485,7 +483,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
&acpi_gbl_xpm1b_status);
break;
- case ACPI_REGISTER_PM1_ENABLE: /* PM1 A/B: 16-bit access */
+ case ACPI_REGISTER_PM1_ENABLE: /* PM1 A/B: 16-bit access each */
status = acpi_hw_write_multiple(value,
&acpi_gbl_xpm1a_enable,
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 3fddde056a5e..e3828cc4361b 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,7 +45,6 @@
#include <acpi/acpi.h>
#include <linux/acpi.h>
#include "accommon.h"
-#include <linux/module.h>
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwsleep")
@@ -178,7 +177,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
* to still read the right value. Ideally, this block would go
* away entirely.
*/
- acpi_os_stall(10000000);
+ acpi_os_stall(10 * ACPI_USEC_PER_SEC);
status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
sleep_enable_reg_info->
@@ -323,7 +322,8 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state)
* and use it to determine whether the system is rebooting or
* resuming. Clear WAK_STS for compatibility.
*/
- acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, 1);
+ (void)acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS,
+ ACPI_CLEAR_STATUS);
acpi_gbl_system_awake_and_running = TRUE;
/* Enable power button */
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index bfdce22f3798..0c1a8bbd05d6 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -176,10 +176,11 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
/*
* Compute Duration (Requires a 64-bit multiply and divide):
*
- * time_elapsed = (delta_ticks * 1000000) / PM_TIMER_FREQUENCY;
+ * time_elapsed (microseconds) =
+ * (delta_ticks * ACPI_USEC_PER_SEC) / ACPI_PM_TIMER_FREQUENCY;
*/
- status = acpi_ut_short_divide(((u64) delta_ticks) * 1000000,
- PM_TIMER_FREQUENCY, &quotient, NULL);
+ status = acpi_ut_short_divide(((u64)delta_ticks) * ACPI_USEC_PER_SEC,
+ ACPI_PM_TIMER_FREQUENCY, &quotient, NULL);
*time_elapsed = (u32) quotient;
return_ACPI_STATUS(status);
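The hwtimer.c hunk swaps the literal constants for ACPI_USEC_PER_SEC and ACPI_PM_TIMER_FREQUENCY; the arithmetic is unchanged. Assuming the standard PM timer frequency of 3579545 Hz, the duration calculation reduces to this sketch:

#include <stdint.h>

/* Hypothetical standalone version of the 64-bit multiply/divide above */
static uint32_t pm_timer_elapsed_usec(uint32_t delta_ticks)
{
	return (uint32_t)(((uint64_t)delta_ticks * 1000000u) / 3579545u);
}
/* e.g. delta_ticks == 3579545 (one second worth of ticks) yields 1000000 us */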
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index b6aae58299dc..eab70d58852a 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -135,7 +135,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
ACPI_ERROR((AE_INFO,
"Bad BitWidth parameter: %8.8X", bit_width));
- return AE_BAD_PARAMETER;
+ return (AE_BAD_PARAMETER);
}
port_info = acpi_protected_ports;
@@ -234,11 +234,11 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
status = acpi_hw_validate_io_request(address, width);
if (ACPI_SUCCESS(status)) {
status = acpi_os_read_port(address, value, width);
- return status;
+ return (status);
}
if (status != AE_AML_ILLEGAL_ADDRESS) {
- return status;
+ return (status);
}
/*
@@ -253,7 +253,7 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
if (acpi_hw_validate_io_request(address, 8) == AE_OK) {
status = acpi_os_read_port(address, &one_byte, 8);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
*value |= (one_byte << i);
@@ -262,7 +262,7 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
address++;
}
- return AE_OK;
+ return (AE_OK);
}
/******************************************************************************
@@ -297,11 +297,11 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width)
status = acpi_hw_validate_io_request(address, width);
if (ACPI_SUCCESS(status)) {
status = acpi_os_write_port(address, value, width);
- return status;
+ return (status);
}
if (status != AE_AML_ILLEGAL_ADDRESS) {
- return status;
+ return (status);
}
/*
@@ -317,12 +317,12 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width)
status =
acpi_os_write_port(address, (value >> i) & 0xFF, 8);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
}
address++;
}
- return AE_OK;
+ return (AE_OK);
}
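Beyond the return-style cleanups, the surrounding hwvalid.c routines fall back to byte-wide accesses when a wider access overlaps a protected port (AE_AML_ILLEGAL_ADDRESS). A simplified sketch of that write path, with assumed helper names and with the per-byte re-validation shown in the patch omitted for brevity:

#include <stdint.h>

/* write_byte() stands in for an 8-bit acpi_os_write_port() call */
extern int write_byte(uint64_t address, uint8_t value);

static int write_port_bytewise(uint64_t address, uint32_t value, uint32_t width)
{
	uint32_t i;
	int status;

	/* NOTE: the real code re-validates each byte and skips protected ones */
	for (i = 0; i < width; i += 8) {
		status = write_byte(address, (value >> i) & 0xFF);
		if (status)
			return status;
		address++;
	}
	return 0;
}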
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 05a154c3c9ac..04c2e16f2c0a 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -80,10 +80,10 @@ acpi_status acpi_reset(void)
if (reset_reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
/*
- * For I/O space, write directly to the OSL. This
- * bypasses the port validation mechanism, which may
- * block a valid write to the reset register. Spec
- * section 4.7.3.6 requires register width to be 8.
+ * For I/O space, write directly to the OSL. This bypasses the port
+ * validation mechanism, which may block a valid write to the reset
+ * register.
+ * Spec section 4.7.3.6 requires register width to be 8.
*/
status =
acpi_os_write_port((acpi_io_address) reset_reg->address,
@@ -333,7 +333,7 @@ ACPI_EXPORT_SYMBOL(acpi_read_bit_register)
* FUNCTION: acpi_write_bit_register
*
* PARAMETERS: register_id - ID of ACPI Bit Register to access
- * Value - Value to write to the register, in bit
+ * value - Value to write to the register, in bit
* position zero. The bit is automatically
* shifted to the correct position.
*
@@ -440,17 +440,41 @@ ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
* *sleep_type_a - Where SLP_TYPa is returned
* *sleep_type_b - Where SLP_TYPb is returned
*
- * RETURN: status - ACPI status
+ * RETURN: Status
+ *
+ * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested
+ * sleep state via the appropriate \_Sx object.
+ *
+ * The sleep state package returned from the corresponding \_Sx_ object
+ * must contain at least one integer.
+ *
+ * March 2005:
+ * Added support for a package that contains two integers. This
+ * goes against the ACPI specification which defines this object as a
+ * package with one encoded DWORD integer. However, existing practice
+ * by many BIOS vendors is to return a package with 2 or more integer
+ * elements, at least one per sleep type (A/B).
*
- * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep
- * state.
+ * January 2013:
+ * Therefore, we must be prepared to accept a package with either a
+ * single integer or multiple integers.
+ *
+ * The single integer DWORD format is as follows:
+ * BYTE 0 - Value for the PM1A SLP_TYP register
+ * BYTE 1 - Value for the PM1B SLP_TYP register
+ * BYTE 2-3 - Reserved
+ *
+ * The dual integer format is as follows:
+ * Integer 0 - Value for the PM1A SLP_TYP register
+ * Integer 1 - Value for the PM1B SLP_TYP register
*
******************************************************************************/
acpi_status
acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
{
- acpi_status status = AE_OK;
+ acpi_status status;
struct acpi_evaluate_info *info;
+ union acpi_operand_object **elements;
ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
@@ -467,18 +491,14 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
return_ACPI_STATUS(AE_NO_MEMORY);
}
+ /*
+ * Evaluate the \_Sx namespace object containing the register values
+ * for this state
+ */
info->pathname =
ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
-
- /* Evaluate the namespace object containing the values for this state */
-
status = acpi_ns_evaluate(info);
if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "%s while evaluating SleepState [%s]\n",
- acpi_format_exception(status),
- info->pathname));
-
goto cleanup;
}
@@ -487,64 +507,67 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
if (!info->return_object) {
ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
info->pathname));
- status = AE_NOT_EXIST;
+ status = AE_AML_NO_RETURN_VALUE;
+ goto cleanup;
}
- /* It must be of type Package */
+ /* Return object must be of type Package */
- else if (info->return_object->common.type != ACPI_TYPE_PACKAGE) {
+ if (info->return_object->common.type != ACPI_TYPE_PACKAGE) {
ACPI_ERROR((AE_INFO,
"Sleep State return object is not a Package"));
status = AE_AML_OPERAND_TYPE;
+ goto cleanup1;
}
/*
- * The package must have at least two elements. NOTE (March 2005): This
- * goes against the current ACPI spec which defines this object as a
- * package with one encoded DWORD element. However, existing practice
- * by BIOS vendors seems to be to have 2 or more elements, at least
- * one per sleep type (A/B).
+ * Any warnings about the package length or the object types have
+ * already been issued by the predefined name module -- there is no
+ * need to repeat them here.
*/
- else if (info->return_object->package.count < 2) {
- ACPI_ERROR((AE_INFO,
- "Sleep State return package does not have at least two elements"));
- status = AE_AML_NO_OPERAND;
- }
+ elements = info->return_object->package.elements;
+ switch (info->return_object->package.count) {
+ case 0:
+ status = AE_AML_PACKAGE_LIMIT;
+ break;
+
+ case 1:
+ if (elements[0]->common.type != ACPI_TYPE_INTEGER) {
+ status = AE_AML_OPERAND_TYPE;
+ break;
+ }
- /* The first two elements must both be of type Integer */
+ /* A valid _Sx_ package with one integer */
- else if (((info->return_object->package.elements[0])->common.type
- != ACPI_TYPE_INTEGER) ||
- ((info->return_object->package.elements[1])->common.type
- != ACPI_TYPE_INTEGER)) {
- ACPI_ERROR((AE_INFO,
- "Sleep State return package elements are not both Integers "
- "(%s, %s)",
- acpi_ut_get_object_type_name(info->return_object->
- package.elements[0]),
- acpi_ut_get_object_type_name(info->return_object->
- package.elements[1])));
- status = AE_AML_OPERAND_TYPE;
- } else {
- /* Valid _Sx_ package size, type, and value */
+ *sleep_type_a = (u8)elements[0]->integer.value;
+ *sleep_type_b = (u8)(elements[0]->integer.value >> 8);
+ break;
- *sleep_type_a = (u8)
- (info->return_object->package.elements[0])->integer.value;
- *sleep_type_b = (u8)
- (info->return_object->package.elements[1])->integer.value;
- }
+ case 2:
+ default:
+ if ((elements[0]->common.type != ACPI_TYPE_INTEGER) ||
+ (elements[1]->common.type != ACPI_TYPE_INTEGER)) {
+ status = AE_AML_OPERAND_TYPE;
+ break;
+ }
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "While evaluating SleepState [%s], bad Sleep object %p type %s",
- info->pathname, info->return_object,
- acpi_ut_get_object_type_name(info->
- return_object)));
+ /* A valid _Sx_ package with two integers */
+
+ *sleep_type_a = (u8)elements[0]->integer.value;
+ *sleep_type_b = (u8)elements[1]->integer.value;
+ break;
}
+ cleanup1:
acpi_ut_remove_reference(info->return_object);
cleanup:
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While evaluating Sleep State [%s]",
+ info->pathname));
+ }
+
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
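As the rewritten acpi_get_sleep_type_data() documentation above explains, a single-integer _Sx package packs both sleep type values into one DWORD, while a multi-integer package carries them in separate elements. A minimal decoding sketch, using plain integers rather than ACPI operand objects:

#include <stdint.h>

/* Single-integer format: byte 0 = SLP_TYPa, byte 1 = SLP_TYPb */
static void decode_single_integer(uint64_t dword, uint8_t *type_a, uint8_t *type_b)
{
	*type_a = (uint8_t)dword;
	*type_b = (uint8_t)(dword >> 8);
}

/* Dual-integer format: element 0 = SLP_TYPa, element 1 = SLP_TYPb */
static void decode_two_integers(uint64_t el0, uint64_t el1,
				uint8_t *type_a, uint8_t *type_b)
{
	*type_a = (uint8_t)el0;
	*type_b = (uint8_t)el1;
}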
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index ae443fe2ebf6..35eebdac0f9d 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,9 +41,9 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
-#include <linux/module.h>
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwxfsleep")
@@ -207,7 +207,7 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
(u32)acpi_gbl_FADT.s4_bios_request, 8);
do {
- acpi_os_stall(1000);
+ acpi_os_stall(ACPI_USEC_PER_MSEC);
status =
acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
if (ACPI_FAILURE(status)) {
@@ -350,7 +350,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
*
* RETURN: Status
*
- * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
+ * DESCRIPTION: Enter a system sleep state
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
@@ -382,8 +382,9 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
* RETURN: Status
*
* DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a
- * sleep.
- * Called with interrupts DISABLED.
+ * sleep. Called with interrupts DISABLED.
+ * We break wake/resume into 2 stages so that OSPM can handle
+ * various OS-specific tasks between the two steps.
*
******************************************************************************/
acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index d70eaf39dfdf..8769cf83b044 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 15143c44f5e5..243737363fb8 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 924b3c71473a..ce6e97326205 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
+#include <acpi/acoutput.h>
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsdump")
@@ -77,8 +78,9 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
ACPI_FUNCTION_NAME(ns_print_pathname);
- if (!(acpi_dbg_level & ACPI_LV_NAMES)
- || !(acpi_dbg_layer & ACPI_NAMESPACE)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_NAMES, ACPI_NAMESPACE)) {
return;
}
@@ -127,7 +129,7 @@ acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
/* Do this only if the requested debug level and component are enabled */
- if (!(acpi_dbg_level & level) || !(acpi_dbg_layer & component)) {
+ if (!ACPI_IS_DEBUG_ENABLED(level, component)) {
return_VOID;
}
@@ -729,5 +731,5 @@ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
ACPI_OWNER_ID_MAX, search_handle);
return_VOID;
}
-#endif /* _ACPI_ASL_COMPILER */
-#endif /* defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) */
+#endif
+#endif
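The nsdump.c hunks replace open-coded level/layer tests with ACPI_IS_DEBUG_ENABLED(). Judging from the expression it replaces, the macro is assumed to expand to roughly the following:

/* Assumed expansion, equivalent to the removed open-coded test */
#define ACPI_IS_DEBUG_ENABLED(level, component) \
	(((acpi_dbg_level & (level)) != 0) && ((acpi_dbg_layer & (component)) != 0))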
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 944d4c8d9438..409ae80824d1 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,6 @@
*/
#include <acpi/acpi.h>
-#include "accommon.h"
/* TBD: This entire module is apparently obsolete and should be removed */
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 69074be498e8..1538f3eb2a8f 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 4328e2adfeb9..2a431ec50a25 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,6 @@
#include "acnamesp.h"
#include "acdispat.h"
#include "acinterp.h"
-#include <linux/nmi.h>
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsinit")
@@ -87,7 +86,7 @@ acpi_status acpi_ns_initialize_objects(void)
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "Completing Region/Field/Buffer/Package initialization:"));
+ "Completing Region/Field/Buffer/Package initialization:\n"));
/* Set all init info to zero */
@@ -103,7 +102,7 @@ acpi_status acpi_ns_initialize_objects(void)
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\nInitialized %u/%u Regions %u/%u Fields %u/%u "
+ " Initialized %u/%u Regions %u/%u Fields %u/%u "
"Buffers %u/%u Packages (%u nodes)\n",
info.op_region_init, info.op_region_count,
info.field_init, info.field_count,
@@ -150,7 +149,7 @@ acpi_status acpi_ns_initialize_devices(void)
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"Initializing Device/Processor/Thermal objects "
- "by executing _INI methods:"));
+ "and executing _INI/_STA methods:\n"));
/* Tree analysis: find all subtrees that contain _INI methods */
@@ -208,7 +207,7 @@ acpi_status acpi_ns_initialize_devices(void)
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\nExecuted %u _INI methods requiring %u _STA executions "
+ " Executed %u _INI methods requiring %u _STA executions "
"(examined %u objects)\n",
info.num_INI, info.num_STA, info.device_count));
@@ -350,14 +349,6 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
}
/*
- * Print a dot for each object unless we are going to print the entire
- * pathname
- */
- if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
- }
-
- /*
* We ignore errors from above, and always return OK, since we don't want
* to abort the walk on any single error.
*/
@@ -572,20 +563,10 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
info->parameters = NULL;
info->flags = ACPI_IGNORE_RETURN_VALUE;
- /*
- * Some hardware relies on this being executed as atomically
- * as possible (without an NMI being received in the middle of
- * this) - so disable NMIs and initialize the device:
- */
status = acpi_ns_evaluate(info);
if (ACPI_SUCCESS(status)) {
walk_info->num_INI++;
-
- if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
- (!(acpi_dbg_level & ACPI_LV_INFO))) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
- }
}
#ifdef ACPI_DEBUG_OUTPUT
else if (status != AE_NOT_FOUND) {
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 911f99127b99..0a7badc3179f 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 55a175eadcc3..90a0380fb8a0 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -126,7 +126,8 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
* the node, In external format (name segments separated by path
* separators.)
*
- * DESCRIPTION: Used for debug printing in acpi_ns_search_table().
+ * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually
+ * for error and debug statements.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index e69f7fa2579d..7a736f4d1fd8 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 233f756d5cfa..35dde8151c0d 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 2419f417ea33..224c30053401 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -1,12 +1,11 @@
/******************************************************************************
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
- * $Revision: 1.1 $
*
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -74,27 +73,6 @@ ACPI_MODULE_NAME("nspredef")
******************************************************************************/
/* Local prototypes */
static acpi_status
-acpi_ns_check_package(struct acpi_predefined_data *data,
- union acpi_operand_object **return_object_ptr);
-
-static acpi_status
-acpi_ns_check_package_list(struct acpi_predefined_data *data,
- const union acpi_predefined_info *package,
- union acpi_operand_object **elements, u32 count);
-
-static acpi_status
-acpi_ns_check_package_elements(struct acpi_predefined_data *data,
- union acpi_operand_object **elements,
- u8 type1,
- u32 count1,
- u8 type2, u32 count2, u32 start_index);
-
-static acpi_status
-acpi_ns_check_object_type(struct acpi_predefined_data *data,
- union acpi_operand_object **return_object_ptr,
- u32 expected_btypes, u32 package_index);
-
-static acpi_status
acpi_ns_check_reference(struct acpi_predefined_data *data,
union acpi_operand_object *return_object);
@@ -148,7 +126,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
pathname = acpi_ns_get_external_pathname(node);
if (!pathname) {
- return AE_OK; /* Could not get pathname, ignore */
+ return (AE_OK); /* Could not get pathname, ignore */
}
/*
@@ -408,564 +386,6 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
/*******************************************************************************
*
- * FUNCTION: acpi_ns_check_package
- *
- * PARAMETERS: data - Pointer to validation data structure
- * return_object_ptr - Pointer to the object returned from the
- * evaluation of a method or object
- *
- * RETURN: Status
- *
- * DESCRIPTION: Check a returned package object for the correct count and
- * correct type of all sub-objects.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_check_package(struct acpi_predefined_data *data,
- union acpi_operand_object **return_object_ptr)
-{
- union acpi_operand_object *return_object = *return_object_ptr;
- const union acpi_predefined_info *package;
- union acpi_operand_object **elements;
- acpi_status status = AE_OK;
- u32 expected_count;
- u32 count;
- u32 i;
-
- ACPI_FUNCTION_NAME(ns_check_package);
-
- /* The package info for this name is in the next table entry */
-
- package = data->predefined + 1;
-
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "%s Validating return Package of Type %X, Count %X\n",
- data->pathname, package->ret_info.type,
- return_object->package.count));
-
- /*
- * For variable-length Packages, we can safely remove all embedded
- * and trailing NULL package elements
- */
- acpi_ns_remove_null_elements(data, package->ret_info.type,
- return_object);
-
- /* Extract package count and elements array */
-
- elements = return_object->package.elements;
- count = return_object->package.count;
-
- /* The package must have at least one element, else invalid */
-
- if (!count) {
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Return Package has no elements (empty)"));
-
- return (AE_AML_OPERAND_VALUE);
- }
-
- /*
- * Decode the type of the expected package contents
- *
- * PTYPE1 packages contain no subpackages
- * PTYPE2 packages contain sub-packages
- */
- switch (package->ret_info.type) {
- case ACPI_PTYPE1_FIXED:
-
- /*
- * The package count is fixed and there are no sub-packages
- *
- * If package is too small, exit.
- * If package is larger than expected, issue warning but continue
- */
- expected_count =
- package->ret_info.count1 + package->ret_info.count2;
- if (count < expected_count) {
- goto package_too_small;
- } else if (count > expected_count) {
- ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
- "%s: Return Package is larger than needed - "
- "found %u, expected %u\n",
- data->pathname, count,
- expected_count));
- }
-
- /* Validate all elements of the returned package */
-
- status = acpi_ns_check_package_elements(data, elements,
- package->ret_info.
- object_type1,
- package->ret_info.
- count1,
- package->ret_info.
- object_type2,
- package->ret_info.
- count2, 0);
- break;
-
- case ACPI_PTYPE1_VAR:
-
- /*
- * The package count is variable, there are no sub-packages, and all
- * elements must be of the same type
- */
- for (i = 0; i < count; i++) {
- status = acpi_ns_check_object_type(data, elements,
- package->ret_info.
- object_type1, i);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- elements++;
- }
- break;
-
- case ACPI_PTYPE1_OPTION:
-
- /*
- * The package count is variable, there are no sub-packages. There are
- * a fixed number of required elements, and a variable number of
- * optional elements.
- *
- * Check if package is at least as large as the minimum required
- */
- expected_count = package->ret_info3.count;
- if (count < expected_count) {
- goto package_too_small;
- }
-
- /* Variable number of sub-objects */
-
- for (i = 0; i < count; i++) {
- if (i < package->ret_info3.count) {
-
- /* These are the required package elements (0, 1, or 2) */
-
- status =
- acpi_ns_check_object_type(data, elements,
- package->
- ret_info3.
- object_type[i],
- i);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- } else {
- /* These are the optional package elements */
-
- status =
- acpi_ns_check_object_type(data, elements,
- package->
- ret_info3.
- tail_object_type,
- i);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
- elements++;
- }
- break;
-
- case ACPI_PTYPE2_REV_FIXED:
-
- /* First element is the (Integer) revision */
-
- status = acpi_ns_check_object_type(data, elements,
- ACPI_RTYPE_INTEGER, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- elements++;
- count--;
-
- /* Examine the sub-packages */
-
- status =
- acpi_ns_check_package_list(data, package, elements, count);
- break;
-
- case ACPI_PTYPE2_PKG_COUNT:
-
- /* First element is the (Integer) count of sub-packages to follow */
-
- status = acpi_ns_check_object_type(data, elements,
- ACPI_RTYPE_INTEGER, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /*
- * Count cannot be larger than the parent package length, but allow it
- * to be smaller. The >= accounts for the Integer above.
- */
- expected_count = (u32) (*elements)->integer.value;
- if (expected_count >= count) {
- goto package_too_small;
- }
-
- count = expected_count;
- elements++;
-
- /* Examine the sub-packages */
-
- status =
- acpi_ns_check_package_list(data, package, elements, count);
- break;
-
- case ACPI_PTYPE2:
- case ACPI_PTYPE2_FIXED:
- case ACPI_PTYPE2_MIN:
- case ACPI_PTYPE2_COUNT:
- case ACPI_PTYPE2_FIX_VAR:
-
- /*
- * These types all return a single Package that consists of a
- * variable number of sub-Packages.
- *
- * First, ensure that the first element is a sub-Package. If not,
- * the BIOS may have incorrectly returned the object as a single
- * package instead of a Package of Packages (a common error if
- * there is only one entry). We may be able to repair this by
- * wrapping the returned Package with a new outer Package.
- */
- if (*elements
- && ((*elements)->common.type != ACPI_TYPE_PACKAGE)) {
-
- /* Create the new outer package and populate it */
-
- status =
- acpi_ns_wrap_with_package(data, return_object,
- return_object_ptr);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* Update locals to point to the new package (of 1 element) */
-
- return_object = *return_object_ptr;
- elements = return_object->package.elements;
- count = 1;
- }
-
- /* Examine the sub-packages */
-
- status =
- acpi_ns_check_package_list(data, package, elements, count);
- break;
-
- default:
-
- /* Should not get here if predefined info table is correct */
-
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Invalid internal return type in table entry: %X",
- package->ret_info.type));
-
- return (AE_AML_INTERNAL);
- }
-
- return (status);
-
-package_too_small:
-
- /* Error exit for the case with an incorrect package count */
-
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Return Package is too small - found %u elements, expected %u",
- count, expected_count));
-
- return (AE_AML_OPERAND_VALUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_check_package_list
- *
- * PARAMETERS: data - Pointer to validation data structure
- * package - Pointer to package-specific info for method
- * elements - Element list of parent package. All elements
- * of this list should be of type Package.
- * count - Count of subpackages
- *
- * RETURN: Status
- *
- * DESCRIPTION: Examine a list of subpackages
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_check_package_list(struct acpi_predefined_data *data,
- const union acpi_predefined_info *package,
- union acpi_operand_object **elements, u32 count)
-{
- union acpi_operand_object *sub_package;
- union acpi_operand_object **sub_elements;
- acpi_status status;
- u32 expected_count;
- u32 i;
- u32 j;
-
- /*
- * Validate each sub-Package in the parent Package
- *
- * NOTE: assumes list of sub-packages contains no NULL elements.
- * Any NULL elements should have been removed by earlier call
- * to acpi_ns_remove_null_elements.
- */
- for (i = 0; i < count; i++) {
- sub_package = *elements;
- sub_elements = sub_package->package.elements;
- data->parent_package = sub_package;
-
- /* Each sub-object must be of type Package */
-
- status = acpi_ns_check_object_type(data, &sub_package,
- ACPI_RTYPE_PACKAGE, i);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /* Examine the different types of expected sub-packages */
-
- data->parent_package = sub_package;
- switch (package->ret_info.type) {
- case ACPI_PTYPE2:
- case ACPI_PTYPE2_PKG_COUNT:
- case ACPI_PTYPE2_REV_FIXED:
-
- /* Each subpackage has a fixed number of elements */
-
- expected_count =
- package->ret_info.count1 + package->ret_info.count2;
- if (sub_package->package.count < expected_count) {
- goto package_too_small;
- }
-
- status =
- acpi_ns_check_package_elements(data, sub_elements,
- package->ret_info.
- object_type1,
- package->ret_info.
- count1,
- package->ret_info.
- object_type2,
- package->ret_info.
- count2, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
-
- case ACPI_PTYPE2_FIX_VAR:
- /*
- * Each subpackage has a fixed number of elements and an
- * optional element
- */
- expected_count =
- package->ret_info.count1 + package->ret_info.count2;
- if (sub_package->package.count < expected_count) {
- goto package_too_small;
- }
-
- status =
- acpi_ns_check_package_elements(data, sub_elements,
- package->ret_info.
- object_type1,
- package->ret_info.
- count1,
- package->ret_info.
- object_type2,
- sub_package->package.
- count -
- package->ret_info.
- count1, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
-
- case ACPI_PTYPE2_FIXED:
-
- /* Each sub-package has a fixed length */
-
- expected_count = package->ret_info2.count;
- if (sub_package->package.count < expected_count) {
- goto package_too_small;
- }
-
- /* Check the type of each sub-package element */
-
- for (j = 0; j < expected_count; j++) {
- status =
- acpi_ns_check_object_type(data,
- &sub_elements[j],
- package->
- ret_info2.
- object_type[j],
- j);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
- break;
-
- case ACPI_PTYPE2_MIN:
-
- /* Each sub-package has a variable but minimum length */
-
- expected_count = package->ret_info.count1;
- if (sub_package->package.count < expected_count) {
- goto package_too_small;
- }
-
- /* Check the type of each sub-package element */
-
- status =
- acpi_ns_check_package_elements(data, sub_elements,
- package->ret_info.
- object_type1,
- sub_package->package.
- count, 0, 0, 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
-
- case ACPI_PTYPE2_COUNT:
-
- /*
- * First element is the (Integer) count of elements, including
- * the count field (the ACPI name is num_elements)
- */
- status = acpi_ns_check_object_type(data, sub_elements,
- ACPI_RTYPE_INTEGER,
- 0);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /*
- * Make sure package is large enough for the Count and is
- * as large as the minimum size
- */
- expected_count = (u32)(*sub_elements)->integer.value;
- if (sub_package->package.count < expected_count) {
- goto package_too_small;
- }
- if (sub_package->package.count <
- package->ret_info.count1) {
- expected_count = package->ret_info.count1;
- goto package_too_small;
- }
- if (expected_count == 0) {
- /*
- * Either the num_entries element was originally zero or it was
- * a NULL element and repaired to an Integer of value zero.
- * In either case, repair it by setting num_entries to be the
- * actual size of the subpackage.
- */
- expected_count = sub_package->package.count;
- (*sub_elements)->integer.value = expected_count;
- }
-
- /* Check the type of each sub-package element */
-
- status =
- acpi_ns_check_package_elements(data,
- (sub_elements + 1),
- package->ret_info.
- object_type1,
- (expected_count - 1),
- 0, 0, 1);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- break;
-
- default: /* Should not get here, type was validated by caller */
-
- return (AE_AML_INTERNAL);
- }
-
- elements++;
- }
-
- return (AE_OK);
-
-package_too_small:
-
- /* The sub-package count was smaller than required */
-
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Return Sub-Package[%u] is too small - found %u elements, expected %u",
- i, sub_package->package.count, expected_count));
-
- return (AE_AML_OPERAND_VALUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_check_package_elements
- *
- * PARAMETERS: data - Pointer to validation data structure
- * elements - Pointer to the package elements array
- * type1 - Object type for first group
- * count1 - Count for first group
- * type2 - Object type for second group
- * count2 - Count for second group
- * start_index - Start of the first group of elements
- *
- * RETURN: Status
- *
- * DESCRIPTION: Check that all elements of a package are of the correct object
- * type. Supports up to two groups of different object types.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_check_package_elements(struct acpi_predefined_data *data,
- union acpi_operand_object **elements,
- u8 type1,
- u32 count1,
- u8 type2, u32 count2, u32 start_index)
-{
- union acpi_operand_object **this_element = elements;
- acpi_status status;
- u32 i;
-
- /*
- * Up to two groups of package elements are supported by the data
- * structure. All elements in each group must be of the same type.
- * The second group can have a count of zero.
- */
- for (i = 0; i < count1; i++) {
- status = acpi_ns_check_object_type(data, this_element,
- type1, i + start_index);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- this_element++;
- }
-
- for (i = 0; i < count2; i++) {
- status = acpi_ns_check_object_type(data, this_element,
- type2,
- (i + count1 + start_index));
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- this_element++;
- }
-
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_check_object_type
*
* PARAMETERS: data - Pointer to validation data structure
@@ -983,7 +403,7 @@ acpi_ns_check_package_elements(struct acpi_predefined_data *data,
*
******************************************************************************/
-static acpi_status
+acpi_status
acpi_ns_check_object_type(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr,
u32 expected_btypes, u32 package_index)
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
new file mode 100644
index 000000000000..a40155467d2e
--- /dev/null
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -0,0 +1,621 @@
+/******************************************************************************
+ *
+ * Module Name: nsprepkg - Validation of package objects for predefined names
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acpredef.h"
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsprepkg")
+
+/* Local prototypes */
+static acpi_status
+acpi_ns_check_package_list(struct acpi_predefined_data *data,
+ const union acpi_predefined_info *package,
+ union acpi_operand_object **elements, u32 count);
+
+static acpi_status
+acpi_ns_check_package_elements(struct acpi_predefined_data *data,
+ union acpi_operand_object **elements,
+ u8 type1,
+ u32 count1,
+ u8 type2, u32 count2, u32 start_index);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_package
+ *
+ * PARAMETERS: data - Pointer to validation data structure
+ * return_object_ptr - Pointer to the object returned from the
+ * evaluation of a method or object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check a returned package object for the correct count and
+ * correct type of all sub-objects.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_check_package(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr)
+{
+ union acpi_operand_object *return_object = *return_object_ptr;
+ const union acpi_predefined_info *package;
+ union acpi_operand_object **elements;
+ acpi_status status = AE_OK;
+ u32 expected_count;
+ u32 count;
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ns_check_package);
+
+ /* The package info for this name is in the next table entry */
+
+ package = data->predefined + 1;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "%s Validating return Package of Type %X, Count %X\n",
+ data->pathname, package->ret_info.type,
+ return_object->package.count));
+
+ /*
+ * For variable-length Packages, we can safely remove all embedded
+ * and trailing NULL package elements
+ */
+ acpi_ns_remove_null_elements(data, package->ret_info.type,
+ return_object);
+
+ /* Extract package count and elements array */
+
+ elements = return_object->package.elements;
+ count = return_object->package.count;
+
+ /* The package must have at least one element, else invalid */
+
+ if (!count) {
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return Package has no elements (empty)"));
+
+ return (AE_AML_OPERAND_VALUE);
+ }
+
+ /*
+ * Decode the type of the expected package contents
+ *
+ * PTYPE1 packages contain no subpackages
+ * PTYPE2 packages contain sub-packages
+ */
+ switch (package->ret_info.type) {
+ case ACPI_PTYPE1_FIXED:
+
+ /*
+ * The package count is fixed and there are no sub-packages
+ *
+ * If package is too small, exit.
+ * If package is larger than expected, issue warning but continue
+ */
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (count < expected_count) {
+ goto package_too_small;
+ } else if (count > expected_count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Return Package is larger than needed - "
+ "found %u, expected %u\n",
+ data->pathname, count,
+ expected_count));
+ }
+
+ /* Validate all elements of the returned package */
+
+ status = acpi_ns_check_package_elements(data, elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ package->ret_info.
+ count2, 0);
+ break;
+
+ case ACPI_PTYPE1_VAR:
+
+ /*
+ * The package count is variable, there are no sub-packages, and all
+ * elements must be of the same type
+ */
+ for (i = 0; i < count; i++) {
+ status = acpi_ns_check_object_type(data, elements,
+ package->ret_info.
+ object_type1, i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ elements++;
+ }
+ break;
+
+ case ACPI_PTYPE1_OPTION:
+
+ /*
+ * The package count is variable, there are no sub-packages. There are
+ * a fixed number of required elements, and a variable number of
+ * optional elements.
+ *
+ * Check if package is at least as large as the minimum required
+ */
+ expected_count = package->ret_info3.count;
+ if (count < expected_count) {
+ goto package_too_small;
+ }
+
+ /* Variable number of sub-objects */
+
+ for (i = 0; i < count; i++) {
+ if (i < package->ret_info3.count) {
+
+ /* These are the required package elements (0, 1, or 2) */
+
+ status =
+ acpi_ns_check_object_type(data, elements,
+ package->
+ ret_info3.
+ object_type[i],
+ i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ } else {
+ /* These are the optional package elements */
+
+ status =
+ acpi_ns_check_object_type(data, elements,
+ package->
+ ret_info3.
+ tail_object_type,
+ i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ elements++;
+ }
+ break;
+
+ case ACPI_PTYPE2_REV_FIXED:
+
+ /* First element is the (Integer) revision */
+
+ status = acpi_ns_check_object_type(data, elements,
+ ACPI_RTYPE_INTEGER, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ elements++;
+ count--;
+
+ /* Examine the sub-packages */
+
+ status =
+ acpi_ns_check_package_list(data, package, elements, count);
+ break;
+
+ case ACPI_PTYPE2_PKG_COUNT:
+
+ /* First element is the (Integer) count of sub-packages to follow */
+
+ status = acpi_ns_check_object_type(data, elements,
+ ACPI_RTYPE_INTEGER, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * Count cannot be larger than the parent package length, but allow it
+ * to be smaller. The >= accounts for the Integer above.
+ */
+ expected_count = (u32)(*elements)->integer.value;
+ if (expected_count >= count) {
+ goto package_too_small;
+ }
+
+ count = expected_count;
+ elements++;
+
+ /* Examine the sub-packages */
+
+ status =
+ acpi_ns_check_package_list(data, package, elements, count);
+ break;
+
+ case ACPI_PTYPE2:
+ case ACPI_PTYPE2_FIXED:
+ case ACPI_PTYPE2_MIN:
+ case ACPI_PTYPE2_COUNT:
+ case ACPI_PTYPE2_FIX_VAR:
+
+ /*
+ * These types all return a single Package that consists of a
+ * variable number of sub-Packages.
+ *
+ * First, ensure that the first element is a sub-Package. If not,
+ * the BIOS may have incorrectly returned the object as a single
+ * package instead of a Package of Packages (a common error if
+ * there is only one entry). We may be able to repair this by
+ * wrapping the returned Package with a new outer Package.
+ */
+ if (*elements
+ && ((*elements)->common.type != ACPI_TYPE_PACKAGE)) {
+
+ /* Create the new outer package and populate it */
+
+ status =
+ acpi_ns_wrap_with_package(data, return_object,
+ return_object_ptr);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Update locals to point to the new package (of 1 element) */
+
+ return_object = *return_object_ptr;
+ elements = return_object->package.elements;
+ count = 1;
+ }
+
+ /* Examine the sub-packages */
+
+ status =
+ acpi_ns_check_package_list(data, package, elements, count);
+ break;
+
+ default:
+
+ /* Should not get here if predefined info table is correct */
+
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Invalid internal return type in table entry: %X",
+ package->ret_info.type));
+
+ return (AE_AML_INTERNAL);
+ }
+
+ return (status);
+
+ package_too_small:
+
+ /* Error exit for the case with an incorrect package count */
+
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return Package is too small - found %u elements, expected %u",
+ count, expected_count));
+
+ return (AE_AML_OPERAND_VALUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_package_list
+ *
+ * PARAMETERS: data - Pointer to validation data structure
+ * package - Pointer to package-specific info for method
+ * elements - Element list of parent package. All elements
+ * of this list should be of type Package.
+ * count - Count of subpackages
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Examine a list of subpackages
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_package_list(struct acpi_predefined_data *data,
+ const union acpi_predefined_info *package,
+ union acpi_operand_object **elements, u32 count)
+{
+ union acpi_operand_object *sub_package;
+ union acpi_operand_object **sub_elements;
+ acpi_status status;
+ u32 expected_count;
+ u32 i;
+ u32 j;
+
+ /*
+ * Validate each sub-Package in the parent Package
+ *
+ * NOTE: assumes list of sub-packages contains no NULL elements.
+ * Any NULL elements should have been removed by earlier call
+ * to acpi_ns_remove_null_elements.
+ */
+ for (i = 0; i < count; i++) {
+ sub_package = *elements;
+ sub_elements = sub_package->package.elements;
+ data->parent_package = sub_package;
+
+ /* Each sub-object must be of type Package */
+
+ status = acpi_ns_check_object_type(data, &sub_package,
+ ACPI_RTYPE_PACKAGE, i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Examine the different types of expected sub-packages */
+
+ data->parent_package = sub_package;
+ switch (package->ret_info.type) {
+ case ACPI_PTYPE2:
+ case ACPI_PTYPE2_PKG_COUNT:
+ case ACPI_PTYPE2_REV_FIXED:
+
+ /* Each subpackage has a fixed number of elements */
+
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ status =
+ acpi_ns_check_package_elements(data, sub_elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ package->ret_info.
+ count2, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE2_FIX_VAR:
+ /*
+ * Each subpackage has a fixed number of elements and an
+ * optional element
+ */
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ status =
+ acpi_ns_check_package_elements(data, sub_elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ sub_package->package.
+ count -
+ package->ret_info.
+ count1, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE2_FIXED:
+
+ /* Each sub-package has a fixed length */
+
+ expected_count = package->ret_info2.count;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ /* Check the type of each sub-package element */
+
+ for (j = 0; j < expected_count; j++) {
+ status =
+ acpi_ns_check_object_type(data,
+ &sub_elements[j],
+ package->
+ ret_info2.
+ object_type[j],
+ j);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ break;
+
+ case ACPI_PTYPE2_MIN:
+
+ /* Each sub-package has a variable but minimum length */
+
+ expected_count = package->ret_info.count1;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ /* Check the type of each sub-package element */
+
+ status =
+ acpi_ns_check_package_elements(data, sub_elements,
+ package->ret_info.
+ object_type1,
+ sub_package->package.
+ count, 0, 0, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE2_COUNT:
+
+ /*
+ * First element is the (Integer) count of elements, including
+ * the count field (the ACPI name is num_elements)
+ */
+ status = acpi_ns_check_object_type(data, sub_elements,
+ ACPI_RTYPE_INTEGER,
+ 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * Make sure package is large enough for the Count and is
+ * as large as the minimum size
+ */
+ expected_count = (u32)(*sub_elements)->integer.value;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+ if (sub_package->package.count <
+ package->ret_info.count1) {
+ expected_count = package->ret_info.count1;
+ goto package_too_small;
+ }
+ if (expected_count == 0) {
+ /*
+ * Either the num_entries element was originally zero or it was
+ * a NULL element and repaired to an Integer of value zero.
+ * In either case, repair it by setting num_entries to be the
+ * actual size of the subpackage.
+ */
+ expected_count = sub_package->package.count;
+ (*sub_elements)->integer.value = expected_count;
+ }
+
+ /* Check the type of each sub-package element */
+
+ status =
+ acpi_ns_check_package_elements(data,
+ (sub_elements + 1),
+ package->ret_info.
+ object_type1,
+ (expected_count - 1),
+ 0, 0, 1);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ default: /* Should not get here, type was validated by caller */
+
+ return (AE_AML_INTERNAL);
+ }
+
+ elements++;
+ }
+
+ return (AE_OK);
+
+ package_too_small:
+
+ /* The sub-package count was smaller than required */
+
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
+ "Return Sub-Package[%u] is too small - found %u elements, expected %u",
+ i, sub_package->package.count, expected_count));
+
+ return (AE_AML_OPERAND_VALUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_package_elements
+ *
+ * PARAMETERS: data - Pointer to validation data structure
+ * elements - Pointer to the package elements array
+ * type1 - Object type for first group
+ * count1 - Count for first group
+ * type2 - Object type for second group
+ * count2 - Count for second group
+ * start_index - Start of the first group of elements
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check that all elements of a package are of the correct object
+ * type. Supports up to two groups of different object types.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_package_elements(struct acpi_predefined_data *data,
+ union acpi_operand_object **elements,
+ u8 type1,
+ u32 count1,
+ u8 type2, u32 count2, u32 start_index)
+{
+ union acpi_operand_object **this_element = elements;
+ acpi_status status;
+ u32 i;
+
+ /*
+ * Up to two groups of package elements are supported by the data
+ * structure. All elements in each group must be of the same type.
+ * The second group can have a count of zero.
+ */
+ for (i = 0; i < count1; i++) {
+ status = acpi_ns_check_object_type(data, this_element,
+ type1, i + start_index);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ this_element++;
+ }
+
+ for (i = 0; i < count2; i++) {
+ status = acpi_ns_check_object_type(data, this_element,
+ type2,
+ (i + count1 + start_index));
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ this_element++;
+ }
+
+ return (AE_OK);
+}
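
Note: the two-group check above reduces to a simple pattern — validate count1 leading elements against type1, then count2 trailing elements against type2, reporting the absolute element index. A minimal standalone sketch in plain C for comparison (hypothetical names, not part of this patch):

#include <stdio.h>

enum obj_type { OBJ_INTEGER, OBJ_STRING, OBJ_BUFFER };

/* Hypothetical stand-in for acpi_ns_check_object_type() */
static int check_type(enum obj_type found, enum obj_type wanted, unsigned int idx)
{
	if (found != wanted) {
		fprintf(stderr, "element %u has the wrong type\n", idx);
		return -1;
	}
	return 0;
}

/* count1 elements of type1 followed by count2 elements of type2 */
static int check_elements(const enum obj_type *elem,
			  enum obj_type type1, unsigned int count1,
			  enum obj_type type2, unsigned int count2,
			  unsigned int start_index)
{
	unsigned int i;

	for (i = 0; i < count1; i++) {
		if (check_type(elem[i], type1, i + start_index))
			return -1;
	}
	for (i = 0; i < count2; i++) {
		if (check_type(elem[count1 + i], type2, count1 + i + start_index))
			return -1;
	}
	return 0;
}
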
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 8c5f292860fc..9e833353c06a 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 90189251cdf0..ba4d98287c6a 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,8 @@ ACPI_MODULE_NAME("nsrepair2")
*/
typedef
acpi_status(*acpi_repair_function) (struct acpi_predefined_data *data,
- union acpi_operand_object **return_object_ptr);
+ union acpi_operand_object
+ **return_object_ptr);
typedef struct acpi_repair_info {
char name[ACPI_NAME_SIZE];
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 1d2d8ffc1bc5..5d43efc53a61 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -328,6 +328,11 @@ acpi_ns_search_and_enter(u32 target_name,
if ((status == AE_OK) && (flags & ACPI_NS_ERROR_IF_FOUND)) {
status = AE_ALREADY_EXISTS;
}
+#ifdef ACPI_ASL_COMPILER
+ if (*return_node && (*return_node)->type == ACPI_TYPE_ANY) {
+ (*return_node)->flags |= ANOBJ_IS_EXTERNAL;
+ }
+#endif
/* Either found it or there was an error: finished either way */
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index b5b4cb72a8a8..686420df684f 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,14 +46,11 @@
#include "accommon.h"
#include "acnamesp.h"
#include "amlcode.h"
-#include "actables.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsutils")
/* Local prototypes */
-static u8 acpi_ns_valid_path_separator(char sep);
-
#ifdef ACPI_OBSOLETE_FUNCTIONS
acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
#endif
@@ -99,42 +96,6 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
/*******************************************************************************
*
- * FUNCTION: acpi_ns_valid_root_prefix
- *
- * PARAMETERS: prefix - Character to be checked
- *
- * RETURN: TRUE if a valid prefix
- *
- * DESCRIPTION: Check if a character is a valid ACPI Root prefix
- *
- ******************************************************************************/
-
-u8 acpi_ns_valid_root_prefix(char prefix)
-{
-
- return ((u8) (prefix == '\\'));
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_valid_path_separator
- *
- * PARAMETERS: sep - Character to be checked
- *
- * RETURN: TRUE if a valid path separator
- *
- * DESCRIPTION: Check if a character is a valid ACPI path separator
- *
- ******************************************************************************/
-
-static u8 acpi_ns_valid_path_separator(char sep)
-{
-
- return ((u8) (sep == '.'));
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_get_type
*
* PARAMETERS: node - Parent Node to be examined
@@ -151,10 +112,10 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
if (!node) {
ACPI_WARNING((AE_INFO, "Null Node parameter"));
- return_UINT32(ACPI_TYPE_ANY);
+ return_VALUE(ACPI_TYPE_ANY);
}
- return_UINT32((acpi_object_type) node->type);
+ return_VALUE(node->type);
}
/*******************************************************************************
@@ -179,10 +140,10 @@ u32 acpi_ns_local(acpi_object_type type)
/* Type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
- return_UINT32(ACPI_NS_NORMAL);
+ return_VALUE(ACPI_NS_NORMAL);
}
- return_UINT32((u32) acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
+ return_VALUE(acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
}
/*******************************************************************************
@@ -218,19 +179,19 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
*
* strlen() + 1 covers the first name_seg, which has no path separator
*/
- if (acpi_ns_valid_root_prefix(*next_external_char)) {
+ if (ACPI_IS_ROOT_PREFIX(*next_external_char)) {
info->fully_qualified = TRUE;
next_external_char++;
/* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */
- while (acpi_ns_valid_root_prefix(*next_external_char)) {
+ while (ACPI_IS_ROOT_PREFIX(*next_external_char)) {
next_external_char++;
}
} else {
/* Handle Carat prefixes */
- while (*next_external_char == '^') {
+ while (ACPI_IS_PARENT_PREFIX(*next_external_char)) {
info->num_carats++;
next_external_char++;
}
@@ -244,7 +205,7 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
if (*next_external_char) {
info->num_segments = 1;
for (i = 0; next_external_char[i]; i++) {
- if (acpi_ns_valid_path_separator(next_external_char[i])) {
+ if (ACPI_IS_PATH_SEPARATOR(next_external_char[i])) {
info->num_segments++;
}
}
@@ -282,7 +243,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
/* Setup the correct prefixes, counts, and pointers */
if (info->fully_qualified) {
- internal_name[0] = '\\';
+ internal_name[0] = AML_ROOT_PREFIX;
if (num_segments <= 1) {
result = &internal_name[1];
@@ -302,7 +263,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
i = 0;
if (info->num_carats) {
for (i = 0; i < info->num_carats; i++) {
- internal_name[i] = '^';
+ internal_name[i] = AML_PARENT_PREFIX;
}
}
@@ -322,7 +283,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
for (; num_segments; num_segments--) {
for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (acpi_ns_valid_path_separator(*external_name) ||
+ if (ACPI_IS_PATH_SEPARATOR(*external_name) ||
(*external_name == 0)) {
/* Pad the segment with underscore(s) if segment is short */
@@ -339,7 +300,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
/* Now we must have a path separator, or the pathname is bad */
- if (!acpi_ns_valid_path_separator(*external_name) &&
+ if (!ACPI_IS_PATH_SEPARATOR(*external_name) &&
(*external_name != 0)) {
return_ACPI_STATUS(AE_BAD_PATHNAME);
}
@@ -457,13 +418,13 @@ acpi_ns_externalize_name(u32 internal_name_length,
/* Check for a prefix (one '\' | one or more '^') */
switch (internal_name[0]) {
- case '\\':
+ case AML_ROOT_PREFIX:
prefix_length = 1;
break;
- case '^':
+ case AML_PARENT_PREFIX:
for (i = 0; i < internal_name_length; i++) {
- if (internal_name[i] == '^') {
+ if (ACPI_IS_PARENT_PREFIX(internal_name[i])) {
prefix_length = i + 1;
} else {
break;
@@ -664,17 +625,17 @@ void acpi_ns_terminate(void)
u32 acpi_ns_opens_scope(acpi_object_type type)
{
- ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type));
+ ACPI_FUNCTION_ENTRY();
- if (!acpi_ut_valid_object_type(type)) {
+ if (type > ACPI_TYPE_LOCAL_MAX) {
/* type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
- return_UINT32(ACPI_NS_NORMAL);
+ return (ACPI_NS_NORMAL);
}
- return_UINT32(((u32) acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
+ return (((u32)acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
}
/*******************************************************************************
@@ -710,6 +671,8 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname));
+ /* Simplest case is a null pathname */
+
if (!pathname) {
*return_node = prefix_node;
if (!prefix_node) {
@@ -718,6 +681,13 @@ acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
return_ACPI_STATUS(AE_OK);
}
+ /* Quick check for a reference to the root */
+
+ if (ACPI_IS_ROOT_PREFIX(pathname[0]) && (!pathname[1])) {
+ *return_node = acpi_gbl_root_node;
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Convert path to internal representation */
status = acpi_ns_internalize_name(pathname, &internal_path);
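
Note: the removed acpi_ns_valid_root_prefix()/acpi_ns_valid_path_separator() helpers show what the ACPI_IS_* macros used above must amount to; a sketch under that assumption (the real definitions live elsewhere in ACPICA and are not part of this hunk):

/* Sketch only - inferred from the helpers this patch removes */
#define ACPI_IS_ROOT_PREFIX(c)      ((c) == (char)AML_ROOT_PREFIX)	/* '\\' */
#define ACPI_IS_PARENT_PREFIX(c)    ((c) == (char)AML_PARENT_PREFIX)	/* '^'  */
#define ACPI_IS_PATH_SEPARATOR(c)   ((c) == '.')
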
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 0483877f26b8..e70911a9e441 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,12 +76,12 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
/* It's really the parent's _scope_ that we want */
- return parent_node->child;
+ return (parent_node->child);
}
/* Otherwise just return the next peer */
- return child_node->peer;
+ return (child_node->peer);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index d6a9f77972b6..fc69949151bb 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -236,7 +236,7 @@ acpi_evaluate_object(acpi_handle handle,
* 2) No handle, not fully qualified pathname (error)
* 3) Valid handle
*/
- if ((pathname) && (acpi_ns_valid_root_prefix(pathname[0]))) {
+ if ((pathname) && (ACPI_IS_ROOT_PREFIX(pathname[0]))) {
/* The path is fully qualified, just evaluate by name */
@@ -492,7 +492,7 @@ acpi_walk_namespace(acpi_object_type type,
*/
status = acpi_ut_acquire_read_lock(&acpi_gbl_namespace_rw_lock);
if (ACPI_FAILURE(status)) {
- return status;
+ return_ACPI_STATUS(status);
}
/*
@@ -550,7 +550,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ return (status);
}
node = acpi_ns_validate_handle(obj_handle);
@@ -602,17 +602,22 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
/* Walk the CID list */
- found = 0;
+ found = FALSE;
for (i = 0; i < cid->count; i++) {
if (ACPI_STRCMP(cid->ids[i].string, info->hid)
== 0) {
- found = 1;
+
+ /* Found a matching CID */
+
+ found = TRUE;
break;
}
}
+
ACPI_FREE(cid);
- if (!found)
+ if (!found) {
return (AE_OK);
+ }
}
}
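
Note: the found = FALSE/TRUE rework above is just a linear scan of the device's _CID string list for the requested HID; as a standalone sketch (hypothetical helper, plain C):

#include <string.h>

/* Return 1 if any compatible ID in the list matches the requested HID */
static int cid_list_matches(const char *const *cids, unsigned int count,
			    const char *hid)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (strcmp(cids[i], hid) == 0)
			return 1;	/* Found a matching CID */
	}
	return 0;
}
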
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 811c6f13f476..f3a4d95899f7 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -107,7 +107,7 @@ acpi_get_handle(acpi_handle parent,
*
* Error for <null Parent + relative path>
*/
- if (acpi_ns_valid_root_prefix(pathname[0])) {
+ if (ACPI_IS_ROOT_PREFIX(pathname[0])) {
/* Pathname is fully qualified (starts with '\') */
@@ -290,7 +290,7 @@ acpi_get_object_info(acpi_handle handle,
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- goto cleanup;
+ return (status);
}
node = acpi_ns_validate_handle(handle);
@@ -539,14 +539,14 @@ acpi_status acpi_install_method(u8 *buffer)
/* Parameter validation */
if (!buffer) {
- return AE_BAD_PARAMETER;
+ return (AE_BAD_PARAMETER);
}
/* Table must be a DSDT or SSDT */
if (!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) &&
!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
- return AE_BAD_HEADER;
+ return (AE_BAD_HEADER);
}
/* First AML opcode in the table must be a control method */
@@ -554,7 +554,7 @@ acpi_status acpi_install_method(u8 *buffer)
parser_state.aml = buffer + sizeof(struct acpi_table_header);
opcode = acpi_ps_peek_opcode(&parser_state);
if (opcode != AML_METHOD_OP) {
- return AE_BAD_PARAMETER;
+ return (AE_BAD_PARAMETER);
}
/* Extract method information from the raw AML */
@@ -572,13 +572,13 @@ acpi_status acpi_install_method(u8 *buffer)
*/
aml_buffer = ACPI_ALLOCATE(aml_length);
if (!aml_buffer) {
- return AE_NO_MEMORY;
+ return (AE_NO_MEMORY);
}
method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
if (!method_obj) {
ACPI_FREE(aml_buffer);
- return AE_NO_MEMORY;
+ return (AE_NO_MEMORY);
}
/* Lock namespace for acpi_ns_lookup, we may be creating a new node */
@@ -644,12 +644,12 @@ acpi_status acpi_install_method(u8 *buffer)
/* Remove local reference to the method object */
acpi_ut_remove_reference(method_obj);
- return status;
+ return (status);
error_exit:
ACPI_FREE(aml_buffer);
ACPI_FREE(method_obj);
- return status;
+ return (status);
}
ACPI_EXPORT_SYMBOL(acpi_install_method)
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 9d029dac6b64..c0853ef294e4 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index cb79e2d4d743..f51308cdbc65 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
/* Byte 0 is a special case, either bits [0:3] or [0:5] are used */
package_length |= (aml[0] & byte_zero_mask);
- return_UINT32(package_length);
+ return_VALUE(package_length);
}
/*******************************************************************************
@@ -162,7 +162,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
/* Point past any namestring prefix characters (backslash or carat) */
- while (acpi_ps_is_prefix_char(*end)) {
+ while (ACPI_IS_ROOT_PREFIX(*end) || ACPI_IS_PARENT_PREFIX(*end)) {
end++;
}
@@ -798,7 +798,8 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
subop = acpi_ps_peek_opcode(parser_state);
if (subop == 0 ||
acpi_ps_is_leading_char(subop) ||
- acpi_ps_is_prefix_char(subop)) {
+ ACPI_IS_ROOT_PREFIX(subop) ||
+ ACPI_IS_PARENT_PREFIX(subop)) {
/* null_name or name_string */
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 5607805aab26..63c455447481 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -58,352 +58,17 @@
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psloop")
-static u32 acpi_gbl_depth = 0;
-
/* Local prototypes */
-
-static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
-
-static acpi_status
-acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
- u8 * aml_op_start,
- union acpi_parse_object *unnamed_op,
- union acpi_parse_object **op);
-
-static acpi_status
-acpi_ps_create_op(struct acpi_walk_state *walk_state,
- u8 * aml_op_start, union acpi_parse_object **new_op);
-
static acpi_status
acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
u8 * aml_op_start, union acpi_parse_object *op);
-static acpi_status
-acpi_ps_complete_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object **op, acpi_status status);
-
-static acpi_status
-acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op, acpi_status status);
-
static void
acpi_ps_link_module_code(union acpi_parse_object *parent_op,
u8 *aml_start, u32 aml_length, acpi_owner_id owner_id);
/*******************************************************************************
*
- * FUNCTION: acpi_ps_get_aml_opcode
- *
- * PARAMETERS: walk_state - Current state
- *
- * RETURN: Status
- *
- * DESCRIPTION: Extract the next AML opcode from the input stream.
- *
- ******************************************************************************/
-
-static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
-{
-
- ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
-
- walk_state->aml_offset =
- (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
- walk_state->parser_state.aml_start);
- walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
-
- /*
- * First cut to determine what we have found:
- * 1) A valid AML opcode
- * 2) A name string
- * 3) An unknown/invalid opcode
- */
- walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
-
- switch (walk_state->op_info->class) {
- case AML_CLASS_ASCII:
- case AML_CLASS_PREFIX:
- /*
- * Starts with a valid prefix or ASCII char, this is a name
- * string. Convert the bare name string to a namepath.
- */
- walk_state->opcode = AML_INT_NAMEPATH_OP;
- walk_state->arg_types = ARGP_NAMESTRING;
- break;
-
- case AML_CLASS_UNKNOWN:
-
- /* The opcode is unrecognized. Complain and skip unknown opcodes */
-
- if (walk_state->pass_number == 2) {
- ACPI_ERROR((AE_INFO,
- "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
- walk_state->opcode,
- (u32)(walk_state->aml_offset +
- sizeof(struct acpi_table_header))));
-
- ACPI_DUMP_BUFFER(walk_state->parser_state.aml - 16, 48);
-
-#ifdef ACPI_ASL_COMPILER
- /*
- * This is executed for the disassembler only. Output goes
- * to the disassembled ASL output file.
- */
- acpi_os_printf
- ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
- walk_state->opcode,
- (u32)(walk_state->aml_offset +
- sizeof(struct acpi_table_header)));
-
- /* Dump the context surrounding the invalid opcode */
-
- acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
- aml - 16), 48, DB_BYTE_DISPLAY,
- walk_state->aml_offset +
- sizeof(struct acpi_table_header) -
- 16);
- acpi_os_printf(" */\n");
-#endif
- }
-
- /* Increment past one-byte or two-byte opcode */
-
- walk_state->parser_state.aml++;
- if (walk_state->opcode > 0xFF) { /* Can only happen if first byte is 0x5B */
- walk_state->parser_state.aml++;
- }
-
- return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
-
- default:
-
- /* Found opcode info, this is a normal opcode */
-
- walk_state->parser_state.aml +=
- acpi_ps_get_opcode_size(walk_state->opcode);
- walk_state->arg_types = walk_state->op_info->parse_args;
- break;
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_build_named_op
- *
- * PARAMETERS: walk_state - Current state
- * aml_op_start - Begin of named Op in AML
- * unnamed_op - Early Op (not a named Op)
- * op - Returned Op
- *
- * RETURN: Status
- *
- * DESCRIPTION: Parse a named Op
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
- u8 * aml_op_start,
- union acpi_parse_object *unnamed_op,
- union acpi_parse_object **op)
-{
- acpi_status status = AE_OK;
- union acpi_parse_object *arg = NULL;
-
- ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
-
- unnamed_op->common.value.arg = NULL;
- unnamed_op->common.arg_list_length = 0;
- unnamed_op->common.aml_opcode = walk_state->opcode;
-
- /*
- * Get and append arguments until we find the node that contains
- * the name (the type ARGP_NAME).
- */
- while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
- (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
- status =
- acpi_ps_get_next_arg(walk_state,
- &(walk_state->parser_state),
- GET_CURRENT_ARG_TYPE(walk_state->
- arg_types), &arg);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- acpi_ps_append_arg(unnamed_op, arg);
- INCREMENT_ARG_LIST(walk_state->arg_types);
- }
-
- /*
- * Make sure that we found a NAME and didn't run out of arguments
- */
- if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
- return_ACPI_STATUS(AE_AML_NO_OPERAND);
- }
-
- /* We know that this arg is a name, move to next arg */
-
- INCREMENT_ARG_LIST(walk_state->arg_types);
-
- /*
- * Find the object. This will either insert the object into
- * the namespace or simply look it up
- */
- walk_state->op = NULL;
-
- status = walk_state->descending_callback(walk_state, op);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
- return_ACPI_STATUS(status);
- }
-
- if (!*op) {
- return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
- }
-
- status = acpi_ps_next_parse_state(walk_state, *op, status);
- if (ACPI_FAILURE(status)) {
- if (status == AE_CTRL_PENDING) {
- return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
- }
- return_ACPI_STATUS(status);
- }
-
- acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
- acpi_gbl_depth++;
-
- if ((*op)->common.aml_opcode == AML_REGION_OP ||
- (*op)->common.aml_opcode == AML_DATA_REGION_OP) {
- /*
- * Defer final parsing of an operation_region body, because we don't
- * have enough info in the first pass to parse it correctly (i.e.,
- * there may be method calls within the term_arg elements of the body.)
- *
- * However, we must continue parsing because the opregion is not a
- * standalone package -- we don't know where the end is at this point.
- *
- * (Length is unknown until parse of the body complete)
- */
- (*op)->named.data = aml_op_start;
- (*op)->named.length = 0;
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_create_op
- *
- * PARAMETERS: walk_state - Current state
- * aml_op_start - Op start in AML
- * new_op - Returned Op
- *
- * RETURN: Status
- *
- * DESCRIPTION: Get Op from AML
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_create_op(struct acpi_walk_state *walk_state,
- u8 * aml_op_start, union acpi_parse_object **new_op)
-{
- acpi_status status = AE_OK;
- union acpi_parse_object *op;
- union acpi_parse_object *named_op = NULL;
- union acpi_parse_object *parent_scope;
- u8 argument_count;
- const struct acpi_opcode_info *op_info;
-
- ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
-
- status = acpi_ps_get_aml_opcode(walk_state);
- if (status == AE_CTRL_PARSE_CONTINUE) {
- return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
- }
-
- /* Create Op structure and append to parent's argument list */
-
- walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
- op = acpi_ps_alloc_op(walk_state->opcode);
- if (!op) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- if (walk_state->op_info->flags & AML_NAMED) {
- status =
- acpi_ps_build_named_op(walk_state, aml_op_start, op,
- &named_op);
- acpi_ps_free_op(op);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- *new_op = named_op;
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Not a named opcode, just allocate Op and append to parent */
-
- if (walk_state->op_info->flags & AML_CREATE) {
- /*
- * Backup to beginning of create_XXXfield declaration
- * body_length is unknown until we parse the body
- */
- op->named.data = aml_op_start;
- op->named.length = 0;
- }
-
- if (walk_state->opcode == AML_BANK_FIELD_OP) {
- /*
- * Backup to beginning of bank_field declaration
- * body_length is unknown until we parse the body
- */
- op->named.data = aml_op_start;
- op->named.length = 0;
- }
-
- parent_scope = acpi_ps_get_parent_scope(&(walk_state->parser_state));
- acpi_ps_append_arg(parent_scope, op);
-
- if (parent_scope) {
- op_info =
- acpi_ps_get_opcode_info(parent_scope->common.aml_opcode);
- if (op_info->flags & AML_HAS_TARGET) {
- argument_count =
- acpi_ps_get_argument_count(op_info->type);
- if (parent_scope->common.arg_list_length >
- argument_count) {
- op->common.flags |= ACPI_PARSEOP_TARGET;
- }
- } else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
- op->common.flags |= ACPI_PARSEOP_TARGET;
- }
- }
-
- if (walk_state->descending_callback != NULL) {
- /*
- * Find the object. This will either insert the object into
- * the namespace or simply look it up
- */
- walk_state->op = *new_op = op;
-
- status = walk_state->descending_callback(walk_state, &op);
- status = acpi_ps_next_parse_state(walk_state, op, status);
- if (status == AE_CTRL_PENDING) {
- status = AE_CTRL_PARSE_PENDING;
- }
- }
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ps_get_arguments
*
* PARAMETERS: walk_state - Current state
@@ -711,288 +376,6 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
/*******************************************************************************
*
- * FUNCTION: acpi_ps_complete_op
- *
- * PARAMETERS: walk_state - Current state
- * op - Returned Op
- * status - Parse status before complete Op
- *
- * RETURN: Status
- *
- * DESCRIPTION: Complete Op
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_complete_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object **op, acpi_status status)
-{
- acpi_status status2;
-
- ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);
-
- /*
- * Finished one argument of the containing scope
- */
- walk_state->parser_state.scope->parse_scope.arg_count--;
-
- /* Close this Op (will result in parse subtree deletion) */
-
- status2 = acpi_ps_complete_this_op(walk_state, *op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
-
- *op = NULL;
-
- switch (status) {
- case AE_OK:
- break;
-
- case AE_CTRL_TRANSFER:
-
- /* We are about to transfer to a called method */
-
- walk_state->prev_op = NULL;
- walk_state->prev_arg_types = walk_state->arg_types;
- return_ACPI_STATUS(status);
-
- case AE_CTRL_END:
-
- acpi_ps_pop_scope(&(walk_state->parser_state), op,
- &walk_state->arg_types,
- &walk_state->arg_count);
-
- if (*op) {
- walk_state->op = *op;
- walk_state->op_info =
- acpi_ps_get_opcode_info((*op)->common.aml_opcode);
- walk_state->opcode = (*op)->common.aml_opcode;
-
- status = walk_state->ascending_callback(walk_state);
- status =
- acpi_ps_next_parse_state(walk_state, *op, status);
-
- status2 = acpi_ps_complete_this_op(walk_state, *op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
- }
-
- status = AE_OK;
- break;
-
- case AE_CTRL_BREAK:
- case AE_CTRL_CONTINUE:
-
- /* Pop off scopes until we find the While */
-
- while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
- acpi_ps_pop_scope(&(walk_state->parser_state), op,
- &walk_state->arg_types,
- &walk_state->arg_count);
- }
-
- /* Close this iteration of the While loop */
-
- walk_state->op = *op;
- walk_state->op_info =
- acpi_ps_get_opcode_info((*op)->common.aml_opcode);
- walk_state->opcode = (*op)->common.aml_opcode;
-
- status = walk_state->ascending_callback(walk_state);
- status = acpi_ps_next_parse_state(walk_state, *op, status);
-
- status2 = acpi_ps_complete_this_op(walk_state, *op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
-
- status = AE_OK;
- break;
-
- case AE_CTRL_TERMINATE:
-
- /* Clean up */
- do {
- if (*op) {
- status2 =
- acpi_ps_complete_this_op(walk_state, *op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
-
- acpi_ut_delete_generic_state
- (acpi_ut_pop_generic_state
- (&walk_state->control_state));
- }
-
- acpi_ps_pop_scope(&(walk_state->parser_state), op,
- &walk_state->arg_types,
- &walk_state->arg_count);
-
- } while (*op);
-
- return_ACPI_STATUS(AE_OK);
-
- default: /* All other non-AE_OK status */
-
- do {
- if (*op) {
- status2 =
- acpi_ps_complete_this_op(walk_state, *op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
- }
-
- acpi_ps_pop_scope(&(walk_state->parser_state), op,
- &walk_state->arg_types,
- &walk_state->arg_count);
-
- } while (*op);
-
-#if 0
- /*
- * TBD: Cleanup parse ops on error
- */
- if (*op == NULL) {
- acpi_ps_pop_scope(parser_state, op,
- &walk_state->arg_types,
- &walk_state->arg_count);
- }
-#endif
- walk_state->prev_op = NULL;
- walk_state->prev_arg_types = walk_state->arg_types;
- return_ACPI_STATUS(status);
- }
-
- /* This scope complete? */
-
- if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
- acpi_ps_pop_scope(&(walk_state->parser_state), op,
- &walk_state->arg_types,
- &walk_state->arg_count);
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
- } else {
- *op = NULL;
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_complete_final_op
- *
- * PARAMETERS: walk_state - Current state
- * op - Current Op
- * status - Current parse status before complete last
- * Op
- *
- * RETURN: Status
- *
- * DESCRIPTION: Complete last Op.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op, acpi_status status)
-{
- acpi_status status2;
-
- ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
-
- /*
- * Complete the last Op (if not completed), and clear the scope stack.
- * It is easily possible to end an AML "package" with an unbounded number
- * of open scopes (such as when several ASL blocks are closed with
- * sequential closing braces). We want to terminate each one cleanly.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
- op));
- do {
- if (op) {
- if (walk_state->ascending_callback != NULL) {
- walk_state->op = op;
- walk_state->op_info =
- acpi_ps_get_opcode_info(op->common.
- aml_opcode);
- walk_state->opcode = op->common.aml_opcode;
-
- status =
- walk_state->ascending_callback(walk_state);
- status =
- acpi_ps_next_parse_state(walk_state, op,
- status);
- if (status == AE_CTRL_PENDING) {
- status =
- acpi_ps_complete_op(walk_state, &op,
- AE_OK);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- if (status == AE_CTRL_TERMINATE) {
- status = AE_OK;
-
- /* Clean up */
- do {
- if (op) {
- status2 =
- acpi_ps_complete_this_op
- (walk_state, op);
- if (ACPI_FAILURE
- (status2)) {
- return_ACPI_STATUS
- (status2);
- }
- }
-
- acpi_ps_pop_scope(&
- (walk_state->
- parser_state),
- &op,
- &walk_state->
- arg_types,
- &walk_state->
- arg_count);
-
- } while (op);
-
- return_ACPI_STATUS(status);
- }
-
- else if (ACPI_FAILURE(status)) {
-
- /* First error is most important */
-
- (void)
- acpi_ps_complete_this_op(walk_state,
- op);
- return_ACPI_STATUS(status);
- }
- }
-
- status2 = acpi_ps_complete_this_op(walk_state, op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
- }
- }
-
- acpi_ps_pop_scope(&(walk_state->parser_state), &op,
- &walk_state->arg_types,
- &walk_state->arg_count);
-
- } while (op);
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ps_parse_loop
*
* PARAMETERS: walk_state - Current state
@@ -1177,10 +560,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
walk_state->op_info =
acpi_ps_get_opcode_info(op->common.aml_opcode);
if (walk_state->op_info->flags & AML_NAMED) {
- if (acpi_gbl_depth) {
- acpi_gbl_depth--;
- }
-
if (op->common.aml_opcode == AML_REGION_OP ||
op->common.aml_opcode == AML_DATA_REGION_OP) {
/*
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
new file mode 100644
index 000000000000..12c4028002b1
--- /dev/null
+++ b/drivers/acpi/acpica/psobject.c
@@ -0,0 +1,647 @@
+/******************************************************************************
+ *
+ * Module Name: psobject - Support for parse objects
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "amlcode.h"
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("psobject")
+
+/* Local prototypes */
+static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_aml_opcode
+ *
+ * PARAMETERS: walk_state - Current state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Extract the next AML opcode from the input stream.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
+{
+
+ ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
+
+ walk_state->aml_offset =
+ (u32)ACPI_PTR_DIFF(walk_state->parser_state.aml,
+ walk_state->parser_state.aml_start);
+ walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
+
+ /*
+ * First cut to determine what we have found:
+ * 1) A valid AML opcode
+ * 2) A name string
+ * 3) An unknown/invalid opcode
+ */
+ walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
+
+ switch (walk_state->op_info->class) {
+ case AML_CLASS_ASCII:
+ case AML_CLASS_PREFIX:
+ /*
+ * Starts with a valid prefix or ASCII char, this is a name
+ * string. Convert the bare name string to a namepath.
+ */
+ walk_state->opcode = AML_INT_NAMEPATH_OP;
+ walk_state->arg_types = ARGP_NAMESTRING;
+ break;
+
+ case AML_CLASS_UNKNOWN:
+
+ /* The opcode is unrecognized. Complain and skip unknown opcodes */
+
+ if (walk_state->pass_number == 2) {
+ ACPI_ERROR((AE_INFO,
+ "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
+ walk_state->opcode,
+ (u32)(walk_state->aml_offset +
+ sizeof(struct acpi_table_header))));
+
+ ACPI_DUMP_BUFFER((walk_state->parser_state.aml - 16),
+ 48);
+
+#ifdef ACPI_ASL_COMPILER
+ /*
+ * This is executed for the disassembler only. Output goes
+ * to the disassembled ASL output file.
+ */
+ acpi_os_printf
+ ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
+ walk_state->opcode,
+ (u32)(walk_state->aml_offset +
+ sizeof(struct acpi_table_header)));
+
+ /* Dump the context surrounding the invalid opcode */
+
+ acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
+ aml - 16), 48, DB_BYTE_DISPLAY,
+ (walk_state->aml_offset +
+ sizeof(struct acpi_table_header) -
+ 16));
+ acpi_os_printf(" */\n");
+#endif
+ }
+
+ /* Increment past one-byte or two-byte opcode */
+
+ walk_state->parser_state.aml++;
+ if (walk_state->opcode > 0xFF) { /* Can only happen if first byte is 0x5B */
+ walk_state->parser_state.aml++;
+ }
+
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+
+ default:
+
+ /* Found opcode info, this is a normal opcode */
+
+ walk_state->parser_state.aml +=
+ acpi_ps_get_opcode_size(walk_state->opcode);
+ walk_state->arg_types = walk_state->op_info->parse_args;
+ break;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_build_named_op
+ *
+ * PARAMETERS: walk_state - Current state
+ * aml_op_start - Begin of named Op in AML
+ * unnamed_op - Early Op (not a named Op)
+ * op - Returned Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Parse a named Op
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
+ u8 *aml_op_start,
+ union acpi_parse_object *unnamed_op,
+ union acpi_parse_object **op)
+{
+ acpi_status status = AE_OK;
+ union acpi_parse_object *arg = NULL;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
+
+ unnamed_op->common.value.arg = NULL;
+ unnamed_op->common.arg_list_length = 0;
+ unnamed_op->common.aml_opcode = walk_state->opcode;
+
+ /*
+ * Get and append arguments until we find the node that contains
+ * the name (the type ARGP_NAME).
+ */
+ while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
+ (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
+ status =
+ acpi_ps_get_next_arg(walk_state,
+ &(walk_state->parser_state),
+ GET_CURRENT_ARG_TYPE(walk_state->
+ arg_types), &arg);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ps_append_arg(unnamed_op, arg);
+ INCREMENT_ARG_LIST(walk_state->arg_types);
+ }
+
+ /*
+ * Make sure that we found a NAME and didn't run out of arguments
+ */
+ if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
+ return_ACPI_STATUS(AE_AML_NO_OPERAND);
+ }
+
+ /* We know that this arg is a name, move to next arg */
+
+ INCREMENT_ARG_LIST(walk_state->arg_types);
+
+ /*
+ * Find the object. This will either insert the object into
+ * the namespace or simply look it up
+ */
+ walk_state->op = NULL;
+
+ status = walk_state->descending_callback(walk_state, op);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
+ return_ACPI_STATUS(status);
+ }
+
+ if (!*op) {
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+ }
+
+ status = acpi_ps_next_parse_state(walk_state, *op, status);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_PENDING) {
+ return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
+ }
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
+
+ if ((*op)->common.aml_opcode == AML_REGION_OP ||
+ (*op)->common.aml_opcode == AML_DATA_REGION_OP) {
+ /*
+ * Defer final parsing of an operation_region body, because we don't
+ * have enough info in the first pass to parse it correctly (i.e.,
+ * there may be method calls within the term_arg elements of the body.)
+ *
+ * However, we must continue parsing because the opregion is not a
+ * standalone package -- we don't know where the end is at this point.
+ *
+ * (Length is unknown until parse of the body complete)
+ */
+ (*op)->named.data = aml_op_start;
+ (*op)->named.length = 0;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_create_op
+ *
+ * PARAMETERS: walk_state - Current state
+ * aml_op_start - Op start in AML
+ * new_op - Returned Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get Op from AML
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_create_op(struct acpi_walk_state *walk_state,
+ u8 *aml_op_start, union acpi_parse_object **new_op)
+{
+ acpi_status status = AE_OK;
+ union acpi_parse_object *op;
+ union acpi_parse_object *named_op = NULL;
+ union acpi_parse_object *parent_scope;
+ u8 argument_count;
+ const struct acpi_opcode_info *op_info;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
+
+ status = acpi_ps_get_aml_opcode(walk_state);
+ if (status == AE_CTRL_PARSE_CONTINUE) {
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+ }
+
+ /* Create Op structure and append to parent's argument list */
+
+ walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
+ op = acpi_ps_alloc_op(walk_state->opcode);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ if (walk_state->op_info->flags & AML_NAMED) {
+ status =
+ acpi_ps_build_named_op(walk_state, aml_op_start, op,
+ &named_op);
+ acpi_ps_free_op(op);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ *new_op = named_op;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Not a named opcode, just allocate Op and append to parent */
+
+ if (walk_state->op_info->flags & AML_CREATE) {
+ /*
+ * Backup to beginning of create_XXXfield declaration
+ * body_length is unknown until we parse the body
+ */
+ op->named.data = aml_op_start;
+ op->named.length = 0;
+ }
+
+ if (walk_state->opcode == AML_BANK_FIELD_OP) {
+ /*
+ * Backup to beginning of bank_field declaration
+ * body_length is unknown until we parse the body
+ */
+ op->named.data = aml_op_start;
+ op->named.length = 0;
+ }
+
+ parent_scope = acpi_ps_get_parent_scope(&(walk_state->parser_state));
+ acpi_ps_append_arg(parent_scope, op);
+
+ if (parent_scope) {
+ op_info =
+ acpi_ps_get_opcode_info(parent_scope->common.aml_opcode);
+ if (op_info->flags & AML_HAS_TARGET) {
+ argument_count =
+ acpi_ps_get_argument_count(op_info->type);
+ if (parent_scope->common.arg_list_length >
+ argument_count) {
+ op->common.flags |= ACPI_PARSEOP_TARGET;
+ }
+ } else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
+ op->common.flags |= ACPI_PARSEOP_TARGET;
+ }
+ }
+
+ if (walk_state->descending_callback != NULL) {
+ /*
+ * Find the object. This will either insert the object into
+ * the namespace or simply look it up
+ */
+ walk_state->op = *new_op = op;
+
+ status = walk_state->descending_callback(walk_state, &op);
+ status = acpi_ps_next_parse_state(walk_state, op, status);
+ if (status == AE_CTRL_PENDING) {
+ status = AE_CTRL_PARSE_PENDING;
+ }
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_complete_op
+ *
+ * PARAMETERS: walk_state - Current state
+ * op - Returned Op
+ * status - Parse status before complete Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Complete Op
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **op, acpi_status status)
+{
+ acpi_status status2;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);
+
+ /*
+ * Finished one argument of the containing scope
+ */
+ walk_state->parser_state.scope->parse_scope.arg_count--;
+
+ /* Close this Op (will result in parse subtree deletion) */
+
+ status2 = acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+
+ *op = NULL;
+
+ switch (status) {
+ case AE_OK:
+ break;
+
+ case AE_CTRL_TRANSFER:
+
+ /* We are about to transfer to a called method */
+
+ walk_state->prev_op = NULL;
+ walk_state->prev_arg_types = walk_state->arg_types;
+ return_ACPI_STATUS(status);
+
+ case AE_CTRL_END:
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ if (*op) {
+ walk_state->op = *op;
+ walk_state->op_info =
+ acpi_ps_get_opcode_info((*op)->common.aml_opcode);
+ walk_state->opcode = (*op)->common.aml_opcode;
+
+ status = walk_state->ascending_callback(walk_state);
+ status =
+ acpi_ps_next_parse_state(walk_state, *op, status);
+
+ status2 = acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+ }
+
+ status = AE_OK;
+ break;
+
+ case AE_CTRL_BREAK:
+ case AE_CTRL_CONTINUE:
+
+ /* Pop off scopes until we find the While */
+
+ while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+ }
+
+ /* Close this iteration of the While loop */
+
+ walk_state->op = *op;
+ walk_state->op_info =
+ acpi_ps_get_opcode_info((*op)->common.aml_opcode);
+ walk_state->opcode = (*op)->common.aml_opcode;
+
+ status = walk_state->ascending_callback(walk_state);
+ status = acpi_ps_next_parse_state(walk_state, *op, status);
+
+ status2 = acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+
+ status = AE_OK;
+ break;
+
+ case AE_CTRL_TERMINATE:
+
+ /* Clean up */
+ do {
+ if (*op) {
+ status2 =
+ acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+
+ acpi_ut_delete_generic_state
+ (acpi_ut_pop_generic_state
+ (&walk_state->control_state));
+ }
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ } while (*op);
+
+ return_ACPI_STATUS(AE_OK);
+
+ default: /* All other non-AE_OK status */
+
+ do {
+ if (*op) {
+ status2 =
+ acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+ }
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ } while (*op);
+
+#if 0
+ /*
+ * TBD: Cleanup parse ops on error
+ */
+ if (*op == NULL) {
+ acpi_ps_pop_scope(parser_state, op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+ }
+#endif
+ walk_state->prev_op = NULL;
+ walk_state->prev_arg_types = walk_state->arg_types;
+ return_ACPI_STATUS(status);
+ }
+
+ /* This scope complete? */
+
+ if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
+ acpi_ps_pop_scope(&(walk_state->parser_state), op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
+ } else {
+ *op = NULL;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_complete_final_op
+ *
+ * PARAMETERS: walk_state - Current state
+ * op - Current Op
+ * status - Current parse status before complete last
+ * Op
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Complete last Op.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op, acpi_status status)
+{
+ acpi_status status2;
+
+ ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
+
+ /*
+ * Complete the last Op (if not completed), and clear the scope stack.
+ * It is easily possible to end an AML "package" with an unbounded number
+ * of open scopes (such as when several ASL blocks are closed with
+ * sequential closing braces). We want to terminate each one cleanly.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
+ op));
+ do {
+ if (op) {
+ if (walk_state->ascending_callback != NULL) {
+ walk_state->op = op;
+ walk_state->op_info =
+ acpi_ps_get_opcode_info(op->common.
+ aml_opcode);
+ walk_state->opcode = op->common.aml_opcode;
+
+ status =
+ walk_state->ascending_callback(walk_state);
+ status =
+ acpi_ps_next_parse_state(walk_state, op,
+ status);
+ if (status == AE_CTRL_PENDING) {
+ status =
+ acpi_ps_complete_op(walk_state, &op,
+ AE_OK);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ if (status == AE_CTRL_TERMINATE) {
+ status = AE_OK;
+
+ /* Clean up */
+ do {
+ if (op) {
+ status2 =
+ acpi_ps_complete_this_op
+ (walk_state, op);
+ if (ACPI_FAILURE
+ (status2)) {
+ return_ACPI_STATUS
+ (status2);
+ }
+ }
+
+ acpi_ps_pop_scope(&
+ (walk_state->
+ parser_state),
+ &op,
+ &walk_state->
+ arg_types,
+ &walk_state->
+ arg_count);
+
+ } while (op);
+
+ return_ACPI_STATUS(status);
+ }
+
+ else if (ACPI_FAILURE(status)) {
+
+ /* First error is most important */
+
+ (void)
+ acpi_ps_complete_this_op(walk_state,
+ op);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ status2 = acpi_ps_complete_this_op(walk_state, op);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+ }
+
+ acpi_ps_pop_scope(&(walk_state->parser_state), &op,
+ &walk_state->arg_types,
+ &walk_state->arg_count);
+
+ } while (op);
+
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 1793d934aa30..1b659e59710a 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,16 +43,12 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "acparser.h"
#include "acopcode.h"
#include "amlcode.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psopcode")
-static const u8 acpi_gbl_argument_count[] =
- { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
-
/*******************************************************************************
*
* NAME: acpi_gbl_aml_op_info
@@ -63,7 +59,6 @@ static const u8 acpi_gbl_argument_count[] =
* the operand type.
*
******************************************************************************/
-
/*
* Summary of opcode types/flags
*
@@ -181,7 +176,6 @@ static const u8 acpi_gbl_argument_count[] =
AML_CREATE_QWORD_FIELD_OP
******************************************************************************/
-
/*
* Master Opcode information table. A summary of everything we know about each
* opcode, all in one place.
@@ -656,169 +650,3 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/*! [End] no source code translation !*/
};
-
-/*
- * This table is directly indexed by the opcodes, and returns an
- * index into the table above
- */
-static const u8 acpi_gbl_short_op_index[256] = {
-/* 0 1 2 3 4 5 6 7 */
-/* 8 9 A B C D E F */
-/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
-/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
-/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
-/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
-/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
-/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
-/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
-/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
-/* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC,
-/* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
-/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
-/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
-/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
-/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
-/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
-/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
-/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
-/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
-/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK,
-/* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45,
-};
-
-/*
- * This table is indexed by the second opcode of the extended opcode
- * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
- */
-static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
-/* 0 1 2 3 4 5 6 7 */
-/* 8 9 A B C D E F */
-/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
-/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
-/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
-/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK,
-/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
-/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
-/* 0x88 */ 0x7C,
-};
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_get_opcode_info
- *
- * PARAMETERS: opcode - The AML opcode
- *
- * RETURN: A pointer to the info about the opcode.
- *
- * DESCRIPTION: Find AML opcode description based on the opcode.
- * NOTE: This procedure must ALWAYS return a valid pointer!
- *
- ******************************************************************************/
-
-const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
-{
- ACPI_FUNCTION_NAME(ps_get_opcode_info);
-
- /*
- * Detect normal 8-bit opcode or extended 16-bit opcode
- */
- if (!(opcode & 0xFF00)) {
-
- /* Simple (8-bit) opcode: 0-255, can't index beyond table */
-
- return (&acpi_gbl_aml_op_info
- [acpi_gbl_short_op_index[(u8) opcode]]);
- }
-
- if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
- (((u8) opcode) <= MAX_EXTENDED_OPCODE)) {
-
- /* Valid extended (16-bit) opcode */
-
- return (&acpi_gbl_aml_op_info
- [acpi_gbl_long_op_index[(u8) opcode]]);
- }
-
- /* Unknown AML opcode */
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Unknown AML opcode [%4.4X]\n", opcode));
-
- return (&acpi_gbl_aml_op_info[_UNK]);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_get_opcode_name
- *
- * PARAMETERS: opcode - The AML opcode
- *
- * RETURN: A pointer to the name of the opcode (ASCII String)
- * Note: Never returns NULL.
- *
- * DESCRIPTION: Translate an opcode into a human-readable string
- *
- ******************************************************************************/
-
-char *acpi_ps_get_opcode_name(u16 opcode)
-{
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
-
- const struct acpi_opcode_info *op;
-
- op = acpi_ps_get_opcode_info(opcode);
-
- /* Always guaranteed to return a valid pointer */
-
- return (op->name);
-
-#else
- return ("OpcodeName unavailable");
-
-#endif
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ps_get_argument_count
- *
- * PARAMETERS: op_type - Type associated with the AML opcode
- *
- * RETURN: Argument count
- *
- * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
- *
- ******************************************************************************/
-
-u8 acpi_ps_get_argument_count(u32 op_type)
-{
-
- if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
- return (acpi_gbl_argument_count[op_type]);
- }
-
- return (0);
-}
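
acpi_ps_get_argument_count(), removed here and re-added in psopinfo.c below, simply indexes acpi_gbl_argument_count[] by operator type; the AML_TYPE_* names appear to encode that shape, e.g. AML_TYPE_EXEC_6A_0T_1R reading as six arguments, no target, one return value. A standalone sketch of the same idea, using made-up type constants rather than the real acopcode.h values:

#include <stdio.h>

/* Illustrative stand-ins only; the real constants live in acopcode.h */
enum {
	DEMO_TYPE_EXEC_0A_0T_1R,	/* 0 args, no target, 1 return value */
	DEMO_TYPE_EXEC_1A_1T_1R,	/* 1 arg,  1 target,  1 return value */
	DEMO_TYPE_EXEC_2A_1T_1R,	/* 2 args, 1 target,  1 return value */
	DEMO_TYPE_EXEC_6A_0T_1R,	/* 6 args, no target, 1 return value */
	DEMO_NUM_TYPES
};

/* Argument count indexed by operator type, as acpi_gbl_argument_count does */
static const unsigned char demo_argument_count[DEMO_NUM_TYPES] = { 0, 1, 2, 6 };

static unsigned char demo_get_argument_count(unsigned int op_type)
{
	if (op_type < DEMO_NUM_TYPES)
		return demo_argument_count[op_type];

	return 0;	/* Unknown operator type: no fixed arguments */
}

int main(void)
{
	printf("6A_0T_1R -> %u arguments\n",
	       (unsigned int)demo_get_argument_count(DEMO_TYPE_EXEC_6A_0T_1R));
	return 0;
}
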
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
new file mode 100644
index 000000000000..9ba5301e5751
--- /dev/null
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -0,0 +1,223 @@
+/******************************************************************************
+ *
+ * Module Name: psopinfo - AML opcode information functions and dispatch tables
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acparser.h"
+#include "acopcode.h"
+#include "amlcode.h"
+
+#define _COMPONENT ACPI_PARSER
+ACPI_MODULE_NAME("psopinfo")
+
+extern const u8 acpi_gbl_short_op_index[];
+extern const u8 acpi_gbl_long_op_index[];
+
+static const u8 acpi_gbl_argument_count[] =
+ { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_opcode_info
+ *
+ * PARAMETERS: opcode - The AML opcode
+ *
+ * RETURN: A pointer to the info about the opcode.
+ *
+ * DESCRIPTION: Find AML opcode description based on the opcode.
+ * NOTE: This procedure must ALWAYS return a valid pointer!
+ *
+ ******************************************************************************/
+
+const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
+{
+ ACPI_FUNCTION_NAME(ps_get_opcode_info);
+
+ /*
+ * Detect normal 8-bit opcode or extended 16-bit opcode
+ */
+ if (!(opcode & 0xFF00)) {
+
+ /* Simple (8-bit) opcode: 0-255, can't index beyond table */
+
+ return (&acpi_gbl_aml_op_info
+ [acpi_gbl_short_op_index[(u8)opcode]]);
+ }
+
+ if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
+ (((u8)opcode) <= MAX_EXTENDED_OPCODE)) {
+
+ /* Valid extended (16-bit) opcode */
+
+ return (&acpi_gbl_aml_op_info
+ [acpi_gbl_long_op_index[(u8)opcode]]);
+ }
+
+ /* Unknown AML opcode */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Unknown AML opcode [%4.4X]\n", opcode));
+
+ return (&acpi_gbl_aml_op_info[_UNK]);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_opcode_name
+ *
+ * PARAMETERS: opcode - The AML opcode
+ *
+ * RETURN: A pointer to the name of the opcode (ASCII String)
+ * Note: Never returns NULL.
+ *
+ * DESCRIPTION: Translate an opcode into a human-readable string
+ *
+ ******************************************************************************/
+
+char *acpi_ps_get_opcode_name(u16 opcode)
+{
+#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
+
+ const struct acpi_opcode_info *op;
+
+ op = acpi_ps_get_opcode_info(opcode);
+
+ /* Always guaranteed to return a valid pointer */
+
+ return (op->name);
+
+#else
+ return ("OpcodeName unavailable");
+
+#endif
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ps_get_argument_count
+ *
+ * PARAMETERS: op_type - Type associated with the AML opcode
+ *
+ * RETURN: Argument count
+ *
+ * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
+ *
+ ******************************************************************************/
+
+u8 acpi_ps_get_argument_count(u32 op_type)
+{
+
+ if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
+ return (acpi_gbl_argument_count[op_type]);
+ }
+
+ return (0);
+}
+
+/*
+ * This table is directly indexed by the opcodes. It returns
+ * an index into the opcode table (acpi_gbl_aml_op_info)
+ */
+const u8 acpi_gbl_short_op_index[256] = {
+/* 0 1 2 3 4 5 6 7 */
+/* 8 9 A B C D E F */
+/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
+/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
+/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, _UNK, _UNK, _UNK,
+/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
+/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
+/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
+/* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC,
+/* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
+/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
+/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
+/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
+/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
+/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
+/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
+/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
+/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
+/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK,
+/* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45,
+};
+
+/*
+ * This table is indexed by the second opcode of the extended opcode
+ * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
+ */
+const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
+/* 0 1 2 3 4 5 6 7 */
+/* 8 9 A B C D E F */
+/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
+/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
+/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
+/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK,
+/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+/* 0x88 */ 0x7C,
+};
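
acpi_ps_get_opcode_info() above is a two-level dispatch: a plain one-byte opcode indexes acpi_gbl_short_op_index[] directly, while a two-byte opcode whose high byte is AML_EXTENDED_OPCODE (the 0x5B ExtOpPrefix) uses its low byte to index acpi_gbl_long_op_index[]; both tables yield an index into acpi_gbl_aml_op_info[], with _UNK marking unassigned slots. A compilable sketch of the same structure follows; the table contents and sizes are toy values, not the real opcode map:

#include <stdio.h>

/*
 * Minimal sketch of the two-level dispatch in acpi_ps_get_opcode_info().
 * Table contents and sizes here are toy values; only the lookup structure
 * mirrors the real code.
 */
#define DEMO_EXT_PREFIX		0x5B00	/* high byte of an extended opcode pair  */
#define DEMO_MAX_EXT_OP		0x01	/* highest low byte covered by the table */

struct demo_opcode_info {
	const char *name;
};

static const struct demo_opcode_info demo_op_info[] = {
	{"UNKNOWN"}, {"ZeroOp"}, {"OneOp"}, {"MutexOp"},
};

/* Indexed directly by a one-byte opcode; unlisted slots stay 0 ("UNKNOWN") */
static const unsigned char demo_short_index[256] = {
	[0x00] = 1, [0x01] = 2,
};

/* Indexed by the second byte of a 0x5B xx opcode pair */
static const unsigned char demo_long_index[DEMO_MAX_EXT_OP + 1] = {
	[0x01] = 3,
};

static const struct demo_opcode_info *demo_get_opcode_info(unsigned short opcode)
{
	if (!(opcode & 0xFF00)) {	/* simple (8-bit) opcode */
		return &demo_op_info[demo_short_index[(unsigned char)opcode]];
	}

	if (((opcode & 0xFF00) == DEMO_EXT_PREFIX) &&
	    ((unsigned char)opcode <= DEMO_MAX_EXT_OP)) {	/* valid extended pair */
		return &demo_op_info[demo_long_index[(unsigned char)opcode]];
	}

	return &demo_op_info[0];	/* unknown opcode */
}

int main(void)
{
	printf("%s\n", demo_get_opcode_info(0x0001)->name);	/* OneOp   */
	printf("%s\n", demo_get_opcode_info(0x5B01)->name);	/* MutexOp */
	printf("%s\n", demo_get_opcode_info(0x5BFF)->name);	/* UNKNOWN */
	return 0;
}
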
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 2494caf47755..abc4c48b2edd 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 608dc20dc173..6a4b6fb39f32 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index fdb2e71f3046..c1934bf04f0a 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 4137dcb352d1..91fa73a6e55e 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -202,14 +202,6 @@ u8 acpi_ps_is_leading_char(u32 c)
}
/*
- * Is "c" a namestring prefix character?
- */
-u8 acpi_ps_is_prefix_char(u32 c)
-{
- return ((u8) (c == '\\' || c == '^'));
-}
-
-/*
* Get op's name (4-byte name segment) or 0 if unnamed
*/
#ifdef ACPI_FUTURE_USAGE
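
The deleted acpi_ps_is_prefix_char() only tested for the two AML namestring prefix characters, '\' (root prefix) and '^' (parent prefix); presumably the remaining callers now perform the test inline or through a macro elsewhere in this series. A minimal sketch of the equivalent check and of how such prefixes are consumed:

#include <stdio.h>

/*
 * '\' introduces an absolute (root-relative) AML name string and '^' steps
 * one scope toward the parent; anything else begins an ordinary name segment.
 */
static int demo_is_prefix_char(unsigned int c)
{
	return (c == '\\' || c == '^');
}

int main(void)
{
	const char *name = "^^PCI0.SBRG";
	unsigned int prefixes = 0;

	while (demo_is_prefix_char((unsigned char)*name)) {
		prefixes++;
		name++;
	}

	printf("prefix characters skipped: %u, remaining: %s\n", prefixes, name);
	return 0;
}
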
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index ab96cf47896d..abd65624754f 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 963e16225797..f68254268965 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 856ff075b6ab..f3a9276ac665 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 147feb6aa2a0..7816d4eef04e 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -84,7 +84,7 @@ static u8 acpi_rs_count_set_bits(u16 bit_field)
bit_field &= (u16) (bit_field - 1);
}
- return bits_set;
+ return (bits_set);
}
/*******************************************************************************
@@ -407,7 +407,9 @@ acpi_rs_get_list_length(u8 * aml_buffer,
/* Validate the Resource Type and Resource Length */
- status = acpi_ut_validate_resource(aml_buffer, &resource_index);
+ status =
+ acpi_ut_validate_resource(NULL, aml_buffer,
+ &resource_index);
if (ACPI_FAILURE(status)) {
/*
* Exit on failure. Cannot continue because the descriptor length
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 311cbc4f05fa..f8b55b426c9d 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -98,7 +98,7 @@ acpi_buffer_to_resource(u8 *aml_buffer,
/* Perform the AML-to-Resource conversion */
- status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length,
+ status = acpi_ut_walk_aml_resources(NULL, aml_buffer, aml_buffer_length,
acpi_rs_convert_aml_to_resources,
&current_resource_ptr);
if (status == AE_AML_NO_RESOURCE_END_TAG) {
@@ -174,7 +174,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
/* Do the conversion */
resource = output_buffer->pointer;
- status = acpi_ut_walk_aml_resources(aml_start, aml_buffer_length,
+ status = acpi_ut_walk_aml_resources(NULL, aml_start, aml_buffer_length,
acpi_rs_convert_aml_to_resources,
&resource);
if (ACPI_FAILURE(status)) {
@@ -480,8 +480,7 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
- (u32) aml_size_needed,
- acpi_format_exception(status)));
+ (u32)aml_size_needed, acpi_format_exception(status)));
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 4d11b072388c..cab51445189d 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,419 +77,16 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource);
static void
acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
-#define ACPI_RSD_OFFSET(f) (u8) ACPI_OFFSET (union acpi_resource_data,f)
-#define ACPI_PRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
-#define ACPI_RSD_TABLE_SIZE(name) (sizeof(name) / sizeof (struct acpi_rsdump_info))
-
-/*******************************************************************************
- *
- * Resource Descriptor info tables
- *
- * Note: The first table entry must be a Title or Literal and must contain
- * the table length (number of table entries)
- *
- ******************************************************************************/
-
-struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.descriptor_length),
- "Descriptor Length", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
- acpi_gbl_he_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
- acpi_gbl_ll_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
- acpi_gbl_shr_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
- "Interrupt Count", NULL},
- {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
- "Interrupt List", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_dma), "DMA", NULL},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.type), "Speed",
- acpi_gbl_typ_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(dma.bus_master), "Mastering",
- acpi_gbl_bm_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.transfer), "Transfer Type",
- acpi_gbl_siz_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(dma.channel_count), "Channel Count",
- NULL},
- {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(dma.channels[0]), "Channel List",
- NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_start_dpf[4] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf),
- "Start-Dependent-Functions", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(start_dpf.descriptor_length),
- "Descriptor Length", NULL},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority),
- "Compatibility Priority", acpi_gbl_config_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness),
- "Performance/Robustness", acpi_gbl_config_decode}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_end_dpf[1] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_dpf),
- "End-Dependent-Functions", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_io[6] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io), "I/O", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(io.io_decode), "Address Decoding",
- acpi_gbl_io_decode},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.minimum), "Address Minimum", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.maximum), "Address Maximum", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.alignment), "Alignment", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.address_length), "Address Length",
- NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_fixed_io[3] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_io),
- "Fixed I/O", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_io.address), "Address", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_io.address_length),
- "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_vendor[3] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_vendor),
- "Vendor Specific", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(vendor.byte_length), "Length", NULL},
- {ACPI_RSD_LONGLIST, ACPI_RSD_OFFSET(vendor.byte_data[0]), "Vendor Data",
- NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_end_tag[1] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "EndTag",
- NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_memory24[6] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory24),
- "24-Bit Memory Range", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory24.write_protect),
- "Write Protect", acpi_gbl_rw_decode},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.alignment), "Alignment",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.address_length),
- "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_memory32[6] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory32),
- "32-Bit Memory Range", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory32.write_protect),
- "Write Protect", acpi_gbl_rw_decode},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.alignment), "Alignment",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.address_length),
- "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[4] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_memory32),
- "32-Bit Fixed Memory Range", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(fixed_memory32.write_protect),
- "Write Protect", acpi_gbl_rw_decode},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address), "Address",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address_length),
- "Address Length", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
- "16-Bit WORD Address Space", NULL},
- {ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset),
- "Translation Offset", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length),
- "Address Length", NULL},
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
- "32-Bit DWORD Address Space", NULL},
- {ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset),
- "Translation Offset", NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length),
- "Address Length", NULL},
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
- "64-Bit QWORD Address Space", NULL},
- {ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity",
- NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum",
- NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum",
- NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset),
- "Translation Offset", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length),
- "Address Length", NULL},
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
- "64-Bit Extended Address Space", NULL},
- {ACPI_RSD_ADDRESS, 0, NULL, NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity),
- "Granularity", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum),
- "Address Minimum", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum),
- "Address Maximum", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset),
- "Translation Offset", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length),
- "Address Length", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
- "Type-Specific Attribute", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_irq),
- "Extended IRQ", NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.producer_consumer),
- "Type", acpi_gbl_consume_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.triggering),
- "Triggering", acpi_gbl_he_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
- acpi_gbl_ll_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
- acpi_gbl_shr_decode},
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
- NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(extended_irq.interrupt_count),
- "Interrupt Count", NULL},
- {ACPI_RSD_DWORDLIST, ACPI_RSD_OFFSET(extended_irq.interrupts[0]),
- "Interrupt List", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_generic_reg),
- "Generic Register", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.space_id), "Space ID",
- NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_width), "Bit Width",
- NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_offset), "Bit Offset",
- NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.access_size),
- "Access Size", NULL},
- {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
-};
-
-struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
- "ConnectionType", acpi_gbl_ct_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
- "ProducerConsumer", acpi_gbl_consume_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
- acpi_gbl_ppc_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable",
- acpi_gbl_shr_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
- "IoRestriction", acpi_gbl_ior_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
- acpi_gbl_he_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
- acpi_gbl_ll_decode},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
- NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
- "DebounceTimeout", NULL},
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
- "ResourceSource", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
- "PinTableLength", NULL},
- {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
- NULL},
- {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
- NULL},
-};
-
-struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
- "FixedDma", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
- "RequestLines", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
- NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
- acpi_gbl_dts_decode},
-};
-
-#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
- {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
- {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
-
-struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
- "Common Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS
-};
-
-struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
- "I2C Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
- ACPI_RSD_OFFSET(i2c_serial_bus.
- access_mode),
- "AccessMode", acpi_gbl_am_decode},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
- "ConnectionSpeed", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
- "SlaveAddress", NULL},
-};
-
-struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
- "Spi Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
- ACPI_RSD_OFFSET(spi_serial_bus.
- wire_mode), "WireMode",
- acpi_gbl_wm_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
- "DevicePolarity", acpi_gbl_dp_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
- "DataBitLength", NULL},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
- "ClockPhase", acpi_gbl_cph_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
- "ClockPolarity", acpi_gbl_cpo_decode},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
- "DeviceSelection", NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
- "ConnectionSpeed", NULL},
-};
-
-struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
- "Uart Serial Bus", NULL},
- ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
- ACPI_RSD_OFFSET(uart_serial_bus.
- flow_control),
- "FlowControl", acpi_gbl_fc_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
- "StopBits", acpi_gbl_sb_decode},
- {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
- "DataBits", acpi_gbl_bpb_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
- acpi_gbl_ed_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
- acpi_gbl_pt_decode},
- {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
- "LinesEnabled", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
- "RxFifoSize", NULL},
- {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
- "TxFifoSize", NULL},
- {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
- "ConnectionSpeed", NULL},
-};
-
-/*
- * Tables used for common address descriptor flag fields
- */
-static struct acpi_rsdump_info acpi_rs_dump_general_flags[5] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_general_flags), NULL,
- NULL},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.producer_consumer),
- "Consumer/Producer", acpi_gbl_consume_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.decode), "Address Decode",
- acpi_gbl_dec_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.min_address_fixed),
- "Min Relocatability", acpi_gbl_min_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.max_address_fixed),
- "Max Relocatability", acpi_gbl_max_decode}
-};
-
-static struct acpi_rsdump_info acpi_rs_dump_memory_flags[5] = {
- {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory_flags),
- "Resource Type", (void *)"Memory Range"},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.write_protect),
- "Write Protect", acpi_gbl_rw_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.caching),
- "Caching", acpi_gbl_mem_decode},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.range_type),
- "Range Type", acpi_gbl_mtp_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.translation),
- "Translation", acpi_gbl_ttp_decode}
-};
-
-static struct acpi_rsdump_info acpi_rs_dump_io_flags[4] = {
- {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io_flags),
- "Resource Type", (void *)"I/O Range"},
- {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.io.range_type),
- "Range Type", acpi_gbl_rng_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation),
- "Translation", acpi_gbl_ttp_decode},
- {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation_type),
- "Translation Type", acpi_gbl_trs_decode}
-};
-
-/*
- * Table used to dump _PRT contents
- */
-static struct acpi_rsdump_info acpi_rs_dump_prt[5] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_prt), NULL, NULL},
- {ACPI_RSD_UINT64, ACPI_PRT_OFFSET(address), "Address", NULL},
- {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(pin), "Pin", NULL},
- {ACPI_RSD_STRING, ACPI_PRT_OFFSET(source[0]), "Source", NULL},
- {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(source_index), "Source Index", NULL}
-};
-
/*******************************************************************************
*
* FUNCTION: acpi_rs_dump_descriptor
*
- * PARAMETERS: Resource
+ * PARAMETERS: resource - Buffer containing the resource
+ * table - Table entry to decode the resource
*
* RETURN: None
*
- * DESCRIPTION:
+ * DESCRIPTION: Dump a resource descriptor based on a dump table entry.
*
******************************************************************************/
@@ -654,7 +251,8 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
/*
* Optional resource_source for Address resources
*/
- acpi_rs_dump_resource_source(ACPI_CAST_PTR(struct
+ acpi_rs_dump_resource_source(ACPI_CAST_PTR
+ (struct
acpi_resource_source,
target));
break;
@@ -765,8 +363,9 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
ACPI_FUNCTION_ENTRY();
- if (!(acpi_dbg_level & ACPI_LV_RESOURCES)
- || !(_COMPONENT & acpi_dbg_layer)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
return;
}
@@ -827,8 +426,9 @@ void acpi_rs_dump_irq_list(u8 * route_table)
ACPI_FUNCTION_ENTRY();
- if (!(acpi_dbg_level & ACPI_LV_RESOURCES)
- || !(_COMPONENT & acpi_dbg_layer)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) {
return;
}
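
Both hunks above replace the open-coded level/layer test with ACPI_IS_DEBUG_ENABLED(). Judging only from the code it replaces, the macro presumably collapses to the same two-mask check; a sketch under that assumption, with placeholder mask values:

#include <stdio.h>

static unsigned int demo_dbg_level = 0x00010000;	/* placeholder "resources" level bit     */
static unsigned int demo_dbg_layer = 0x00001000;	/* placeholder "resources" component bit */

/*
 * Assumption: ACPI_IS_DEBUG_ENABLED(level, component) reduces to the same
 * two-mask test the removed lines spelled out -- the requested level must be
 * set in acpi_dbg_level AND the calling component must be set in acpi_dbg_layer.
 */
#define DEMO_IS_DEBUG_ENABLED(level, component) \
	(((level) & demo_dbg_level) && ((component) & demo_dbg_layer))

int main(void)
{
	if (!DEMO_IS_DEBUG_ENABLED(0x00010000, 0x00001000)) {
		printf("resource dump suppressed\n");
		return 0;
	}

	printf("resource dump enabled\n");
	return 0;
}
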
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
new file mode 100644
index 000000000000..46192bd53653
--- /dev/null
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -0,0 +1,454 @@
+/*******************************************************************************
+ *
+ * Module Name: rsdumpinfo - Tables used to display resource descriptors.
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsdumpinfo")
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#define ACPI_RSD_OFFSET(f) (u8) ACPI_OFFSET (union acpi_resource_data,f)
+#define ACPI_PRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
+#define ACPI_RSD_TABLE_SIZE(name) (sizeof(name) / sizeof (struct acpi_rsdump_info))
+/*******************************************************************************
+ *
+ * Resource Descriptor info tables
+ *
+ * Note: The first table entry must be a Title or Literal and must contain
+ * the table length (number of table entries)
+ *
+ ******************************************************************************/
+struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.descriptor_length),
+ "Descriptor Length", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
+ acpi_gbl_he_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
+ acpi_gbl_ll_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
+ acpi_gbl_shr_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
+ "Interrupt Count", NULL},
+ {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
+ "Interrupt List", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_dma), "DMA", NULL},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.type), "Speed",
+ acpi_gbl_typ_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(dma.bus_master), "Mastering",
+ acpi_gbl_bm_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.transfer), "Transfer Type",
+ acpi_gbl_siz_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(dma.channel_count), "Channel Count",
+ NULL},
+ {ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(dma.channels[0]), "Channel List",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_start_dpf[4] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf),
+ "Start-Dependent-Functions", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(start_dpf.descriptor_length),
+ "Descriptor Length", NULL},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority),
+ "Compatibility Priority", acpi_gbl_config_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness),
+ "Performance/Robustness", acpi_gbl_config_decode}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_end_dpf[1] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_dpf),
+ "End-Dependent-Functions", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_io[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io), "I/O", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(io.io_decode), "Address Decoding",
+ acpi_gbl_io_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.minimum), "Address Minimum", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(io.maximum), "Address Maximum", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.alignment), "Alignment", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(io.address_length), "Address Length",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_io[3] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_io),
+ "Fixed I/O", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_io.address), "Address", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_io.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_vendor[3] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_vendor),
+ "Vendor Specific", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(vendor.byte_length), "Length", NULL},
+ {ACPI_RSD_LONGLIST, ACPI_RSD_OFFSET(vendor.byte_data[0]), "Vendor Data",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_end_tag[1] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "EndTag",
+ NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_memory24[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory24),
+ "24-Bit Memory Range", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory24.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.alignment), "Alignment",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_memory32[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory32),
+ "32-Bit Memory Range", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory32.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.alignment), "Alignment",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[4] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_memory32),
+ "32-Bit Fixed Memory Range", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(fixed_memory32.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address), "Address",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address_length),
+ "Address Length", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
+ "16-Bit WORD Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
+ "32-Bit DWORD Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
+ "64-Bit QWORD Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity",
+ NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum",
+ NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum",
+ NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
+ "64-Bit Extended Address Space", NULL},
+ {ACPI_RSD_ADDRESS, 0, NULL, NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity),
+ "Granularity", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum),
+ "Address Minimum", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum),
+ "Address Maximum", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset),
+ "Translation Offset", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length),
+ "Address Length", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
+ "Type-Specific Attribute", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_irq),
+ "Extended IRQ", NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.producer_consumer),
+ "Type", acpi_gbl_consume_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.triggering),
+ "Triggering", acpi_gbl_he_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
+ acpi_gbl_ll_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
+ acpi_gbl_shr_decode},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(extended_irq.interrupt_count),
+ "Interrupt Count", NULL},
+ {ACPI_RSD_DWORDLIST, ACPI_RSD_OFFSET(extended_irq.interrupts[0]),
+ "Interrupt List", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_generic_reg),
+ "Generic Register", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.space_id), "Space ID",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_width), "Bit Width",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.bit_offset), "Bit Offset",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(generic_reg.access_size),
+ "Access Size", NULL},
+ {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
+ "ConnectionType", acpi_gbl_ct_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
+ "ProducerConsumer", acpi_gbl_consume_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
+ acpi_gbl_ppc_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharing",
+ acpi_gbl_shr_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
+ "IoRestriction", acpi_gbl_ior_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
+ acpi_gbl_he_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
+ acpi_gbl_ll_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
+ "DebounceTimeout", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
+ "ResourceSource", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
+ "PinTableLength", NULL},
+ {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
+ NULL},
+ {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
+ NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
+ "FixedDma", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
+ "RequestLines", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
+ acpi_gbl_dts_decode},
+};
+
+#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
+ {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
+
+struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
+ "Common Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS
+};
+
+struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
+ "I2C Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+ ACPI_RSD_OFFSET(i2c_serial_bus.
+ access_mode),
+ "AccessMode", acpi_gbl_am_decode},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
+ "ConnectionSpeed", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
+ "SlaveAddress", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
+ "Spi Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+ ACPI_RSD_OFFSET(spi_serial_bus.
+ wire_mode), "WireMode",
+ acpi_gbl_wm_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
+ "DevicePolarity", acpi_gbl_dp_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
+ "DataBitLength", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
+ "ClockPhase", acpi_gbl_cph_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
+ "ClockPolarity", acpi_gbl_cpo_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
+ "DeviceSelection", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
+ "ConnectionSpeed", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
+ "Uart Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
+ ACPI_RSD_OFFSET(uart_serial_bus.
+ flow_control),
+ "FlowControl", acpi_gbl_fc_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
+ "StopBits", acpi_gbl_sb_decode},
+ {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
+ "DataBits", acpi_gbl_bpb_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
+ acpi_gbl_ed_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
+ acpi_gbl_pt_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
+ "LinesEnabled", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
+ "RxFifoSize", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
+ "TxFifoSize", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
+ "ConnectionSpeed", NULL},
+};
+
+/*
+ * Tables used for common address descriptor flag fields
+ */
+struct acpi_rsdump_info acpi_rs_dump_general_flags[5] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_general_flags), NULL,
+ NULL},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.producer_consumer),
+ "Consumer/Producer", acpi_gbl_consume_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.decode), "Address Decode",
+ acpi_gbl_dec_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.min_address_fixed),
+ "Min Relocatability", acpi_gbl_min_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.max_address_fixed),
+ "Max Relocatability", acpi_gbl_max_decode}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_memory_flags[5] = {
+ {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory_flags),
+ "Resource Type", (void *)"Memory Range"},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.write_protect),
+ "Write Protect", acpi_gbl_rw_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.caching),
+ "Caching", acpi_gbl_mem_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.range_type),
+ "Range Type", acpi_gbl_mtp_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.translation),
+ "Translation", acpi_gbl_ttp_decode}
+};
+
+struct acpi_rsdump_info acpi_rs_dump_io_flags[4] = {
+ {ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io_flags),
+ "Resource Type", (void *)"I/O Range"},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.io.range_type),
+ "Range Type", acpi_gbl_rng_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation),
+ "Translation", acpi_gbl_ttp_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation_type),
+ "Translation Type", acpi_gbl_trs_decode}
+};
+
+/*
+ * Table used to dump _PRT contents
+ */
+struct acpi_rsdump_info acpi_rs_dump_prt[5] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_prt), NULL, NULL},
+ {ACPI_RSD_UINT64, ACPI_PRT_OFFSET(address), "Address", NULL},
+ {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(pin), "Pin", NULL},
+ {ACPI_RSD_STRING, ACPI_PRT_OFFSET(source[0]), "Source", NULL},
+ {ACPI_RSD_UINT32, ACPI_PRT_OFFSET(source_index), "Source Index", NULL}
+};
+
+#endif
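
Each acpi_rsdump_info table above is one row per output line: an output opcode (ACPI_RSD_UINT8, ACPI_RSD_1BITFLAG, ...), a byte offset into union acpi_resource_data, a field label, and an optional decode-string table, with the first Title/Literal entry carrying the table length as the header comment notes. acpi_rs_dump_descriptor() can then walk any of these tables generically. A reduced, self-contained sketch of that table-driven dump; the types and field names below are stand-ins, not the real ACPICA definitions:

#include <stdio.h>
#include <stddef.h>

/* Reduced stand-ins for struct acpi_rsdump_info and a dumped resource */
enum demo_rsd_opcode { DEMO_RSD_TITLE, DEMO_RSD_UINT8, DEMO_RSD_1BITFLAG };

struct demo_rsdump_info {
	unsigned char opcode;	/* how to print the field                  */
	unsigned char offset;	/* byte offset into the resource data      */
	const char *name;	/* field label (or title for first entry)  */
	const char **decode;	/* optional value -> string decode table   */
};

struct demo_irq_resource {
	unsigned char triggering;	/* 0 = level, 1 = edge */
	unsigned char interrupt_count;
};

static const char *demo_he_decode[] = { "Level", "Edge" };

#define DEMO_OFFSET(f)		((unsigned char)offsetof(struct demo_irq_resource, f))
#define DEMO_TABLE_SIZE(t)	((unsigned char)(sizeof(t) / sizeof((t)[0])))

/* First entry holds the table length, mirroring the ACPI_RSD_TITLE rows above */
static const struct demo_rsdump_info demo_dump_irq[3] = {
	{DEMO_RSD_TITLE, DEMO_TABLE_SIZE(demo_dump_irq), "IRQ", NULL},
	{DEMO_RSD_1BITFLAG, DEMO_OFFSET(triggering), "Triggering", demo_he_decode},
	{DEMO_RSD_UINT8, DEMO_OFFSET(interrupt_count), "Interrupt Count", NULL},
};

static void demo_dump_descriptor(const void *resource,
				 const struct demo_rsdump_info *table)
{
	const unsigned char *base = resource;
	unsigned int count = table->offset;	/* length stored in first entry */
	unsigned int i;

	for (i = 0; i < count; i++, table++) {
		switch (table->opcode) {
		case DEMO_RSD_TITLE:
			printf("%s descriptor:\n", table->name);
			break;
		case DEMO_RSD_UINT8:
			printf("  %-16s: %u\n", table->name,
			       (unsigned int)base[table->offset]);
			break;
		case DEMO_RSD_1BITFLAG:
			printf("  %-16s: %s\n", table->name,
			       table->decode[base[table->offset] & 0x01]);
			break;
		}
	}
}

int main(void)
{
	struct demo_irq_resource irq = { 1, 2 };	/* edge triggered, 2 IRQs */

	demo_dump_descriptor(&irq, demo_dump_irq);
	return 0;
}
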
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index a9fa5158200b..41fed78e0de6 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index f6a081057a22..ca183755a6f9 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index e23a9ec248cb..364decc1028a 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("rsirq")
* acpi_rs_get_irq
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
+struct acpi_rsconvert_info acpi_rs_get_irq[9] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IRQ,
ACPI_RS_SIZE(struct acpi_resource_irq),
ACPI_RSC_TABLE_SIZE(acpi_rs_get_irq)},
@@ -80,7 +80,7 @@ struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
{ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 3},
- /* Get flags: Triggering[0], Polarity[3], Sharing[4] */
+ /* Get flags: Triggering[0], Polarity[3], Sharing[4], Wake[5] */
{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
AML_OFFSET(irq.flags),
@@ -92,7 +92,11 @@ struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
AML_OFFSET(irq.flags),
- 4}
+ 4},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.wake_capable),
+ AML_OFFSET(irq.flags),
+ 5}
};
/*******************************************************************************
@@ -101,7 +105,7 @@ struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
+struct acpi_rsconvert_info acpi_rs_set_irq[14] = {
/* Start with a default descriptor of length 3 */
{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IRQ,
@@ -114,7 +118,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
AML_OFFSET(irq.irq_mask),
ACPI_RS_OFFSET(data.irq.interrupt_count)},
- /* Set the flags byte */
+ /* Set flags: Triggering[0], Polarity[3], Sharing[4], Wake[5] */
{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
AML_OFFSET(irq.flags),
@@ -128,6 +132,10 @@ struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
AML_OFFSET(irq.flags),
4},
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.wake_capable),
+ AML_OFFSET(irq.flags),
+ 5},
+
/*
* All done if the output descriptor length is required to be 3
* (i.e., optimization to 2 bytes cannot be attempted)
@@ -181,7 +189,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = {
+struct acpi_rsconvert_info acpi_rs_convert_ext_irq[10] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_EXTENDED_IRQ,
ACPI_RS_SIZE(struct acpi_resource_extended_irq),
ACPI_RSC_TABLE_SIZE(acpi_rs_convert_ext_irq)},
@@ -190,8 +198,10 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = {
sizeof(struct aml_resource_extended_irq),
0},
- /* Flag bits */
-
+ /*
+ * Flags: Producer/Consumer[0], Triggering[1], Polarity[2],
+ * Sharing[3], Wake[4]
+ */
{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.producer_consumer),
AML_OFFSET(extended_irq.flags),
0},
@@ -208,19 +218,21 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_irq[9] = {
AML_OFFSET(extended_irq.flags),
3},
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.wake_capable),
+ AML_OFFSET(extended_irq.flags),
+ 4},
+
/* IRQ Table length (Byte4) */
{ACPI_RSC_COUNT, ACPI_RS_OFFSET(data.extended_irq.interrupt_count),
AML_OFFSET(extended_irq.interrupt_count),
- sizeof(u32)}
- ,
+ sizeof(u32)},
/* Copy every IRQ in the table, each is 32 bits */
{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.extended_irq.interrupts[0]),
AML_OFFSET(extended_irq.interrupts[0]),
- 0}
- ,
+ 0},
/* Optional resource_source (Index and String) */
@@ -285,7 +297,6 @@ struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
* request_lines
* Channels
*/
-
{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines),
AML_OFFSET(fixed_dma.request_lines),
2},
@@ -293,5 +304,4 @@ struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width),
AML_OFFSET(fixed_dma.width),
1},
-
};
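
The rsirq.c hunks above add a Wake[5] bit to the IRQ and Extended IRQ conversion tables, so the wake_capable member of struct acpi_resource is populated on conversion. A minimal, hypothetical driver-side check (names are illustrative; the resource is assumed to come from one of the resource-walk interfaces):

/* Illustrative only: report the newly decoded Wake bit of an IRQ resource */
static u8 irq_resource_is_wake_capable(struct acpi_resource *resource)
{
	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_IRQ:
		return (resource->data.irq.wake_capable);

	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
		return (resource->data.extended_irq.wake_capable);

	default:
		return (FALSE);
	}
}
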
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 8b64db9a3fd2..ee2e206fc6c8 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -217,9 +217,10 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Perform final sanity check on the new AML resource descriptor */
- status =
- acpi_ut_validate_resource(ACPI_CAST_PTR
- (union aml_resource, aml), NULL);
+ status = acpi_ut_validate_resource(NULL,
+ ACPI_CAST_PTR(union
+ aml_resource,
+ aml), NULL);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 4fd611ad02b4..ebc773a1b350 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -156,8 +156,7 @@ struct acpi_rsconvert_info acpi_rs_get_vendor_small[3] = {
{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
0,
- sizeof(u8)}
- ,
+ sizeof(u8)},
/* Vendor data */
@@ -181,8 +180,7 @@ struct acpi_rsconvert_info acpi_rs_get_vendor_large[3] = {
{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
0,
- sizeof(u8)}
- ,
+ sizeof(u8)},
/* Vendor data */
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index c6f291c2bc83..d5bf05a96096 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -136,30 +136,30 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
/*
* Mask and shift the flag bit
*/
- ACPI_SET8(destination) = (u8)
- ((ACPI_GET8(source) >> info->value) & 0x01);
+ ACPI_SET8(destination,
+ ((ACPI_GET8(source) >> info->value) & 0x01));
break;
case ACPI_RSC_2BITFLAG:
/*
* Mask and shift the flag bits
*/
- ACPI_SET8(destination) = (u8)
- ((ACPI_GET8(source) >> info->value) & 0x03);
+ ACPI_SET8(destination,
+ ((ACPI_GET8(source) >> info->value) & 0x03));
break;
case ACPI_RSC_3BITFLAG:
/*
* Mask and shift the flag bits
*/
- ACPI_SET8(destination) = (u8)
- ((ACPI_GET8(source) >> info->value) & 0x07);
+ ACPI_SET8(destination,
+ ((ACPI_GET8(source) >> info->value) & 0x07));
break;
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
- ACPI_SET8(destination) = (u8) item_count;
+ ACPI_SET8(destination, item_count);
resource->length = resource->length +
(info->value * (item_count - 1));
@@ -168,7 +168,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
case ACPI_RSC_COUNT16:
item_count = aml_resource_length;
- ACPI_SET16(destination) = item_count;
+ ACPI_SET16(destination, item_count);
resource->length = resource->length +
(info->value * (item_count - 1));
@@ -181,13 +181,13 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
resource->length = resource->length + item_count;
item_count = item_count / 2;
- ACPI_SET16(destination) = item_count;
+ ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_COUNT_GPIO_VEN:
item_count = ACPI_GET8(source);
- ACPI_SET8(destination) = (u8)item_count;
+ ACPI_SET8(destination, item_count);
resource->length = resource->length +
(info->value * item_count);
@@ -216,7 +216,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
}
resource->length = resource->length + item_count;
- ACPI_SET16(destination) = item_count;
+ ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_COUNT_SERIAL_VEN:
@@ -224,7 +224,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
item_count = ACPI_GET16(source) - info->value;
resource->length = resource->length + item_count;
- ACPI_SET16(destination) = item_count;
+ ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_COUNT_SERIAL_RES:
@@ -234,7 +234,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
- ACPI_GET16(source) - info->value;
resource->length = resource->length + item_count;
- ACPI_SET16(destination) = item_count;
+ ACPI_SET16(destination, item_count);
break;
case ACPI_RSC_LENGTH:
@@ -385,7 +385,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
}
target = ACPI_ADD_PTR(char, resource, info->value);
- ACPI_SET8(target) = (u8) item_count;
+ ACPI_SET8(target, item_count);
break;
case ACPI_RSC_BITMASK16:
@@ -401,7 +401,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
}
target = ACPI_ADD_PTR(char, resource, info->value);
- ACPI_SET8(target) = (u8) item_count;
+ ACPI_SET8(target, item_count);
break;
case ACPI_RSC_EXIT_NE:
@@ -514,37 +514,40 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
/*
* Clear the flag byte
*/
- ACPI_SET8(destination) = 0;
+ ACPI_SET8(destination, 0);
break;
case ACPI_RSC_1BITFLAG:
/*
* Mask and shift the flag bit
*/
- ACPI_SET8(destination) |= (u8)
- ((ACPI_GET8(source) & 0x01) << info->value);
+ ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
+ ((ACPI_GET8(source) & 0x01) << info->
+ value));
break;
case ACPI_RSC_2BITFLAG:
/*
* Mask and shift the flag bits
*/
- ACPI_SET8(destination) |= (u8)
- ((ACPI_GET8(source) & 0x03) << info->value);
+ ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
+ ((ACPI_GET8(source) & 0x03) << info->
+ value));
break;
case ACPI_RSC_3BITFLAG:
/*
* Mask and shift the flag bits
*/
- ACPI_SET8(destination) |= (u8)
- ((ACPI_GET8(source) & 0x07) << info->value);
+ ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
+ ((ACPI_GET8(source) & 0x07) << info->
+ value));
break;
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
- ACPI_SET8(destination) = (u8) item_count;
+ ACPI_SET8(destination, item_count);
aml_length =
(u16) (aml_length +
@@ -561,18 +564,18 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
case ACPI_RSC_COUNT_GPIO_PIN:
item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = (u16)aml_length;
+ ACPI_SET16(destination, aml_length);
aml_length = (u16)(aml_length + item_count * 2);
target = ACPI_ADD_PTR(void, aml, info->value);
- ACPI_SET16(target) = (u16)aml_length;
+ ACPI_SET16(target, aml_length);
acpi_rs_set_resource_length(aml_length, aml);
break;
case ACPI_RSC_COUNT_GPIO_VEN:
item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = (u16)item_count;
+ ACPI_SET16(destination, item_count);
aml_length =
(u16)(aml_length + (info->value * item_count));
@@ -584,7 +587,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
/* Set resource source string length */
item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = (u16)aml_length;
+ ACPI_SET16(destination, aml_length);
/* Compute offset for the Vendor Data */
@@ -594,7 +597,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
/* Set vendor offset only if there is vendor data */
if (resource->data.gpio.vendor_length) {
- ACPI_SET16(target) = (u16)aml_length;
+ ACPI_SET16(target, aml_length);
}
acpi_rs_set_resource_length(aml_length, aml);
@@ -603,7 +606,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
case ACPI_RSC_COUNT_SERIAL_VEN:
item_count = ACPI_GET16(source);
- ACPI_SET16(destination) = item_count + info->value;
+ ACPI_SET16(destination, item_count + info->value);
aml_length = (u16)(aml_length + item_count);
acpi_rs_set_resource_length(aml_length, aml);
break;
@@ -686,7 +689,8 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
* Optional resource_source (Index and String)
*/
aml_length =
- acpi_rs_set_resource_source(aml, (acpi_rs_length)
+ acpi_rs_set_resource_source(aml,
+ (acpi_rs_length)
aml_length, source);
acpi_rs_set_resource_length(aml_length, aml);
break;
@@ -706,10 +710,12 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
/*
* 8-bit encoded bitmask (DMA macro)
*/
- ACPI_SET8(destination) = (u8)
- acpi_rs_encode_bitmask(source,
- *ACPI_ADD_PTR(u8, resource,
- info->value));
+ ACPI_SET8(destination,
+ acpi_rs_encode_bitmask(source,
+ *ACPI_ADD_PTR(u8,
+ resource,
+ info->
+ value)));
break;
case ACPI_RSC_BITMASK16:
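
The rsmisc.c hunks above switch every ACPI_SET8/ACPI_SET16 call from an lvalue-style macro (ACPI_SET8(dest) = x) to a two-argument store, and route the OR-in flag cases through ACPI_SET_BIT. As a rough sketch (an assumption; the real definitions live in the ACPICA headers and may differ in detail), the new form behaves like:

/* Approximate two-argument store macros; ACPI_CAST8 appears in the hunks
 * above, ACPI_CAST16 is assumed by analogy. */
#define ACPI_SET8(ptr, val)	(*ACPI_CAST8(ptr) = (u8)(val))
#define ACPI_SET16(ptr, val)	(*ACPI_CAST16(ptr) = (u16)(val))
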
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index 9aa5e689b444..fe49fc43e10f 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("rsserial")
* acpi_rs_convert_gpio
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
+struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO,
ACPI_RS_SIZE(struct acpi_resource_gpio),
ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)},
@@ -75,10 +75,14 @@ struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
AML_OFFSET(gpio.flags),
0},
- {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
AML_OFFSET(gpio.int_flags),
3},
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.wake_capable),
+ AML_OFFSET(gpio.int_flags),
+ 4},
+
{ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction),
AML_OFFSET(gpio.int_flags),
0},
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 37d5241c0acf..a44953c6f75d 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,7 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
mask |= (0x1 << list[i]);
}
- return mask;
+ return (mask);
}
/*******************************************************************************
@@ -358,8 +358,10 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
*
* Zero the entire area of the buffer.
*/
- total_length = (u32)
- ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1;
+ total_length =
+ (u32)
+ ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
+ 1;
total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
@@ -675,7 +677,9 @@ acpi_rs_get_method_data(acpi_handle handle,
/* Execute the method, no parameters */
status =
- acpi_ut_evaluate_object(handle, path, ACPI_BTYPE_BUFFER, &obj_desc);
+ acpi_ut_evaluate_object(ACPI_CAST_PTR
+ (struct acpi_namespace_node, handle), path,
+ ACPI_BTYPE_BUFFER, &obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 5aad744b5b83..15d6eaef0e28 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -423,7 +423,7 @@ ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
*
* RETURN: Status
*
- * DESCRIPTION: Walk a resource template for the specified evice to find a
+ * DESCRIPTION: Walk a resource template for the specified device to find a
* vendor-defined resource that matches the supplied UUID and
* UUID subtype. Returns a struct acpi_resource of type Vendor.
*
@@ -522,57 +522,42 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
/*******************************************************************************
*
- * FUNCTION: acpi_walk_resources
+ * FUNCTION: acpi_walk_resource_buffer
*
- * PARAMETERS: device_handle - Handle to the device object for the
- * device we are querying
- * name - Method name of the resources we want.
- * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
- * METHOD_NAME__AEI)
+ * PARAMETERS: buffer - Formatted buffer returned by one of the
+ * various Get*Resource functions
* user_function - Called for each resource
* context - Passed to user_function
*
* RETURN: Status
*
- * DESCRIPTION: Retrieves the current or possible resource list for the
- * specified device. The user_function is called once for
- * each resource in the list.
+ * DESCRIPTION: Walks the input resource template. The user_function is called
+ * once for each resource in the list.
*
******************************************************************************/
+
acpi_status
-acpi_walk_resources(acpi_handle device_handle,
- char *name,
- acpi_walk_resource_callback user_function, void *context)
+acpi_walk_resource_buffer(struct acpi_buffer * buffer,
+ acpi_walk_resource_callback user_function,
+ void *context)
{
- acpi_status status;
- struct acpi_buffer buffer;
+ acpi_status status = AE_OK;
struct acpi_resource *resource;
struct acpi_resource *resource_end;
- ACPI_FUNCTION_TRACE(acpi_walk_resources);
+ ACPI_FUNCTION_TRACE(acpi_walk_resource_buffer);
/* Parameter validation */
- if (!device_handle || !user_function || !name ||
- (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
+ if (!buffer || !buffer->pointer || !user_function) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Get the _CRS/_PRS/_AEI resource list */
-
- buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_rs_get_method_data(device_handle, name, &buffer);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Buffer now contains the resource list */
+ /* Buffer contains the resource list and length */
- resource = ACPI_CAST_PTR(struct acpi_resource, buffer.pointer);
+ resource = ACPI_CAST_PTR(struct acpi_resource, buffer->pointer);
resource_end =
- ACPI_ADD_PTR(struct acpi_resource, buffer.pointer, buffer.length);
+ ACPI_ADD_PTR(struct acpi_resource, buffer->pointer, buffer->length);
/* Walk the resource list until the end_tag is found (or buffer end) */
@@ -606,11 +591,63 @@ acpi_walk_resources(acpi_handle device_handle,
/* Get the next resource descriptor */
- resource =
- ACPI_ADD_PTR(struct acpi_resource, resource,
- resource->length);
+ resource = ACPI_NEXT_RESOURCE(resource);
}
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_walk_resource_buffer)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_walk_resources
+ *
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device we are querying
+ * name - Method name of the resources we want.
+ * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
+ * METHOD_NAME__AEI)
+ * user_function - Called for each resource
+ * context - Passed to user_function
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Retrieves the current or possible resource list for the
+ * specified device. The user_function is called once for
+ * each resource in the list.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_walk_resources(acpi_handle device_handle,
+ char *name,
+ acpi_walk_resource_callback user_function, void *context)
+{
+ acpi_status status;
+ struct acpi_buffer buffer;
+
+ ACPI_FUNCTION_TRACE(acpi_walk_resources);
+
+ /* Parameter validation */
+
+ if (!device_handle || !user_function || !name ||
+ (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the _CRS/_PRS/_AEI resource list */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_rs_get_method_data(device_handle, name, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Walk the resource list and cleanup */
+
+ status = acpi_walk_resource_buffer(&buffer, user_function, context);
ACPI_FREE(buffer.pointer);
return_ACPI_STATUS(status);
}
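
The rsxface.c change splits the template walk out of acpi_walk_resources into the new public acpi_walk_resource_buffer, leaving acpi_walk_resources as a thin wrapper that fetches _CRS/_PRS/_AEI and then walks the returned buffer. A hedged usage sketch (the callback prototype is inferred from acpi_rs_match_vendor_resource above; all names are illustrative):

/* Illustrative only: count the descriptors in a device's _CRS list */
static acpi_status count_one_resource(struct acpi_resource *resource,
				      void *context)
{
	u32 *count = context;

	(*count)++;
	return (AE_OK);
}

static acpi_status count_crs_resources(acpi_handle device, u32 *count)
{
	*count = 0;
	return (acpi_walk_resources(device, METHOD_NAME__CRS,
				    count_one_resource, count));
}
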
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 390651860bf0..74181bf181ec 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -172,6 +172,7 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
* FUNCTION: acpi_tb_init_generic_address
*
* PARAMETERS: generic_address - GAS struct to be initialized
+ * space_id - ACPI Space ID for this register
* byte_width - Width of this register
* address - Address of the register
*
@@ -407,8 +408,8 @@ static void acpi_tb_convert_fadt(void)
* should be zero are indeed zero. This will workaround BIOSs that
* inadvertently place values in these fields.
*
- * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
- * offset 45, 55, 95, and the word located at offset 109, 110.
+ * The ACPI 1.0 reserved fields that will be zeroed are the bytes located
+ * at offset 45, 55, 95, and the word located at offset 109, 110.
*
* Note: The FADT revision value is unreliable. Only the length can be
* trusted.
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 77d1db29a725..e4f4f02d49e7 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index f540ae462925..e57cd38004e3 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 285e24b97382..ce3d5db39a9c 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -147,7 +147,7 @@ acpi_status acpi_tb_initialize_facs(void)
ACPI_CAST_INDIRECT_PTR(struct
acpi_table_header,
&acpi_gbl_FACS));
- return status;
+ return (status);
}
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index f5632780421d..b35a5e6d653a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,6 @@
#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
-#include "acnamesp.h"
#include "actables.h"
#define _COMPONENT ACPI_TABLES
@@ -437,7 +436,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
*
******************************************************************************/
acpi_status
-acpi_install_table_handler(acpi_tbl_handler handler, void *context)
+acpi_install_table_handler(acpi_table_handler handler, void *context)
{
acpi_status status;
@@ -483,7 +482,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
* DESCRIPTION: Remove table event handler
*
******************************************************************************/
-acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
+acpi_status acpi_remove_table_handler(acpi_table_handler handler)
{
acpi_status status;
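
tbxface.c renames the table-event handler type from acpi_tbl_handler to acpi_table_handler. A hedged sketch of installing and removing such a handler (only the install/remove signatures appear in the hunks above; the callback prototype is an assumption and should be checked against this release's actypes.h):

/* Assumed callback prototype for acpi_table_handler (verify in actypes.h) */
static acpi_status my_table_event_handler(u32 event, void *table, void *context)
{
	return (AE_OK);
}

static acpi_status register_table_events(void)
{
	acpi_status status;

	status = acpi_install_table_handler(my_table_event_handler, NULL);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* ... later, when the handler is no longer needed ... */

	return (acpi_remove_table_handler(my_table_event_handler));
}
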
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index a5e1e4e47098..67e046ec8f0a 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -192,7 +192,7 @@ static acpi_status acpi_tb_load_namespace(void)
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
}
- ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
+ ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired"));
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 28f330230f99..7c2ecfb7c2c3 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 64880306133d..698b9d385516 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -214,7 +214,7 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
(space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
- return_UINT32(0);
+ return_VALUE(0);
}
range_info = acpi_gbl_address_range_list[space_id];
@@ -256,7 +256,7 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
range_info = range_info->next;
}
- return_UINT32(overlap_count);
+ return_VALUE(overlap_count);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index ed29d474095e..e0ffb580f4b0 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index e1d40ed26390..e0e8579deaac 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 294692ae76e9..e4c9291fc0a3 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -785,7 +785,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
break;
@@ -795,7 +795,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
&dest_desc->event.
os_semaphore);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
break;
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 5d95166245ae..c57d9cc07ba9 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -166,11 +166,9 @@ acpi_debug_print(u32 requested_debug_level,
acpi_thread_id thread_id;
va_list args;
- /*
- * Stay silent if the debug level or component ID is disabled
- */
- if (!(requested_debug_level & acpi_dbg_level) ||
- !(component_id & acpi_dbg_layer)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(requested_debug_level, component_id)) {
return;
}
@@ -236,8 +234,9 @@ acpi_debug_print_raw(u32 requested_debug_level,
{
va_list args;
- if (!(requested_debug_level & acpi_dbg_level) ||
- !(component_id & acpi_dbg_layer)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(requested_debug_level, component_id)) {
return;
}
@@ -272,9 +271,13 @@ acpi_ut_trace(u32 line_number,
acpi_gbl_nesting_level++;
acpi_ut_track_stack_ptr();
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s\n", acpi_gbl_fn_entry_str);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s\n", acpi_gbl_fn_entry_str);
+ }
}
ACPI_EXPORT_SYMBOL(acpi_ut_trace)
@@ -304,9 +307,14 @@ acpi_ut_trace_ptr(u32 line_number,
acpi_gbl_nesting_level++;
acpi_ut_track_stack_ptr();
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s %p\n", acpi_gbl_fn_entry_str, pointer);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %p\n", acpi_gbl_fn_entry_str,
+ pointer);
+ }
}
/*******************************************************************************
@@ -335,9 +343,14 @@ acpi_ut_trace_str(u32 line_number,
acpi_gbl_nesting_level++;
acpi_ut_track_stack_ptr();
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s %s\n", acpi_gbl_fn_entry_str, string);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %s\n", acpi_gbl_fn_entry_str,
+ string);
+ }
}
/*******************************************************************************
@@ -366,9 +379,14 @@ acpi_ut_trace_u32(u32 line_number,
acpi_gbl_nesting_level++;
acpi_ut_track_stack_ptr();
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s %08X\n", acpi_gbl_fn_entry_str, integer);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %08X\n",
+ acpi_gbl_fn_entry_str, integer);
+ }
}
/*******************************************************************************
@@ -393,9 +411,13 @@ acpi_ut_exit(u32 line_number,
const char *module_name, u32 component_id)
{
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s\n", acpi_gbl_fn_exit_str);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s\n", acpi_gbl_fn_exit_str);
+ }
acpi_gbl_nesting_level--;
}
@@ -425,17 +447,23 @@ acpi_ut_status_exit(u32 line_number,
u32 component_id, acpi_status status)
{
- if (ACPI_SUCCESS(status)) {
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name,
- component_id, "%s %s\n", acpi_gbl_fn_exit_str,
- acpi_format_exception(status));
- } else {
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name,
- component_id, "%s ****Exception****: %s\n",
- acpi_gbl_fn_exit_str,
- acpi_format_exception(status));
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ if (ACPI_SUCCESS(status)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name,
+ module_name, component_id, "%s %s\n",
+ acpi_gbl_fn_exit_str,
+ acpi_format_exception(status));
+ } else {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name,
+ module_name, component_id,
+ "%s ****Exception****: %s\n",
+ acpi_gbl_fn_exit_str,
+ acpi_format_exception(status));
+ }
}
acpi_gbl_nesting_level--;
@@ -465,10 +493,15 @@ acpi_ut_value_exit(u32 line_number,
const char *module_name, u32 component_id, u64 value)
{
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s %8.8X%8.8X\n", acpi_gbl_fn_exit_str,
- ACPI_FORMAT_UINT64(value));
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %8.8X%8.8X\n",
+ acpi_gbl_fn_exit_str,
+ ACPI_FORMAT_UINT64(value));
+ }
acpi_gbl_nesting_level--;
}
@@ -497,9 +530,14 @@ acpi_ut_ptr_exit(u32 line_number,
const char *module_name, u32 component_id, u8 *ptr)
{
- acpi_debug_print(ACPI_LV_FUNCTIONS,
- line_number, function_name, module_name, component_id,
- "%s %p\n", acpi_gbl_fn_exit_str, ptr);
+ /* Check if enabled up-front for performance */
+
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
+ acpi_debug_print(ACPI_LV_FUNCTIONS,
+ line_number, function_name, module_name,
+ component_id, "%s %p\n", acpi_gbl_fn_exit_str,
+ ptr);
+ }
acpi_gbl_nesting_level--;
}
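
The utdebug.c hunks wrap every trace and exit helper in an ACPI_IS_DEBUG_ENABLED() check so the format arguments are not evaluated when output is disabled. Based on the two-mask test the macro replaces above, it can be read as roughly:

/* Approximate expansion, inferred from the condition it replaces;
 * the real macro is defined in the ACPICA headers. */
#define ACPI_IS_DEBUG_ENABLED(level, component) \
	(((level) & acpi_dbg_level) && ((component) & acpi_dbg_layer))
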
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 60a158472d82..11e2e02e1618 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 798105443d0f..2541de420249 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -340,7 +340,7 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
{
union acpi_operand_object **internal_obj;
- ACPI_FUNCTION_TRACE(ut_delete_internal_object_list);
+ ACPI_FUNCTION_ENTRY();
/* Walk the null-terminated internal list */
@@ -351,7 +351,7 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
/* Free the combined parameter pointer list and object array */
ACPI_FREE(obj_list);
- return_VOID;
+ return;
}
/*******************************************************************************
@@ -484,7 +484,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
union acpi_generic_state *state;
u32 i;
- ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object);
+ ACPI_FUNCTION_NAME(ut_update_object_reference);
while (object) {
@@ -493,7 +493,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
if (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Object %p is NS handle\n", object));
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
}
/*
@@ -530,18 +530,42 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
*/
for (i = 0; i < object->package.count; i++) {
/*
- * Push each element onto the stack for later processing.
- * Note: There can be null elements within the package,
- * these are simply ignored
+ * Null package elements are legal and can be simply
+ * ignored.
*/
- status =
- acpi_ut_create_update_state_and_push
- (object->package.elements[i], action,
- &state_list);
- if (ACPI_FAILURE(status)) {
- goto error_exit;
+ next_object = object->package.elements[i];
+ if (!next_object) {
+ continue;
+ }
+
+ switch (next_object->common.type) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+ /*
+ * For these very simple sub-objects, we can just
+ * update the reference count here and continue.
+ * Greatly increases performance of this operation.
+ */
+ acpi_ut_update_ref_count(next_object,
+ action);
+ break;
+
+ default:
+ /*
+ * For complex sub-objects, push them onto the stack
+ * for later processing (this eliminates recursion.)
+ */
+ status =
+ acpi_ut_create_update_state_and_push
+ (next_object, action, &state_list);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+ break;
}
}
+ next_object = NULL;
break;
case ACPI_TYPE_BUFFER_FIELD:
@@ -619,7 +643,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
}
}
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
error_exit:
@@ -633,7 +657,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
acpi_ut_delete_generic_state(state);
}
- return_ACPI_STATUS(status);
+ return (status);
}
/*******************************************************************************
@@ -652,12 +676,12 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
void acpi_ut_add_reference(union acpi_operand_object *object)
{
- ACPI_FUNCTION_TRACE_PTR(ut_add_reference, object);
+ ACPI_FUNCTION_NAME(ut_add_reference);
/* Ensure that we have a valid object */
if (!acpi_ut_valid_internal_object(object)) {
- return_VOID;
+ return;
}
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
@@ -667,7 +691,7 @@ void acpi_ut_add_reference(union acpi_operand_object *object)
/* Increment the reference count */
(void)acpi_ut_update_object_reference(object, REF_INCREMENT);
- return_VOID;
+ return;
}
/*******************************************************************************
@@ -685,7 +709,7 @@ void acpi_ut_add_reference(union acpi_operand_object *object)
void acpi_ut_remove_reference(union acpi_operand_object *object)
{
- ACPI_FUNCTION_TRACE_PTR(ut_remove_reference, object);
+ ACPI_FUNCTION_NAME(ut_remove_reference);
/*
* Allow a NULL pointer to be passed in, just ignore it. This saves
@@ -694,13 +718,13 @@ void acpi_ut_remove_reference(union acpi_operand_object *object)
*/
if (!object ||
(ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) {
- return_VOID;
+ return;
}
/* Ensure that we have a valid object */
if (!acpi_ut_valid_internal_object(object)) {
- return_VOID;
+ return;
}
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
@@ -713,5 +737,5 @@ void acpi_ut_remove_reference(union acpi_operand_object *object)
* of all subobjects!)
*/
(void)acpi_ut_update_object_reference(object, REF_DECREMENT);
- return_VOID;
+ return;
}
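
The utdelete.c hunks replace the function-trace macros in the reference-counting helpers with ACPI_FUNCTION_NAME/ACPI_FUNCTION_ENTRY and ref-count simple package elements (Integer/String/Buffer) in place instead of pushing them onto the update-state stack. For context, the helpers are used in pairs around any transient hold on an operand object (illustrative only):

/* Illustrative pairing of the helpers shown above */
static void use_operand(union acpi_operand_object *obj_desc)
{
	acpi_ut_add_reference(obj_desc);	/* REF_INCREMENT internally */

	/* ... work with obj_desc ... */

	acpi_ut_remove_reference(obj_desc);	/* REF_DECREMENT internally */
}
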
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index a9c65fbea5f4..c3f3a7e7bdc7 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,7 +68,7 @@ ACPI_MODULE_NAME("uteval")
******************************************************************************/
acpi_status
-acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
+acpi_ut_evaluate_object(struct acpi_namespace_node * prefix_node,
char *path,
u32 expected_return_btypes,
union acpi_operand_object **return_desc)
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index 23b98945f6b7..a0ab7c02e87c 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index ed1893155f8b..ffecf4b4f0dd 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -293,11 +293,11 @@ acpi_status acpi_ut_init_globals(void)
/* GPE support */
+ acpi_gbl_all_gpes_initialized = FALSE;
acpi_gbl_gpe_xrupt_list_head = NULL;
acpi_gbl_gpe_fadt_blocks[0] = NULL;
acpi_gbl_gpe_fadt_blocks[1] = NULL;
acpi_current_gpe_count = 0;
- acpi_gbl_all_gpes_initialized = FALSE;
acpi_gbl_global_event_handler = NULL;
@@ -357,17 +357,24 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_root_node_struct.peer = NULL;
acpi_gbl_root_node_struct.object = NULL;
+#ifdef ACPI_DISASSEMBLER
+ acpi_gbl_external_list = NULL;
+#endif
+
#ifdef ACPI_DEBUG_OUTPUT
acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
#endif
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
acpi_gbl_display_final_mem_stats = FALSE;
+ acpi_gbl_disable_mem_tracking = FALSE;
#endif
return_ACPI_STATUS(AE_OK);
}
+/* Public globals */
+
ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 774c3aefbf5d..43a170a74a61 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 246798e4c938..c5d1ac44c07d 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index b1eb7f17e110..5c26ad420344 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,11 +66,11 @@ acpi_status acpi_ut_create_rw_lock(struct acpi_rw_lock *lock)
lock->num_readers = 0;
status = acpi_os_create_mutex(&lock->reader_mutex);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
status = acpi_os_create_mutex(&lock->writer_mutex);
- return status;
+ return (status);
}
void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock)
@@ -108,7 +108,7 @@ acpi_status acpi_ut_acquire_read_lock(struct acpi_rw_lock *lock)
status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
/* Acquire the write lock only for the first reader */
@@ -121,7 +121,7 @@ acpi_status acpi_ut_acquire_read_lock(struct acpi_rw_lock *lock)
}
acpi_os_release_mutex(lock->reader_mutex);
- return status;
+ return (status);
}
acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
@@ -130,7 +130,7 @@ acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER);
if (ACPI_FAILURE(status)) {
- return status;
+ return (status);
}
/* Release the write lock only for the very last reader */
@@ -141,7 +141,7 @@ acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
}
acpi_os_release_mutex(lock->reader_mutex);
- return status;
+ return (status);
}
/*******************************************************************************
@@ -165,7 +165,7 @@ acpi_status acpi_ut_acquire_write_lock(struct acpi_rw_lock *lock)
acpi_status status;
status = acpi_os_acquire_mutex(lock->writer_mutex, ACPI_WAIT_FOREVER);
- return status;
+ return (status);
}
void acpi_ut_release_write_lock(struct acpi_rw_lock *lock)
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 49563674833a..909fe66e1934 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 9286a69eb9aa..785fdd07ae56 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,36 +48,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utmisc")
-#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
-/*******************************************************************************
- *
- * FUNCTION: ut_convert_backslashes
- *
- * PARAMETERS: pathname - File pathname string to be converted
- *
- * RETURN: Modifies the input Pathname
- *
- * DESCRIPTION: Convert all backslashes (0x5C) to forward slashes (0x2F) within
- * the entire input file pathname string.
- *
- ******************************************************************************/
-void ut_convert_backslashes(char *pathname)
-{
-
- if (!pathname) {
- return;
- }
-
- while (*pathname) {
- if (*pathname == '\\') {
- *pathname = '/';
- }
-
- pathname++;
- }
-}
-#endif
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_is_pci_root_bridge
@@ -89,7 +59,6 @@ void ut_convert_backslashes(char *pathname)
* DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
*
******************************************************************************/
-
u8 acpi_ut_is_pci_root_bridge(char *id)
{
@@ -136,362 +105,6 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_allocate_owner_id
- *
- * PARAMETERS: owner_id - Where the new owner ID is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to
- * track objects created by the table or method, to be deleted
- * when the method exits or the table is unloaded.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
-{
- u32 i;
- u32 j;
- u32 k;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
-
- /* Guard against multiple allocations of ID to the same location */
-
- if (*owner_id) {
- ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
- *owner_id));
- return_ACPI_STATUS(AE_ALREADY_EXISTS);
- }
-
- /* Mutex for the global ID mask */
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Find a free owner ID, cycle through all possible IDs on repeated
- * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have
- * to be scanned twice.
- */
- for (i = 0, j = acpi_gbl_last_owner_id_index;
- i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) {
- if (j >= ACPI_NUM_OWNERID_MASKS) {
- j = 0; /* Wraparound to start of mask array */
- }
-
- for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
- if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
-
- /* There are no free IDs in this mask */
-
- break;
- }
-
- if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) {
- /*
- * Found a free ID. The actual ID is the bit index plus one,
- * making zero an invalid Owner ID. Save this as the last ID
- * allocated and update the global ID mask.
- */
- acpi_gbl_owner_id_mask[j] |= (1 << k);
-
- acpi_gbl_last_owner_id_index = (u8)j;
- acpi_gbl_next_owner_id_offset = (u8)(k + 1);
-
- /*
- * Construct encoded ID from the index and bit position
- *
- * Note: Last [j].k (bit 255) is never used and is marked
- * permanently allocated (prevents +1 overflow)
- */
- *owner_id =
- (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
-
- ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
- "Allocated OwnerId: %2.2X\n",
- (unsigned int)*owner_id));
- goto exit;
- }
- }
-
- acpi_gbl_next_owner_id_offset = 0;
- }
-
- /*
- * All owner_ids have been allocated. This typically should
- * not happen since the IDs are reused after deallocation. The IDs are
- * allocated upon table load (one per table) and method execution, and
- * they are released when a table is unloaded or a method completes
- * execution.
- *
- * If this error happens, there may be very deep nesting of invoked control
- * methods, or there may be a bug where the IDs are not released.
- */
- status = AE_OWNER_ID_LIMIT;
- ACPI_ERROR((AE_INFO,
- "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
-
- exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_release_owner_id
- *
- * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_ID
- *
- * RETURN: None. No error is returned because we are either exiting a
- * control method or unloading a table. Either way, we would
- * ignore any error anyway.
- *
- * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
- *
- ******************************************************************************/
-
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
-{
- acpi_owner_id owner_id = *owner_id_ptr;
- acpi_status status;
- u32 index;
- u32 bit;
-
- ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
-
- /* Always clear the input owner_id (zero is an invalid ID) */
-
- *owner_id_ptr = 0;
-
- /* Zero is not a valid owner_ID */
-
- if (owner_id == 0) {
- ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
- return_VOID;
- }
-
- /* Mutex for the global ID mask */
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
- if (ACPI_FAILURE(status)) {
- return_VOID;
- }
-
- /* Normalize the ID to zero */
-
- owner_id--;
-
- /* Decode ID to index/offset pair */
-
- index = ACPI_DIV_32(owner_id);
- bit = 1 << ACPI_MOD_32(owner_id);
-
- /* Free the owner ID only if it is valid */
-
- if (acpi_gbl_owner_id_mask[index] & bit) {
- acpi_gbl_owner_id_mask[index] ^= bit;
- } else {
- ACPI_ERROR((AE_INFO,
- "Release of non-allocated OwnerId: 0x%2.2X",
- owner_id + 1));
- }
-
- (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
- return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strupr (strupr)
- *
- * PARAMETERS: src_string - The source string to convert
- *
- * RETURN: None
- *
- * DESCRIPTION: Convert string to uppercase
- *
- * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
- *
- ******************************************************************************/
-
-void acpi_ut_strupr(char *src_string)
-{
- char *string;
-
- ACPI_FUNCTION_ENTRY();
-
- if (!src_string) {
- return;
- }
-
- /* Walk entire string, uppercasing the letters */
-
- for (string = src_string; *string; string++) {
- *string = (char)ACPI_TOUPPER(*string);
- }
-
- return;
-}
-
-#ifdef ACPI_ASL_COMPILER
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strlwr (strlwr)
- *
- * PARAMETERS: src_string - The source string to convert
- *
- * RETURN: None
- *
- * DESCRIPTION: Convert string to lowercase
- *
- * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
- *
- ******************************************************************************/
-
-void acpi_ut_strlwr(char *src_string)
-{
- char *string;
-
- ACPI_FUNCTION_ENTRY();
-
- if (!src_string) {
- return;
- }
-
- /* Walk entire string, lowercasing the letters */
-
- for (string = src_string; *string; string++) {
- *string = (char)ACPI_TOLOWER(*string);
- }
-
- return;
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_ut_stricmp
- *
- * PARAMETERS: string1 - first string to compare
- * string2 - second string to compare
- *
- * RETURN: int that signifies string relationship. Zero means strings
- * are equal.
- *
- * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
- * strings with no case sensitivity)
- *
- ******************************************************************************/
-
-int acpi_ut_stricmp(char *string1, char *string2)
-{
- int c1;
- int c2;
-
- do {
- c1 = tolower((int)*string1);
- c2 = tolower((int)*string2);
-
- string1++;
- string2++;
- }
- while ((c1 == c2) && (c1));
-
- return (c1 - c2);
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_print_string
- *
- * PARAMETERS: string - Null terminated ASCII string
- * max_length - Maximum output length
- *
- * RETURN: None
- *
- * DESCRIPTION: Dump an ASCII string with support for ACPI-defined escape
- * sequences.
- *
- ******************************************************************************/
-
-void acpi_ut_print_string(char *string, u8 max_length)
-{
- u32 i;
-
- if (!string) {
- acpi_os_printf("<\"NULL STRING PTR\">");
- return;
- }
-
- acpi_os_printf("\"");
- for (i = 0; string[i] && (i < max_length); i++) {
-
- /* Escape sequences */
-
- switch (string[i]) {
- case 0x07:
- acpi_os_printf("\\a"); /* BELL */
- break;
-
- case 0x08:
- acpi_os_printf("\\b"); /* BACKSPACE */
- break;
-
- case 0x0C:
- acpi_os_printf("\\f"); /* FORMFEED */
- break;
-
- case 0x0A:
- acpi_os_printf("\\n"); /* LINEFEED */
- break;
-
- case 0x0D:
- acpi_os_printf("\\r"); /* CARRIAGE RETURN */
- break;
-
- case 0x09:
- acpi_os_printf("\\t"); /* HORIZONTAL TAB */
- break;
-
- case 0x0B:
- acpi_os_printf("\\v"); /* VERTICAL TAB */
- break;
-
- case '\'': /* Single Quote */
- case '\"': /* Double Quote */
- case '\\': /* Backslash */
- acpi_os_printf("\\%c", (int)string[i]);
- break;
-
- default:
-
- /* Check for printable character or hex escape */
-
- if (ACPI_IS_PRINT(string[i])) {
- /* This is a normal character */
-
- acpi_os_printf("%c", (int)string[i]);
- } else {
- /* All others will be Hex escapes */
-
- acpi_os_printf("\\x%2.2X", (s32) string[i]);
- }
- break;
- }
- }
- acpi_os_printf("\"");
-
- if (i == max_length && string[i]) {
- acpi_os_printf("...");
- }
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ut_dword_byte_swap
*
* PARAMETERS: value - Value to be converted
@@ -559,379 +172,6 @@ void acpi_ut_set_integer_width(u8 revision)
}
}
-#ifdef ACPI_DEBUG_OUTPUT
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_display_init_pathname
- *
- * PARAMETERS: type - Object type of the node
- * obj_handle - Handle whose pathname will be displayed
- * path - Additional path string to be appended.
- * (NULL if no extra path)
- *
- * RETURN: acpi_status
- *
- * DESCRIPTION: Display full pathname of an object, DEBUG ONLY
- *
- ******************************************************************************/
-
-void
-acpi_ut_display_init_pathname(u8 type,
- struct acpi_namespace_node *obj_handle,
- char *path)
-{
- acpi_status status;
- struct acpi_buffer buffer;
-
- ACPI_FUNCTION_ENTRY();
-
- /* Only print the path if the appropriate debug level is enabled */
-
- if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
- return;
- }
-
- /* Get the full pathname to the node */
-
- buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
- if (ACPI_FAILURE(status)) {
- return;
- }
-
- /* Print what we're doing */
-
- switch (type) {
- case ACPI_TYPE_METHOD:
- acpi_os_printf("Executing ");
- break;
-
- default:
- acpi_os_printf("Initializing ");
- break;
- }
-
- /* Print the object type and pathname */
-
- acpi_os_printf("%-12s %s",
- acpi_ut_get_type_name(type), (char *)buffer.pointer);
-
- /* Extra path is used to append names like _STA, _INI, etc. */
-
- if (path) {
- acpi_os_printf(".%s", path);
- }
- acpi_os_printf("\n");
-
- ACPI_FREE(buffer.pointer);
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_valid_acpi_char
- *
- * PARAMETERS: char - The character to be examined
- * position - Byte position (0-3)
- *
- * RETURN: TRUE if the character is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI character. Must be one of:
- * 1) Upper case alpha
- * 2) numeric
- * 3) underscore
- *
- * We allow a '!' as the last character because of the ASF! table
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position)
-{
-
- if (!((character >= 'A' && character <= 'Z') ||
- (character >= '0' && character <= '9') || (character == '_'))) {
-
- /* Allow a '!' in the last position */
-
- if (character == '!' && position == 3) {
- return (TRUE);
- }
-
- return (FALSE);
- }
-
- return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_valid_acpi_name
- *
- * PARAMETERS: name - The name to be examined
- *
- * RETURN: TRUE if the name is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
- * 1) Upper case alpha
- * 2) numeric
- * 3) underscore
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_name(u32 name)
-{
- u32 i;
-
- ACPI_FUNCTION_ENTRY();
-
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (!acpi_ut_valid_acpi_char
- ((ACPI_CAST_PTR(char, &name))[i], i)) {
- return (FALSE);
- }
- }
-
- return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_repair_name
- *
- * PARAMETERS: name - The ACPI name to be repaired
- *
- * RETURN: Repaired version of the name
- *
- * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
- * return the new name. NOTE: the Name parameter must reside in
- * read/write memory, cannot be a const.
- *
- * An ACPI Name must consist of valid ACPI characters. We will repair the name
- * if necessary because we don't want to abort because of this, but we want
- * all namespace names to be printable. A warning message is appropriate.
- *
- * This issue came up because there are in fact machines that exhibit
- * this problem, and we want to be able to enable ACPI support for them,
- * even though there are a few bad names.
- *
- ******************************************************************************/
-
-void acpi_ut_repair_name(char *name)
-{
- u32 i;
- u8 found_bad_char = FALSE;
- u32 original_name;
-
- ACPI_FUNCTION_NAME(ut_repair_name);
-
- ACPI_MOVE_NAME(&original_name, name);
-
- /* Check each character in the name */
-
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (acpi_ut_valid_acpi_char(name[i], i)) {
- continue;
- }
-
- /*
- * Replace a bad character with something printable, yet technically
- * still invalid. This prevents any collisions with existing "good"
- * names in the namespace.
- */
- name[i] = '*';
- found_bad_char = TRUE;
- }
-
- if (found_bad_char) {
-
- /* Report warning only if in strict mode or debug mode */
-
- if (!acpi_gbl_enable_interpreter_slack) {
- ACPI_WARNING((AE_INFO,
- "Found bad character(s) in name, repaired: [%4.4s]\n",
- name));
- } else {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found bad character(s) in name, repaired: [%4.4s]\n",
- name));
- }
- }
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_strtoul64
- *
- * PARAMETERS: string - Null terminated string
- * base - Radix of the string: 16 or ACPI_ANY_BASE;
- * ACPI_ANY_BASE means 'in behalf of to_integer'
- * ret_integer - Where the converted integer is returned
- *
- * RETURN: Status and Converted value
- *
- * DESCRIPTION: Convert a string into an unsigned value. Performs either a
- * 32-bit or 64-bit conversion, depending on the current mode
- * of the interpreter.
- * NOTE: Does not support Octal strings, not needed.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
-{
- u32 this_digit = 0;
- u64 return_value = 0;
- u64 quotient;
- u64 dividend;
- u32 to_integer_op = (base == ACPI_ANY_BASE);
- u32 mode32 = (acpi_gbl_integer_byte_width == 4);
- u8 valid_digits = 0;
- u8 sign_of0x = 0;
- u8 term = 0;
-
- ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
-
- switch (base) {
- case ACPI_ANY_BASE:
- case 16:
- break;
-
- default:
- /* Invalid Base */
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- if (!string) {
- goto error_exit;
- }
-
- /* Skip over any white space in the buffer */
-
- while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
- string++;
- }
-
- if (to_integer_op) {
- /*
- * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
- * We need to determine if it is decimal or hexadecimal.
- */
- if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
- sign_of0x = 1;
- base = 16;
-
- /* Skip over the leading '0x' */
- string += 2;
- } else {
- base = 10;
- }
- }
-
- /* Any string left? Check that '0x' is not followed by white space. */
-
- if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
- if (to_integer_op) {
- goto error_exit;
- } else {
- goto all_done;
- }
- }
-
- /*
- * Perform a 32-bit or 64-bit conversion, depending upon the current
- * execution mode of the interpreter
- */
- dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
-
- /* Main loop: convert the string to a 32- or 64-bit integer */
-
- while (*string) {
- if (ACPI_IS_DIGIT(*string)) {
-
- /* Convert ASCII 0-9 to Decimal value */
-
- this_digit = ((u8)*string) - '0';
- } else if (base == 10) {
-
- /* Digit is out of range; possible in to_integer case only */
-
- term = 1;
- } else {
- this_digit = (u8)ACPI_TOUPPER(*string);
- if (ACPI_IS_XDIGIT((char)this_digit)) {
-
- /* Convert ASCII Hex char to value */
-
- this_digit = this_digit - 'A' + 10;
- } else {
- term = 1;
- }
- }
-
- if (term) {
- if (to_integer_op) {
- goto error_exit;
- } else {
- break;
- }
- } else if ((valid_digits == 0) && (this_digit == 0)
- && !sign_of0x) {
-
- /* Skip zeros */
- string++;
- continue;
- }
-
- valid_digits++;
-
- if (sign_of0x
- && ((valid_digits > 16)
- || ((valid_digits > 8) && mode32))) {
- /*
- * This is to_integer operation case.
- * No any restrictions for string-to-integer conversion,
- * see ACPI spec.
- */
- goto error_exit;
- }
-
- /* Divide the digit into the correct position */
-
- (void)acpi_ut_short_divide((dividend - (u64)this_digit),
- base, &quotient, NULL);
-
- if (return_value > quotient) {
- if (to_integer_op) {
- goto error_exit;
- } else {
- break;
- }
- }
-
- return_value *= base;
- return_value += this_digit;
- string++;
- }
-
- /* All done, normal exit */
-
- all_done:
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
- ACPI_FORMAT_UINT64(return_value)));
-
- *ret_integer = return_value;
- return_ACPI_STATUS(AE_OK);
-
- error_exit:
- /* Base was set/validated above */
-
- if (base == 10) {
- return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
- } else {
- return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
- }
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_update_state_and_push
@@ -1097,3 +337,71 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
return_ACPI_STATUS(AE_AML_INTERNAL);
}
+
+#ifdef ACPI_DEBUG_OUTPUT
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_display_init_pathname
+ *
+ * PARAMETERS: type - Object type of the node
+ * obj_handle - Handle whose pathname will be displayed
+ * path - Additional path string to be appended.
+ * (NULL if no extra path)
+ *
+ * RETURN: acpi_status
+ *
+ * DESCRIPTION: Display full pathname of an object, DEBUG ONLY
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_display_init_pathname(u8 type,
+ struct acpi_namespace_node *obj_handle,
+ char *path)
+{
+ acpi_status status;
+ struct acpi_buffer buffer;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Only print the path if the appropriate debug level is enabled */
+
+ if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
+ return;
+ }
+
+ /* Get the full pathname to the node */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ /* Print what we're doing */
+
+ switch (type) {
+ case ACPI_TYPE_METHOD:
+ acpi_os_printf("Executing ");
+ break;
+
+ default:
+ acpi_os_printf("Initializing ");
+ break;
+ }
+
+ /* Print the object type and pathname */
+
+ acpi_os_printf("%-12s %s",
+ acpi_ut_get_type_name(type), (char *)buffer.pointer);
+
+ /* Extra path is used to append names like _STA, _INI, etc. */
+
+ if (path) {
+ acpi_os_printf(".%s", path);
+ }
+ acpi_os_printf("\n");
+
+ ACPI_FREE(buffer.pointer);
+}
+#endif
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 5ccf57c0d87e..22feb99b8e35 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 5c52ca78f6fa..1099f5c069f8 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -419,7 +419,7 @@ void acpi_ut_delete_object_desc(union acpi_operand_object *object)
{
ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
- /* Object must be a union acpi_operand_object */
+ /* Object must be of type union acpi_operand_object */
if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 676285d6116d..36a7d361d7cb 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
new file mode 100644
index 000000000000..835340b26d37
--- /dev/null
+++ b/drivers/acpi/acpica/utownerid.c
@@ -0,0 +1,218 @@
+/*******************************************************************************
+ *
+ * Module Name: utownerid - Support for Table/Method Owner IDs
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utownerid")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_allocate_owner_id
+ *
+ * PARAMETERS: owner_id - Where the new owner ID is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to
+ * track objects created by the table or method, to be deleted
+ * when the method exits or the table is unloaded.
+ *
+ ******************************************************************************/
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
+{
+ u32 i;
+ u32 j;
+ u32 k;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
+
+ /* Guard against multiple allocations of ID to the same location */
+
+ if (*owner_id) {
+ ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
+ *owner_id));
+ return_ACPI_STATUS(AE_ALREADY_EXISTS);
+ }
+
+ /* Mutex for the global ID mask */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Find a free owner ID, cycle through all possible IDs on repeated
+ * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have
+ * to be scanned twice.
+ */
+ for (i = 0, j = acpi_gbl_last_owner_id_index;
+ i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) {
+ if (j >= ACPI_NUM_OWNERID_MASKS) {
+ j = 0; /* Wraparound to start of mask array */
+ }
+
+ for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
+ if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
+
+ /* There are no free IDs in this mask */
+
+ break;
+ }
+
+ if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) {
+ /*
+ * Found a free ID. The actual ID is the bit index plus one,
+ * making zero an invalid Owner ID. Save this as the last ID
+ * allocated and update the global ID mask.
+ */
+ acpi_gbl_owner_id_mask[j] |= (1 << k);
+
+ acpi_gbl_last_owner_id_index = (u8)j;
+ acpi_gbl_next_owner_id_offset = (u8)(k + 1);
+
+ /*
+ * Construct encoded ID from the index and bit position
+ *
+ * Note: Last [j].k (bit 255) is never used and is marked
+ * permanently allocated (prevents +1 overflow)
+ */
+ *owner_id =
+ (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+ "Allocated OwnerId: %2.2X\n",
+ (unsigned int)*owner_id));
+ goto exit;
+ }
+ }
+
+ acpi_gbl_next_owner_id_offset = 0;
+ }
+
+ /*
+ * All owner_ids have been allocated. This typically should
+ * not happen since the IDs are reused after deallocation. The IDs are
+ * allocated upon table load (one per table) and method execution, and
+ * they are released when a table is unloaded or a method completes
+ * execution.
+ *
+ * If this error happens, there may be very deep nesting of invoked control
+ * methods, or there may be a bug where the IDs are not released.
+ */
+ status = AE_OWNER_ID_LIMIT;
+ ACPI_ERROR((AE_INFO,
+ "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
+
+ exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_release_owner_id
+ *
+ * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_ID
+ *
+ * RETURN: None. No error is returned because we are either exiting a
+ * control method or unloading a table. Either way, we would
+ * ignore any error anyway.
+ *
+ * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
+ *
+ ******************************************************************************/
+
+void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
+{
+ acpi_owner_id owner_id = *owner_id_ptr;
+ acpi_status status;
+ u32 index;
+ u32 bit;
+
+ ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
+
+ /* Always clear the input owner_id (zero is an invalid ID) */
+
+ *owner_id_ptr = 0;
+
+ /* Zero is not a valid owner_ID */
+
+ if (owner_id == 0) {
+ ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
+ return_VOID;
+ }
+
+ /* Mutex for the global ID mask */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /* Normalize the ID to zero */
+
+ owner_id--;
+
+ /* Decode ID to index/offset pair */
+
+ index = ACPI_DIV_32(owner_id);
+ bit = 1 << ACPI_MOD_32(owner_id);
+
+ /* Free the owner ID only if it is valid */
+
+ if (acpi_gbl_owner_id_mask[index] & bit) {
+ acpi_gbl_owner_id_mask[index] ^= bit;
+ } else {
+ ACPI_ERROR((AE_INFO,
+ "Release of non-allocated OwnerId: 0x%2.2X",
+ owner_id + 1));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ return_VOID;
+}
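
An illustrative standalone sketch (not from the patch, values hypothetical) of how an owner ID maps onto the global bitmask: allocation stores the bit index plus one so that zero remains invalid, and release reverses the arithmetic with ACPI_DIV_32()/ACPI_MOD_32().

#include <stdio.h>

int main(void)
{
	unsigned int owner_id = 37;                       /* hypothetical ID, valid range 1-255 */
	unsigned int index = (owner_id - 1) / 32;         /* ACPI_DIV_32(owner_id - 1) */
	unsigned int bit = 1u << ((owner_id - 1) % 32);   /* 1 << ACPI_MOD_32(owner_id - 1) */

	printf("OwnerId 0x%2.2X -> acpi_gbl_owner_id_mask[%u], bit 0x%08X\n",
	       owner_id, index, bit);
	return 0;
}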
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index e38bef4980bc..cb7fa491decf 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -127,7 +127,9 @@ const char *acpi_gbl_rw_decode[] = {
const char *acpi_gbl_shr_decode[] = {
"Exclusive",
- "Shared"
+ "Shared",
+ "ExclusiveAndWake", /* ACPI 5.0 */
+ "SharedAndWake" /* ACPI 5.0 */
};
const char *acpi_gbl_siz_decode[] = {
@@ -383,26 +385,16 @@ static const u8 acpi_gbl_resource_types[] = {
ACPI_VARIABLE_LENGTH /* 0E *serial_bus */
};
-/*
- * For the iASL compiler/disassembler, we don't want any error messages
- * because the disassembler uses the resource validation code to determine
- * if Buffer objects are actually Resource Templates.
- */
-#ifdef ACPI_ASL_COMPILER
-#define ACPI_RESOURCE_ERROR(plist)
-#else
-#define ACPI_RESOURCE_ERROR(plist) ACPI_ERROR(plist)
-#endif
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_walk_aml_resources
*
- * PARAMETERS: aml - Pointer to the raw AML resource template
- * aml_length - Length of the entire template
- * user_function - Called once for each descriptor found. If
- * NULL, a pointer to the end_tag is returned
- * context - Passed to user_function
+ * PARAMETERS: walk_state - Current walk info
+ * aml - Pointer to the raw AML resource template
+ * aml_length - Length of the entire template
+ * user_function - Called once for each descriptor found. If
+ * NULL, a pointer to the end_tag is returned
+ * context - Passed to user_function
*
* RETURN: Status
*
@@ -412,7 +404,8 @@ static const u8 acpi_gbl_resource_types[] = {
******************************************************************************/
acpi_status
-acpi_ut_walk_aml_resources(u8 * aml,
+acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
+ u8 *aml,
acpi_size aml_length,
acpi_walk_aml_callback user_function, void **context)
{
@@ -441,7 +434,8 @@ acpi_ut_walk_aml_resources(u8 * aml,
/* Validate the Resource Type and Resource Length */
- status = acpi_ut_validate_resource(aml, &resource_index);
+ status =
+ acpi_ut_validate_resource(walk_state, aml, &resource_index);
if (ACPI_FAILURE(status)) {
/*
* Exit on failure. Cannot continue because the descriptor length
@@ -498,7 +492,8 @@ acpi_ut_walk_aml_resources(u8 * aml,
/* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
- (void)acpi_ut_validate_resource(end_tag, &resource_index);
+ (void)acpi_ut_validate_resource(walk_state, end_tag,
+ &resource_index);
status =
user_function(end_tag, 2, offset, resource_index, context);
if (ACPI_FAILURE(status)) {
@@ -513,9 +508,10 @@ acpi_ut_walk_aml_resources(u8 * aml,
*
* FUNCTION: acpi_ut_validate_resource
*
- * PARAMETERS: aml - Pointer to the raw AML resource descriptor
- * return_index - Where the resource index is returned. NULL
- * if the index is not required.
+ * PARAMETERS: walk_state - Current walk info
+ * aml - Pointer to the raw AML resource descriptor
+ * return_index - Where the resource index is returned. NULL
+ * if the index is not required.
*
* RETURN: Status, and optionally the Index into the global resource tables
*
@@ -525,7 +521,9 @@ acpi_ut_walk_aml_resources(u8 * aml,
*
******************************************************************************/
-acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
+acpi_status
+acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
+ void *aml, u8 *return_index)
{
union aml_resource *aml_resource;
u8 resource_type;
@@ -627,10 +625,12 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
if ((aml_resource->common_serial_bus.type == 0) ||
(aml_resource->common_serial_bus.type >
AML_RESOURCE_MAX_SERIALBUSTYPE)) {
- ACPI_RESOURCE_ERROR((AE_INFO,
- "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
- aml_resource->common_serial_bus.
- type));
+ if (walk_state) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
+ aml_resource->common_serial_bus.
+ type));
+ }
return (AE_AML_INVALID_RESOURCE_TYPE);
}
}
@@ -645,18 +645,22 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
invalid_resource:
- ACPI_RESOURCE_ERROR((AE_INFO,
- "Invalid/unsupported resource descriptor: Type 0x%2.2X",
- resource_type));
+ if (walk_state) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+ resource_type));
+ }
return (AE_AML_INVALID_RESOURCE_TYPE);
bad_resource_length:
- ACPI_RESOURCE_ERROR((AE_INFO,
- "Invalid resource descriptor length: Type "
- "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
- resource_type, resource_length,
- minimum_resource_length));
+ if (walk_state) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid resource descriptor length: Type "
+ "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
+ resource_type, resource_length,
+ minimum_resource_length));
+ }
return (AE_AML_BAD_RESOURCE_LENGTH);
}
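
A hedged sketch of the new convention (the helper and its arguments are hypothetical): passing a NULL walk_state silences the error reporting, which is what the disassembler relies on when probing whether a Buffer object is really a resource template.

/* Illustrative fragment; assumes ACPICA-internal types are in scope. */
static acpi_status probe_template(struct acpi_walk_state *walk_state, void *aml)
{
	u8 resource_index;
	acpi_status status;

	/* NULL walk_state: validate quietly (disassembler-style probe) */
	status = acpi_ut_validate_resource(NULL, aml, &resource_index);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Non-NULL walk_state: same check, but errors are reported */
	return (acpi_ut_validate_resource(walk_state, aml, &resource_index));
}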
@@ -800,8 +804,7 @@ u32 acpi_ut_get_descriptor_length(void *aml)
******************************************************************************/
acpi_status
-acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc,
- u8 ** end_tag)
+acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag)
{
acpi_status status;
@@ -816,7 +819,7 @@ acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc,
/* Validate the template and get a pointer to the end_tag */
- status = acpi_ut_walk_aml_resources(obj_desc->buffer.pointer,
+ status = acpi_ut_walk_aml_resources(NULL, obj_desc->buffer.pointer,
obj_desc->buffer.length, NULL,
(void **)end_tag);
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index cee0473ba813..a6b729d4c1dc 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -97,14 +97,13 @@ void
acpi_ut_push_generic_state(union acpi_generic_state **list_head,
union acpi_generic_state *state)
{
- ACPI_FUNCTION_TRACE(ut_push_generic_state);
+ ACPI_FUNCTION_ENTRY();
/* Push the state object onto the front of the list (stack) */
state->common.next = *list_head;
*list_head = state;
-
- return_VOID;
+ return;
}
/*******************************************************************************
@@ -124,7 +123,7 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE(ut_pop_generic_state);
+ ACPI_FUNCTION_ENTRY();
/* Remove the state object at the head of the list (stack) */
@@ -136,7 +135,7 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
*list_head = state->common.next;
}
- return_PTR(state);
+ return (state);
}
/*******************************************************************************
@@ -186,13 +185,13 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE(ut_create_thread_state);
+ ACPI_FUNCTION_ENTRY();
/* Create the generic state object */
state = acpi_ut_create_generic_state();
if (!state) {
- return_PTR(NULL);
+ return (NULL);
}
/* Init fields specific to the update struct */
@@ -207,7 +206,7 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
state->thread.thread_id = (acpi_thread_id) 1;
}
- return_PTR((struct acpi_thread_state *)state);
+ return ((struct acpi_thread_state *)state);
}
/*******************************************************************************
@@ -230,13 +229,13 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE_PTR(ut_create_update_state, object);
+ ACPI_FUNCTION_ENTRY();
/* Create the generic state object */
state = acpi_ut_create_generic_state();
if (!state) {
- return_PTR(NULL);
+ return (NULL);
}
/* Init fields specific to the update struct */
@@ -244,8 +243,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE;
state->update.object = object;
state->update.value = action;
-
- return_PTR(state);
+ return (state);
}
/*******************************************************************************
@@ -267,13 +265,13 @@ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE_PTR(ut_create_pkg_state, internal_object);
+ ACPI_FUNCTION_ENTRY();
/* Create the generic state object */
state = acpi_ut_create_generic_state();
if (!state) {
- return_PTR(NULL);
+ return (NULL);
}
/* Init fields specific to the update struct */
@@ -283,8 +281,7 @@ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
state->pkg.dest_object = external_object;
state->pkg.index = index;
state->pkg.num_packages = 1;
-
- return_PTR(state);
+ return (state);
}
/*******************************************************************************
@@ -304,21 +301,20 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE(ut_create_control_state);
+ ACPI_FUNCTION_ENTRY();
/* Create the generic state object */
state = acpi_ut_create_generic_state();
if (!state) {
- return_PTR(NULL);
+ return (NULL);
}
/* Init fields specific to the control struct */
state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL;
state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING;
-
- return_PTR(state);
+ return (state);
}
/*******************************************************************************
@@ -336,12 +332,12 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
void acpi_ut_delete_generic_state(union acpi_generic_state *state)
{
- ACPI_FUNCTION_TRACE(ut_delete_generic_state);
+ ACPI_FUNCTION_ENTRY();
/* Ignore null state */
if (state) {
(void)acpi_os_release_object(acpi_gbl_state_cache, state);
}
- return_VOID;
+ return;
}
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
new file mode 100644
index 000000000000..b3e36a81aa4d
--- /dev/null
+++ b/drivers/acpi/acpica/utstring.c
@@ -0,0 +1,574 @@
+/*******************************************************************************
+ *
+ * Module Name: utstring - Common functions for strings and characters
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utstring")
+
+/*
+ * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit
+ * version of strtoul.
+ */
+#ifdef ACPI_ASL_COMPILER
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strlwr (strlwr)
+ *
+ * PARAMETERS: src_string - The source string to convert
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert string to lowercase
+ *
+ * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
+ *
+ ******************************************************************************/
+void acpi_ut_strlwr(char *src_string)
+{
+ char *string;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!src_string) {
+ return;
+ }
+
+ /* Walk entire string, lowercasing the letters */
+
+ for (string = src_string; *string; string++) {
+ *string = (char)ACPI_TOLOWER(*string);
+ }
+
+ return;
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ut_stricmp (stricmp)
+ *
+ * PARAMETERS: string1 - first string to compare
+ * string2 - second string to compare
+ *
+ * RETURN: int that signifies string relationship. Zero means strings
+ * are equal.
+ *
+ * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
+ * strings with no case sensitivity)
+ *
+ ******************************************************************************/
+
+int acpi_ut_stricmp(char *string1, char *string2)
+{
+ int c1;
+ int c2;
+
+ do {
+ c1 = tolower((int)*string1);
+ c2 = tolower((int)*string2);
+
+ string1++;
+ string2++;
+ }
+ while ((c1 == c2) && (c1));
+
+ return (c1 - c2);
+}
+#endif
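
A rough standalone illustration of the stricmp semantics above (plain ASCII assumed; the helper name here is made up):

#include <ctype.h>
#include <stdio.h>

static int stricmp_sketch(const char *s1, const char *s2)
{
	int c1, c2;

	do {
		c1 = tolower((unsigned char)*s1++);
		c2 = tolower((unsigned char)*s2++);
	} while ((c1 == c2) && c1);

	return (c1 - c2);
}

int main(void)
{
	printf("%d\n", stricmp_sketch("ToInteger", "TOINTEGER"));  /* 0: equal, case ignored */
	printf("%d\n", stricmp_sketch("abc", "abd") < 0);          /* 1: "abc" sorts first */
	return 0;
}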
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strupr (strupr)
+ *
+ * PARAMETERS: src_string - The source string to convert
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert string to uppercase
+ *
+ * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
+ *
+ ******************************************************************************/
+
+void acpi_ut_strupr(char *src_string)
+{
+ char *string;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!src_string) {
+ return;
+ }
+
+ /* Walk entire string, uppercasing the letters */
+
+ for (string = src_string; *string; string++) {
+ *string = (char)ACPI_TOUPPER(*string);
+ }
+
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strtoul64
+ *
+ * PARAMETERS: string - Null terminated string
+ * base - Radix of the string: 16 or ACPI_ANY_BASE;
+ * ACPI_ANY_BASE means 'on behalf of to_integer'
+ * ret_integer - Where the converted integer is returned
+ *
+ * RETURN: Status and Converted value
+ *
+ * DESCRIPTION: Convert a string into an unsigned value. Performs either a
+ * 32-bit or 64-bit conversion, depending on the current mode
+ * of the interpreter.
+ * NOTE: Does not support Octal strings, not needed.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
+{
+ u32 this_digit = 0;
+ u64 return_value = 0;
+ u64 quotient;
+ u64 dividend;
+ u32 to_integer_op = (base == ACPI_ANY_BASE);
+ u32 mode32 = (acpi_gbl_integer_byte_width == 4);
+ u8 valid_digits = 0;
+ u8 sign_of0x = 0;
+ u8 term = 0;
+
+ ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);
+
+ switch (base) {
+ case ACPI_ANY_BASE:
+ case 16:
+ break;
+
+ default:
+ /* Invalid Base */
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (!string) {
+ goto error_exit;
+ }
+
+ /* Skip over any white space in the buffer */
+
+ while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
+ string++;
+ }
+
+ if (to_integer_op) {
+ /*
+ * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
+ * We need to determine if it is decimal or hexadecimal.
+ */
+ if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
+ sign_of0x = 1;
+ base = 16;
+
+ /* Skip over the leading '0x' */
+ string += 2;
+ } else {
+ base = 10;
+ }
+ }
+
+ /* Any string left? Check that '0x' is not followed by white space. */
+
+ if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ goto all_done;
+ }
+ }
+
+ /*
+ * Perform a 32-bit or 64-bit conversion, depending upon the current
+ * execution mode of the interpreter
+ */
+ dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
+
+ /* Main loop: convert the string to a 32- or 64-bit integer */
+
+ while (*string) {
+ if (ACPI_IS_DIGIT(*string)) {
+
+ /* Convert ASCII 0-9 to Decimal value */
+
+ this_digit = ((u8)*string) - '0';
+ } else if (base == 10) {
+
+ /* Digit is out of range; possible in to_integer case only */
+
+ term = 1;
+ } else {
+ this_digit = (u8)ACPI_TOUPPER(*string);
+ if (ACPI_IS_XDIGIT((char)this_digit)) {
+
+ /* Convert ASCII Hex char to value */
+
+ this_digit = this_digit - 'A' + 10;
+ } else {
+ term = 1;
+ }
+ }
+
+ if (term) {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ break;
+ }
+ } else if ((valid_digits == 0) && (this_digit == 0)
+ && !sign_of0x) {
+
+ /* Skip zeros */
+ string++;
+ continue;
+ }
+
+ valid_digits++;
+
+ if (sign_of0x
+ && ((valid_digits > 16)
+ || ((valid_digits > 8) && mode32))) {
+ /*
+ * This is the to_integer operation case (explicit "0x" prefix).
+ * More hex digits than fit in the current 32- or 64-bit integer
+ * width is an error.
+ */
+ goto error_exit;
+ }
+
+ /* Divide the digit into the correct position */
+
+ (void)acpi_ut_short_divide((dividend - (u64)this_digit),
+ base, &quotient, NULL);
+
+ if (return_value > quotient) {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ break;
+ }
+ }
+
+ return_value *= base;
+ return_value += this_digit;
+ string++;
+ }
+
+ /* All done, normal exit */
+
+ all_done:
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(return_value)));
+
+ *ret_integer = return_value;
+ return_ACPI_STATUS(AE_OK);
+
+ error_exit:
+ /* Base was set/validated above */
+
+ if (base == 10) {
+ return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
+ } else {
+ return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
+ }
+}
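
A hedged usage sketch of the two conversion modes (hypothetical caller; acpi_ut_strtoul64() is ACPICA-internal, not a public API):

static acpi_status strtoul64_examples(void)
{
	u64 value;
	acpi_status status;

	/* ToInteger case: radix auto-detected from the "0x" prefix */
	status = acpi_ut_strtoul64("0x10", ACPI_ANY_BASE, &value);   /* value == 16 */
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Explicit base 16, used when the string is known to be hex */
	return (acpi_ut_strtoul64("10", 16, &value));                /* value == 16 */
}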
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_print_string
+ *
+ * PARAMETERS: string - Null terminated ASCII string
+ * max_length - Maximum output length
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Dump an ASCII string with support for ACPI-defined escape
+ * sequences.
+ *
+ ******************************************************************************/
+
+void acpi_ut_print_string(char *string, u8 max_length)
+{
+ u32 i;
+
+ if (!string) {
+ acpi_os_printf("<\"NULL STRING PTR\">");
+ return;
+ }
+
+ acpi_os_printf("\"");
+ for (i = 0; string[i] && (i < max_length); i++) {
+
+ /* Escape sequences */
+
+ switch (string[i]) {
+ case 0x07:
+ acpi_os_printf("\\a"); /* BELL */
+ break;
+
+ case 0x08:
+ acpi_os_printf("\\b"); /* BACKSPACE */
+ break;
+
+ case 0x0C:
+ acpi_os_printf("\\f"); /* FORMFEED */
+ break;
+
+ case 0x0A:
+ acpi_os_printf("\\n"); /* LINEFEED */
+ break;
+
+ case 0x0D:
+ acpi_os_printf("\\r"); /* CARRIAGE RETURN */
+ break;
+
+ case 0x09:
+ acpi_os_printf("\\t"); /* HORIZONTAL TAB */
+ break;
+
+ case 0x0B:
+ acpi_os_printf("\\v"); /* VERTICAL TAB */
+ break;
+
+ case '\'': /* Single Quote */
+ case '\"': /* Double Quote */
+ case '\\': /* Backslash */
+ acpi_os_printf("\\%c", (int)string[i]);
+ break;
+
+ default:
+
+ /* Check for printable character or hex escape */
+
+ if (ACPI_IS_PRINT(string[i])) {
+ /* This is a normal character */
+
+ acpi_os_printf("%c", (int)string[i]);
+ } else {
+ /* All others will be Hex escapes */
+
+ acpi_os_printf("\\x%2.2X", (s32) string[i]);
+ }
+ break;
+ }
+ }
+ acpi_os_printf("\"");
+
+ if (i == max_length && string[i]) {
+ acpi_os_printf("...");
+ }
+}
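
A hedged usage fragment (internal API; the buffer is hypothetical): control characters come out as the C-style escapes listed above, other non-printable bytes as \xNN.

char sample[] = "Line1\nLine2\t\"quoted\"";

acpi_ut_print_string(sample, ACPI_UINT8_MAX);
/* emits: "Line1\nLine2\t\"quoted\"" (escapes printed literally, wrapped in quotes) */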
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_acpi_char
+ *
+ * PARAMETERS: char - The character to be examined
+ * position - Byte position (0-3)
+ *
+ * RETURN: TRUE if the character is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI character. Must be one of:
+ * 1) Upper case alpha
+ * 2) numeric
+ * 3) underscore
+ *
+ * We allow a '!' as the last character because of the ASF! table
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_acpi_char(char character, u32 position)
+{
+
+ if (!((character >= 'A' && character <= 'Z') ||
+ (character >= '0' && character <= '9') || (character == '_'))) {
+
+ /* Allow a '!' in the last position */
+
+ if (character == '!' && position == 3) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_acpi_name
+ *
+ * PARAMETERS: name - The name to be examined
+ *
+ * RETURN: TRUE if the name is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
+ * 1) Upper case alpha
+ * 2) numeric
+ * 3) underscore
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_acpi_name(u32 name)
+{
+ u32 i;
+
+ ACPI_FUNCTION_ENTRY();
+
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ if (!acpi_ut_valid_acpi_char
+ ((ACPI_CAST_PTR(char, &name))[i], i)) {
+ return (FALSE);
+ }
+ }
+
+ return (TRUE);
+}
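
A hedged fragment (assumes the ACPICA macros used elsewhere in this file): a name is a packed 4-byte value, and only [A-Z0-9_] characters pass.

u32 name;

ACPI_MOVE_NAME(&name, "_STA");
/* acpi_ut_valid_acpi_name(name) == TRUE: uppercase and '_' only */

ACPI_MOVE_NAME(&name, "st-a");
/* acpi_ut_valid_acpi_name(name) == FALSE: lowercase and '-' are rejected */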
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_repair_name
+ *
+ * PARAMETERS: name - The ACPI name to be repaired
+ *
+ * RETURN: Repaired version of the name
+ *
+ * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
+ * return the new name. NOTE: the Name parameter must reside in
+ * read/write memory, cannot be a const.
+ *
+ * An ACPI Name must consist of valid ACPI characters. We will repair the name
+ * if necessary because we don't want to abort because of this, but we want
+ * all namespace names to be printable. A warning message is appropriate.
+ *
+ * This issue came up because there are in fact machines that exhibit
+ * this problem, and we want to be able to enable ACPI support for them,
+ * even though there are a few bad names.
+ *
+ ******************************************************************************/
+
+void acpi_ut_repair_name(char *name)
+{
+ u32 i;
+ u8 found_bad_char = FALSE;
+ u32 original_name;
+
+ ACPI_FUNCTION_NAME(ut_repair_name);
+
+ ACPI_MOVE_NAME(&original_name, name);
+
+ /* Check each character in the name */
+
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ if (acpi_ut_valid_acpi_char(name[i], i)) {
+ continue;
+ }
+
+ /*
+ * Replace a bad character with something printable, yet technically
+ * still invalid. This prevents any collisions with existing "good"
+ * names in the namespace.
+ */
+ name[i] = '*';
+ found_bad_char = TRUE;
+ }
+
+ if (found_bad_char) {
+
+ /* Report warning only if in strict mode or debug mode */
+
+ if (!acpi_gbl_enable_interpreter_slack) {
+ ACPI_WARNING((AE_INFO,
+ "Invalid character(s) in name (0x%.8X), repaired: [%4.4s]",
+ original_name, name));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Invalid character(s) in name (0x%.8X), repaired: [%4.4s]",
+ original_name, name));
+ }
+ }
+}
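
A hedged fragment: the repair happens in place, so the name must live in writable storage (the sample name is hypothetical).

char name[ACPI_NAME_SIZE + 1] = "AB-1";     /* '-' is not a valid ACPI name character */

acpi_ut_repair_name(name);                  /* name becomes "AB*1" */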
+
+#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
+/*******************************************************************************
+ *
+ * FUNCTION: ut_convert_backslashes
+ *
+ * PARAMETERS: pathname - File pathname string to be converted
+ *
+ * RETURN: Modifies the input Pathname
+ *
+ * DESCRIPTION: Convert all backslashes (0x5C) to forward slashes (0x2F) within
+ * the entire input file pathname string.
+ *
+ ******************************************************************************/
+
+void ut_convert_backslashes(char *pathname)
+{
+
+ if (!pathname) {
+ return;
+ }
+
+ while (*pathname) {
+ if (*pathname == '\\') {
+ *pathname = '/';
+ }
+
+ pathname++;
+ }
+}
+#endif
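
A hedged fragment for the compiler-only helper above (hypothetical path string):

char path[] = "C:\\acpi\\dsdt.asl";

ut_convert_backslashes(path);   /* path is now "C:/acpi/dsdt.asl" */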
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index a424a9e3fea4..62774c7b76a8 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -436,10 +436,10 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
struct acpi_memory_list *mem_list;
acpi_status status;
- ACPI_FUNCTION_TRACE(ut_remove_allocation);
+ ACPI_FUNCTION_NAME(ut_remove_allocation);
if (acpi_gbl_disable_mem_tracking) {
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
}
mem_list = acpi_gbl_global_list;
@@ -450,12 +450,12 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
ACPI_ERROR((module, line,
"Empty allocation list, nothing to free!"));
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ return (status);
}
/* Unlink */
@@ -470,15 +470,15 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
(allocation->next)->previous = allocation->previous;
}
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing %p, size 0%X\n",
+ &allocation->user_space, allocation->size));
+
/* Mark the segment as deleted */
ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size);
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing size 0%X\n",
- allocation->size));
-
status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
- return_ACPI_STATUS(status);
+ return (status);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 390db0ca5e2e..48efb446258c 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,11 +44,7 @@
#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
-#include "acevents.h"
-#include "acnamesp.h"
#include "acdebug.h"
-#include "actables.h"
-#include "acinterp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utxface")
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index d4d3826140d8..976b6c734fce 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -297,9 +297,9 @@ ACPI_EXPORT_SYMBOL(acpi_bios_warning)
*
* PARAMETERS: module_name - Caller's module name (for error output)
* line_number - Caller's line number (for error output)
- * Pathname - Full pathname to the node
+ * pathname - Full pathname to the node
* node_flags - From Namespace node for the method/object
- * Format - Printf format string + additional args
+ * format - Printf format string + additional args
*
* RETURN: None
*
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 14f523627a5e..41ebaaf8bb1a 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 0a40a851b354..312299721ba1 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2012, Intel Corp.
+ * Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 00a783661d0b..46f80e2c92f7 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -590,6 +590,9 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
*access_bit_width < 32)
*access_bit_width = 32;
+ else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
+ *access_bit_width < 64)
+ *access_bit_width = 64;
if ((bit_width + bit_offset) > *access_bit_width) {
pr_warning(FW_BUG APEI_PFX
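
A minimal standalone sketch of the new 64-bit branch (values are hypothetical): an 8-byte-aligned register with no bit offset may be widened to a single 64-bit access.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t paddr = 0xFED40000;    /* hypothetical GAR address, 8-byte aligned */
	uint32_t bit_width = 64, bit_offset = 0, access_bit_width = 32;

	if (bit_width == 64 && bit_offset == 0 && (paddr & 0x07) == 0 &&
	    access_bit_width < 64)
		access_bit_width = 64;

	printf("access_bit_width = %u\n", access_bit_width);   /* prints 64 */
	return 0;
}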
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index e6defd86b424..1e5d8a40101e 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -29,6 +29,7 @@
#include <linux/time.h>
#include <linux/cper.h>
#include <linux/acpi.h>
+#include <linux/pci.h>
#include <linux/aer.h>
/*
@@ -249,6 +250,10 @@ static const char *cper_pcie_port_type_strs[] = {
static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
const struct acpi_hest_generic_data *gdata)
{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ struct pci_dev *dev;
+#endif
+
if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
@@ -281,10 +286,18 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
#ifdef CONFIG_ACPI_APEI_PCIEAER
- if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) {
- struct aer_capability_regs *aer_regs = (void *)pcie->aer_info;
- cper_print_aer(pfx, gdata->error_severity, aer_regs);
+ dev = pci_get_domain_bus_and_slot(pcie->device_id.segment,
+ pcie->device_id.bus, pcie->device_id.function);
+ if (!dev) {
+ pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n",
+ pcie->device_id.segment, pcie->device_id.bus,
+ pcie->device_id.slot, pcie->device_id.function);
+ return;
}
+ if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO)
+ cper_print_aer(pfx, dev, gdata->error_severity,
+ (struct aer_capability_regs *) pcie->aer_info);
+ pci_dev_put(dev);
#endif
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 7efaeaa53b88..c5cd5b5513e6 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1111,7 +1111,7 @@ fail:
return result;
}
-static int acpi_battery_remove(struct acpi_device *device, int type)
+static int acpi_battery_remove(struct acpi_device *device)
{
struct acpi_battery *battery = NULL;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 1f0d457ecbcf..01708a165368 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -178,276 +178,6 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
}
EXPORT_SYMBOL(acpi_bus_get_private_data);
-/* --------------------------------------------------------------------------
- Power Management
- -------------------------------------------------------------------------- */
-
-static const char *state_string(int state)
-{
- switch (state) {
- case ACPI_STATE_D0:
- return "D0";
- case ACPI_STATE_D1:
- return "D1";
- case ACPI_STATE_D2:
- return "D2";
- case ACPI_STATE_D3_HOT:
- return "D3hot";
- case ACPI_STATE_D3_COLD:
- return "D3";
- default:
- return "(unknown)";
- }
-}
-
-static int __acpi_bus_get_power(struct acpi_device *device, int *state)
-{
- int result = ACPI_STATE_UNKNOWN;
-
- if (!device || !state)
- return -EINVAL;
-
- if (!device->flags.power_manageable) {
- /* TBD: Non-recursive algorithm for walking up hierarchy. */
- *state = device->parent ?
- device->parent->power.state : ACPI_STATE_D0;
- goto out;
- }
-
- /*
- * Get the device's power state either directly (via _PSC) or
- * indirectly (via power resources).
- */
- if (device->power.flags.explicit_get) {
- unsigned long long psc;
- acpi_status status = acpi_evaluate_integer(device->handle,
- "_PSC", NULL, &psc);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- result = psc;
- }
- /* The test below covers ACPI_STATE_UNKNOWN too. */
- if (result <= ACPI_STATE_D2) {
- ; /* Do nothing. */
- } else if (device->power.flags.power_resources) {
- int error = acpi_power_get_inferred_state(device, &result);
- if (error)
- return error;
- } else if (result == ACPI_STATE_D3_HOT) {
- result = ACPI_STATE_D3;
- }
-
- /*
- * If we were unsure about the device parent's power state up to this
- * point, the fact that the device is in D0 implies that the parent has
- * to be in D0 too.
- */
- if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
- && result == ACPI_STATE_D0)
- device->parent->power.state = ACPI_STATE_D0;
-
- *state = result;
-
- out:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
- device->pnp.bus_id, state_string(*state)));
-
- return 0;
-}
-
-
-/**
- * acpi_device_set_power - Set power state of an ACPI device.
- * @device: Device to set the power state of.
- * @state: New power state to set.
- *
- * Callers must ensure that the device is power manageable before using this
- * function.
- */
-int acpi_device_set_power(struct acpi_device *device, int state)
-{
- int result = 0;
- acpi_status status = AE_OK;
- char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
-
- if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
- return -EINVAL;
-
- /* Make sure this is a valid target state */
-
- if (state == device->power.state) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
- state_string(state)));
- return 0;
- }
-
- if (!device->power.states[state].flags.valid) {
- printk(KERN_WARNING PREFIX "Device does not support %s\n",
- state_string(state));
- return -ENODEV;
- }
- if (device->parent && (state < device->parent->power.state)) {
- printk(KERN_WARNING PREFIX
- "Cannot set device to a higher-powered"
- " state than parent\n");
- return -ENODEV;
- }
-
- /* For D3cold we should execute _PS3, not _PS4. */
- if (state == ACPI_STATE_D3_COLD)
- object_name[3] = '3';
-
- /*
- * Transition Power
- * ----------------
- * On transitions to a high-powered state we first apply power (via
- * power resources) then evalute _PSx. Conversly for transitions to
- * a lower-powered state.
- */
- if (state < device->power.state) {
- if (device->power.state >= ACPI_STATE_D3_HOT &&
- state != ACPI_STATE_D0) {
- printk(KERN_WARNING PREFIX
- "Cannot transition to non-D0 state from D3\n");
- return -ENODEV;
- }
- if (device->power.flags.power_resources) {
- result = acpi_power_transition(device, state);
- if (result)
- goto end;
- }
- if (device->power.states[state].flags.explicit_set) {
- status = acpi_evaluate_object(device->handle,
- object_name, NULL, NULL);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
- }
- } else {
- if (device->power.states[state].flags.explicit_set) {
- status = acpi_evaluate_object(device->handle,
- object_name, NULL, NULL);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
- }
- if (device->power.flags.power_resources) {
- result = acpi_power_transition(device, state);
- if (result)
- goto end;
- }
- }
-
- end:
- if (result)
- printk(KERN_WARNING PREFIX
- "Device [%s] failed to transition to %s\n",
- device->pnp.bus_id, state_string(state));
- else {
- device->power.state = state;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device [%s] transitioned to %s\n",
- device->pnp.bus_id, state_string(state)));
- }
-
- return result;
-}
-EXPORT_SYMBOL(acpi_device_set_power);
-
-
-int acpi_bus_set_power(acpi_handle handle, int state)
-{
- struct acpi_device *device;
- int result;
-
- result = acpi_bus_get_device(handle, &device);
- if (result)
- return result;
-
- if (!device->flags.power_manageable) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device [%s] is not power manageable\n",
- dev_name(&device->dev)));
- return -ENODEV;
- }
-
- return acpi_device_set_power(device, state);
-}
-EXPORT_SYMBOL(acpi_bus_set_power);
-
-
-int acpi_bus_init_power(struct acpi_device *device)
-{
- int state;
- int result;
-
- if (!device)
- return -EINVAL;
-
- device->power.state = ACPI_STATE_UNKNOWN;
-
- result = __acpi_bus_get_power(device, &state);
- if (result)
- return result;
-
- if (device->power.flags.power_resources)
- result = acpi_power_on_resources(device, state);
-
- if (!result)
- device->power.state = state;
-
- return result;
-}
-
-
-int acpi_bus_update_power(acpi_handle handle, int *state_p)
-{
- struct acpi_device *device;
- int state;
- int result;
-
- result = acpi_bus_get_device(handle, &device);
- if (result)
- return result;
-
- result = __acpi_bus_get_power(device, &state);
- if (result)
- return result;
-
- result = acpi_device_set_power(device, state);
- if (!result && state_p)
- *state_p = state;
-
- return result;
-}
-EXPORT_SYMBOL_GPL(acpi_bus_update_power);
-
-
-bool acpi_bus_power_manageable(acpi_handle handle)
-{
- struct acpi_device *device;
- int result;
-
- result = acpi_bus_get_device(handle, &device);
- return result ? false : device->flags.power_manageable;
-}
-
-EXPORT_SYMBOL(acpi_bus_power_manageable);
-
-bool acpi_bus_can_wakeup(acpi_handle handle)
-{
- struct acpi_device *device;
- int result;
-
- result = acpi_bus_get_device(handle, &device);
- return result ? false : device->wakeup.flags.valid;
-}
-
-EXPORT_SYMBOL(acpi_bus_can_wakeup);
-
static void acpi_print_osc_error(acpi_handle handle,
struct acpi_osc_context *context, char *error)
{
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index f0d936b65e37..86c7d5445c38 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -75,7 +75,7 @@ static const struct acpi_device_id button_device_ids[] = {
MODULE_DEVICE_TABLE(acpi, button_device_ids);
static int acpi_button_add(struct acpi_device *device);
-static int acpi_button_remove(struct acpi_device *device, int type);
+static int acpi_button_remove(struct acpi_device *device);
static void acpi_button_notify(struct acpi_device *device, u32 event);
#ifdef CONFIG_PM_SLEEP
@@ -433,7 +433,7 @@ static int acpi_button_add(struct acpi_device *device)
return error;
}
-static int acpi_button_remove(struct acpi_device *device, int type)
+static int acpi_button_remove(struct acpi_device *device)
{
struct acpi_button *button = acpi_driver_data(device);
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 811910b50b75..5523ba7d764d 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -34,46 +34,34 @@
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-#include <acpi/container.h>
#define PREFIX "ACPI: "
-#define ACPI_CONTAINER_DEVICE_NAME "ACPI container device"
-#define ACPI_CONTAINER_CLASS "container"
-
-#define INSTALL_NOTIFY_HANDLER 1
-#define UNINSTALL_NOTIFY_HANDLER 2
-
#define _COMPONENT ACPI_CONTAINER_COMPONENT
ACPI_MODULE_NAME("container");
-MODULE_AUTHOR("Anil S Keshavamurthy");
-MODULE_DESCRIPTION("ACPI container driver");
-MODULE_LICENSE("GPL");
-
-static int acpi_container_add(struct acpi_device *device);
-static int acpi_container_remove(struct acpi_device *device, int type);
-
static const struct acpi_device_id container_device_ids[] = {
{"ACPI0004", 0},
{"PNP0A05", 0},
{"PNP0A06", 0},
{"", 0},
};
-MODULE_DEVICE_TABLE(acpi, container_device_ids);
-static struct acpi_driver acpi_container_driver = {
- .name = "container",
- .class = ACPI_CONTAINER_CLASS,
+static int container_device_attach(struct acpi_device *device,
+ const struct acpi_device_id *not_used)
+{
+ /*
+ * FIXME: This is necessary, so that acpi_eject_store() doesn't return
+ * -ENODEV for containers.
+ */
+ return 1;
+}
+
+static struct acpi_scan_handler container_device_handler = {
.ids = container_device_ids,
- .ops = {
- .add = acpi_container_add,
- .remove = acpi_container_remove,
- },
+ .attach = container_device_attach,
};
-/*******************************************************************/
-
static int is_device_present(acpi_handle handle)
{
acpi_handle temp;
@@ -92,73 +80,6 @@ static int is_device_present(acpi_handle handle)
return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT);
}
-static bool is_container_device(const char *hid)
-{
- const struct acpi_device_id *container_id;
-
- for (container_id = container_device_ids;
- container_id->id[0]; container_id++) {
- if (!strcmp((char *)container_id->id, hid))
- return true;
- }
-
- return false;
-}
-
-/*******************************************************************/
-static int acpi_container_add(struct acpi_device *device)
-{
- struct acpi_container *container;
-
- container = kzalloc(sizeof(struct acpi_container), GFP_KERNEL);
- if (!container)
- return -ENOMEM;
-
- container->handle = device->handle;
- strcpy(acpi_device_name(device), ACPI_CONTAINER_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_CONTAINER_CLASS);
- device->driver_data = container;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device <%s> bid <%s>\n",
- acpi_device_name(device), acpi_device_bid(device)));
-
- return 0;
-}
-
-static int acpi_container_remove(struct acpi_device *device, int type)
-{
- acpi_status status = AE_OK;
- struct acpi_container *pc = NULL;
-
- pc = acpi_driver_data(device);
- kfree(pc);
- return status;
-}
-
-static int container_device_add(struct acpi_device **device, acpi_handle handle)
-{
- acpi_handle phandle;
- struct acpi_device *pdev;
- int result;
-
-
- if (acpi_get_parent(handle, &phandle)) {
- return -ENODEV;
- }
-
- if (acpi_bus_get_device(phandle, &pdev)) {
- return -ENODEV;
- }
-
- if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_DEVICE)) {
- return -ENODEV;
- }
-
- result = acpi_bus_start(*device);
-
- return result;
-}
-
static void container_notify_cb(acpi_handle handle, u32 type, void *context)
{
struct acpi_device *device = NULL;
@@ -167,6 +88,8 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
acpi_status status;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
+ acpi_scan_lock_acquire();
+
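+	/* Serialize the hotplug handling below with other namespace scans. */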
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
/* Fall through */
@@ -182,7 +105,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
			/* device exists and this is a remove request */
device->flags.eject_pending = 1;
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
- return;
+ goto out;
}
break;
}
@@ -190,11 +113,16 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
if (!ACPI_FAILURE(status) || device)
break;
- result = container_device_add(&device, handle);
+ result = acpi_bus_scan(handle);
if (result) {
acpi_handle_warn(handle, "Failed to add container\n");
break;
}
+ result = acpi_bus_get_device(handle, &device);
+ if (result) {
+ acpi_handle_warn(handle, "Missing device object\n");
+ break;
+ }
kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
ost_code = ACPI_OST_SC_SUCCESS;
@@ -204,98 +132,59 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
if (!acpi_bus_get_device(handle, &device) && device) {
device->flags.eject_pending = 1;
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
- return;
+ goto out;
}
break;
default:
/* non-hotplug event; possibly handled by other handler */
- return;
+ goto out;
}
/* Inform firmware that the hotplug operation has completed */
(void) acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
- return;
+
+ out:
+ acpi_scan_lock_release();
}
-static acpi_status
-container_walk_namespace_cb(acpi_handle handle,
- u32 lvl, void *context, void **rv)
+static bool is_container(acpi_handle handle)
{
- char *hid = NULL;
struct acpi_device_info *info;
- acpi_status status;
- int *action = context;
-
- status = acpi_get_object_info(handle, &info);
- if (ACPI_FAILURE(status)) {
- return AE_OK;
- }
+ bool ret = false;
- if (info->valid & ACPI_VALID_HID)
- hid = info->hardware_id.string;
+ if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
+ return false;
- if (hid == NULL) {
- goto end;
- }
-
- if (!is_container_device(hid))
- goto end;
+ if (info->valid & ACPI_VALID_HID) {
+ const struct acpi_device_id *id;
- switch (*action) {
- case INSTALL_NOTIFY_HANDLER:
- acpi_install_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- container_notify_cb, NULL);
- break;
- case UNINSTALL_NOTIFY_HANDLER:
- acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- container_notify_cb);
- break;
- default:
- break;
+ for (id = container_device_ids; id->id[0]; id++) {
+ ret = !strcmp((char *)id->id, info->hardware_id.string);
+ if (ret)
+ break;
+ }
}
-
- end:
kfree(info);
-
- return AE_OK;
+ return ret;
}
-static int __init acpi_container_init(void)
+static acpi_status acpi_container_register_notify_handler(acpi_handle handle,
+ u32 lvl, void *ctxt,
+ void **retv)
{
- int result = 0;
- int action = INSTALL_NOTIFY_HANDLER;
-
- result = acpi_bus_register_driver(&acpi_container_driver);
- if (result < 0) {
- return (result);
- }
-
- /* register notify handler to every container device */
- acpi_walk_namespace(ACPI_TYPE_DEVICE,
- ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- container_walk_namespace_cb, NULL, &action, NULL);
+ if (is_container(handle))
+ acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ container_notify_cb, NULL);
- return (0);
+ return AE_OK;
}
-static void __exit acpi_container_exit(void)
+void __init acpi_container_init(void)
{
- int action = UNINSTALL_NOTIFY_HANDLER;
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+ acpi_container_register_notify_handler, NULL,
+ NULL, NULL);
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE,
- ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- container_walk_namespace_cb, NULL, &action, NULL);
-
- acpi_bus_unregister_driver(&acpi_container_driver);
-
- return;
+ acpi_scan_add_handler(&container_device_handler);
}
-
-module_init(acpi_container_init);
-module_exit(acpi_container_exit);
diff --git a/drivers/acpi/csrt.c b/drivers/acpi/csrt.c
new file mode 100644
index 000000000000..5c15a91faf0b
--- /dev/null
+++ b/drivers/acpi/csrt.c
@@ -0,0 +1,159 @@
+/*
+ * Support for Core System Resources Table (CSRT)
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "ACPI: CSRT: " fmt
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+
+ACPI_MODULE_NAME("CSRT");
+
+static int __init acpi_csrt_parse_shared_info(struct platform_device *pdev,
+ const struct acpi_csrt_group *grp)
+{
+ const struct acpi_csrt_shared_info *si;
+ struct resource res[3];
+ size_t nres;
+ int ret;
+
+ memset(res, 0, sizeof(res));
+ nres = 0;
+
+ si = (const struct acpi_csrt_shared_info *)&grp[1];
+ /*
+ * The peripherals that are listed on CSRT typically support only
+ * 32-bit addresses so we only use the low part of MMIO base for
+ * now.
+ */
+ if (!si->mmio_base_high && si->mmio_base_low) {
+ /*
+		 * The shared_info entry does not provide the size of the
+		 * memory resource, so assume it is 4K here.
+ */
+ res[nres].start = si->mmio_base_low;
+ res[nres].end = res[0].start + SZ_4K - 1;
+ res[nres++].flags = IORESOURCE_MEM;
+ }
+
+ if (si->gsi_interrupt) {
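+		/* Map the GSI to a Linux IRQ and expose it as an IRQ resource. */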
+ int irq = acpi_register_gsi(NULL, si->gsi_interrupt,
+ si->interrupt_mode,
+ si->interrupt_polarity);
+ res[nres].start = irq;
+ res[nres].end = irq;
+ res[nres++].flags = IORESOURCE_IRQ;
+ }
+
+ if (si->base_request_line || si->num_handshake_signals) {
+ /*
+ * We pass the driver a DMA resource describing the range
+ * of request lines the device supports.
+ */
+ res[nres].start = si->base_request_line;
+ res[nres].end = res[nres].start + si->num_handshake_signals - 1;
+ res[nres++].flags = IORESOURCE_DMA;
+ }
+
+ ret = platform_device_add_resources(pdev, res, nres);
+ if (ret) {
+ if (si->gsi_interrupt)
+ acpi_unregister_gsi(si->gsi_interrupt);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init
+acpi_csrt_parse_resource_group(const struct acpi_csrt_group *grp)
+{
+ struct platform_device *pdev;
+ char vendor[5], name[16];
+ int ret, i;
+
+ vendor[0] = grp->vendor_id;
+ vendor[1] = grp->vendor_id >> 8;
+ vendor[2] = grp->vendor_id >> 16;
+ vendor[3] = grp->vendor_id >> 24;
+ vendor[4] = '\0';
+
+ if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
+ return -ENODEV;
+
+ snprintf(name, sizeof(name), "%s%04X", vendor, grp->device_id);
+ pdev = platform_device_alloc(name, PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
+
+ /* Add resources based on the shared info */
+ ret = acpi_csrt_parse_shared_info(pdev, grp);
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto fail;
+
+ for (i = 0; i < pdev->num_resources; i++)
+ dev_dbg(&pdev->dev, "%pR\n", &pdev->resource[i]);
+
+ return 0;
+
+fail:
+ platform_device_put(pdev);
+ return ret;
+}
+
+/*
+ * CSRT or Core System Resources Table is a proprietary ACPI table
+ * introduced by Microsoft. This table can contain devices that are not in
+ * the system DSDT table. In particular DMA controllers might be described
+ * here.
+ *
+ * We present these devices as normal platform devices that don't have ACPI
+ * IDs or handle. The platform device name will be something like
+ * <VENDOR><DEVID>.<n>.auto for example: INTL9C06.0.auto.
+ */
+void __init acpi_csrt_init(void)
+{
+ struct acpi_csrt_group *grp, *end;
+ struct acpi_table_csrt *csrt;
+ acpi_status status;
+ int ret;
+
+ status = acpi_get_table(ACPI_SIG_CSRT, 0,
+ (struct acpi_table_header **)&csrt);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND)
+ pr_warn("failed to get the CSRT table\n");
+ return;
+ }
+
+ pr_debug("parsing CSRT table for devices\n");
+
+ grp = (struct acpi_csrt_group *)(csrt + 1);
+ end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
+
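+	/* Each group header carries its own length, so step through the groups one by one. */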
+ while (grp < end) {
+ ret = acpi_csrt_parse_resource_group(grp);
+ if (ret) {
+ pr_warn("error in parsing resource group: %d\n", ret);
+ return;
+ }
+
+ grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
+ }
+}
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index 5d42c2414ae5..6adfc706a1de 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -1,5 +1,5 @@
/*
- * debugfs.c - ACPI debugfs interface to userspace.
+ * custom_method.c - debugfs interface for customizing ACPI control method
*/
#include <linux/init.h>
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c6ff606c6d5b..dd314ef9bff1 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -30,6 +30,12 @@
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#include "internal.h"
+
+#define _COMPONENT ACPI_POWER_COMPONENT
+ACPI_MODULE_NAME("device_pm");
static DEFINE_MUTEX(acpi_pm_notifier_lock);
@@ -94,6 +100,293 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
}
/**
+ * acpi_power_state_string - String representation of ACPI device power state.
+ * @state: ACPI device power state to return the string representation of.
+ */
+const char *acpi_power_state_string(int state)
+{
+ switch (state) {
+ case ACPI_STATE_D0:
+ return "D0";
+ case ACPI_STATE_D1:
+ return "D1";
+ case ACPI_STATE_D2:
+ return "D2";
+ case ACPI_STATE_D3_HOT:
+ return "D3hot";
+ case ACPI_STATE_D3_COLD:
+ return "D3cold";
+ default:
+ return "(unknown)";
+ }
+}
+
+/**
+ * acpi_device_get_power - Get power state of an ACPI device.
+ * @device: Device to get the power state of.
+ * @state: Place to store the power state of the device.
+ *
+ * This function does not update the device's power.state field, but it may
+ * update its parent's power.state field (when the parent's power state is
+ * unknown and the device's power state turns out to be D0).
+ */
+int acpi_device_get_power(struct acpi_device *device, int *state)
+{
+ int result = ACPI_STATE_UNKNOWN;
+
+ if (!device || !state)
+ return -EINVAL;
+
+ if (!device->flags.power_manageable) {
+ /* TBD: Non-recursive algorithm for walking up hierarchy. */
+ *state = device->parent ?
+ device->parent->power.state : ACPI_STATE_D0;
+ goto out;
+ }
+
+ /*
+ * Get the device's power state either directly (via _PSC) or
+ * indirectly (via power resources).
+ */
+ if (device->power.flags.explicit_get) {
+ unsigned long long psc;
+ acpi_status status = acpi_evaluate_integer(device->handle,
+ "_PSC", NULL, &psc);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ result = psc;
+ }
+ /* The test below covers ACPI_STATE_UNKNOWN too. */
+ if (result <= ACPI_STATE_D2) {
+ ; /* Do nothing. */
+ } else if (device->power.flags.power_resources) {
+ int error = acpi_power_get_inferred_state(device, &result);
+ if (error)
+ return error;
+ } else if (result == ACPI_STATE_D3_HOT) {
+ result = ACPI_STATE_D3;
+ }
+
+ /*
+ * If we were unsure about the device parent's power state up to this
+ * point, the fact that the device is in D0 implies that the parent has
+ * to be in D0 too.
+ */
+ if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
+ && result == ACPI_STATE_D0)
+ device->parent->power.state = ACPI_STATE_D0;
+
+ *state = result;
+
+ out:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
+ device->pnp.bus_id, acpi_power_state_string(*state)));
+
+ return 0;
+}
+
+static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state)
+{
+ if (adev->power.states[state].flags.explicit_set) {
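+		/* Build the corresponding _PSx method name, e.g. "_PS3" for D3hot. */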
+ char method[5] = { '_', 'P', 'S', '0' + state, '\0' };
+ acpi_status status;
+
+ status = acpi_evaluate_object(adev->handle, method, NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/**
+ * acpi_device_set_power - Set power state of an ACPI device.
+ * @device: Device to set the power state of.
+ * @state: New power state to set.
+ *
+ * Callers must ensure that the device is power manageable before using this
+ * function.
+ */
+int acpi_device_set_power(struct acpi_device *device, int state)
+{
+ int result = 0;
+ bool cut_power = false;
+
+ if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
+ return -EINVAL;
+
+ /* Make sure this is a valid target state */
+
+ if (state == device->power.state) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+ acpi_power_state_string(state)));
+ return 0;
+ }
+
+ if (!device->power.states[state].flags.valid) {
+ printk(KERN_WARNING PREFIX "Device does not support %s\n",
+ acpi_power_state_string(state));
+ return -ENODEV;
+ }
+ if (device->parent && (state < device->parent->power.state)) {
+ printk(KERN_WARNING PREFIX
+ "Cannot set device to a higher-powered"
+ " state than parent\n");
+ return -ENODEV;
+ }
+
+ /* For D3cold we should first transition into D3hot. */
+ if (state == ACPI_STATE_D3_COLD
+ && device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible) {
+ state = ACPI_STATE_D3_HOT;
+ cut_power = true;
+ }
+
+ if (state < device->power.state && state != ACPI_STATE_D0
+ && device->power.state >= ACPI_STATE_D3_HOT) {
+ printk(KERN_WARNING PREFIX
+ "Cannot transition to non-D0 state from D3\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Transition Power
+ * ----------------
+	 * In accordance with the ACPI specification, first apply power (via
+	 * power resources) and then evaluate _PSx.
+ */
+ if (device->power.flags.power_resources) {
+ result = acpi_power_transition(device, state);
+ if (result)
+ goto end;
+ }
+ result = acpi_dev_pm_explicit_set(device, state);
+ if (result)
+ goto end;
+
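+	/* Record D3hot as reached, then cut the power resources below to enter D3cold. */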
+ if (cut_power) {
+ device->power.state = state;
+ state = ACPI_STATE_D3_COLD;
+ result = acpi_power_transition(device, state);
+ }
+
+ end:
+ if (result) {
+ printk(KERN_WARNING PREFIX
+ "Device [%s] failed to transition to %s\n",
+ device->pnp.bus_id,
+ acpi_power_state_string(state));
+ } else {
+ device->power.state = state;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Device [%s] transitioned to %s\n",
+ device->pnp.bus_id,
+ acpi_power_state_string(state)));
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(acpi_device_set_power);
+
+int acpi_bus_set_power(acpi_handle handle, int state)
+{
+ struct acpi_device *device;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result)
+ return result;
+
+ if (!device->flags.power_manageable) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Device [%s] is not power manageable\n",
+ dev_name(&device->dev)));
+ return -ENODEV;
+ }
+
+ return acpi_device_set_power(device, state);
+}
+EXPORT_SYMBOL(acpi_bus_set_power);
+
+int acpi_bus_init_power(struct acpi_device *device)
+{
+ int state;
+ int result;
+
+ if (!device)
+ return -EINVAL;
+
+ device->power.state = ACPI_STATE_UNKNOWN;
+
+ result = acpi_device_get_power(device, &state);
+ if (result)
+ return result;
+
+ if (state < ACPI_STATE_D3_COLD && device->power.flags.power_resources) {
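+		/* Make sure the power resources required for this state are actually on. */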
+ result = acpi_power_on_resources(device, state);
+ if (result)
+ return result;
+
+ result = acpi_dev_pm_explicit_set(device, state);
+ if (result)
+ return result;
+ } else if (state == ACPI_STATE_UNKNOWN) {
+ /* No power resources and missing _PSC? Try to force D0. */
+ state = ACPI_STATE_D0;
+ result = acpi_dev_pm_explicit_set(device, state);
+ if (result)
+ return result;
+ }
+ device->power.state = state;
+ return 0;
+}
+
+int acpi_bus_update_power(acpi_handle handle, int *state_p)
+{
+ struct acpi_device *device;
+ int state;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ if (result)
+ return result;
+
+ result = acpi_device_get_power(device, &state);
+ if (result)
+ return result;
+
+ if (state == ACPI_STATE_UNKNOWN)
+ state = ACPI_STATE_D0;
+
+ result = acpi_device_set_power(device, state);
+ if (!result && state_p)
+ *state_p = state;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_update_power);
+
+bool acpi_bus_power_manageable(acpi_handle handle)
+{
+ struct acpi_device *device;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ return result ? false : device->flags.power_manageable;
+}
+EXPORT_SYMBOL(acpi_bus_power_manageable);
+
+bool acpi_bus_can_wakeup(acpi_handle handle)
+{
+ struct acpi_device *device;
+ int result;
+
+ result = acpi_bus_get_device(handle, &device);
+ return result ? false : device->wakeup.flags.valid;
+}
+EXPORT_SYMBOL(acpi_bus_can_wakeup);
+
+/**
* acpi_device_power_state - Get preferred power state of ACPI device.
* @dev: Device whose preferred target power state to return.
* @adev: ACPI device node corresponding to @dev.
@@ -213,7 +506,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
struct acpi_device *adev;
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
return -ENODEV;
}
@@ -290,7 +583,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
return -EINVAL;
handle = DEVICE_ACPI_HANDLE(phys_dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
__func__);
return -ENODEV;
@@ -304,7 +597,7 @@ static inline void acpi_wakeup_device(acpi_handle handle, u32 event,
void *context) {}
#endif /* CONFIG_PM_RUNTIME */
- #ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
/**
* __acpi_device_sleep_wake - Enable or disable device to wake up the system.
 * @dev: Device to enable/disable to wake up the system.
@@ -334,7 +627,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
return -EINVAL;
handle = DEVICE_ACPI_HANDLE(dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ if (!handle || acpi_bus_get_device(handle, &adev)) {
dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
return -ENODEV;
}
@@ -353,7 +646,7 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
* acpi_dev_pm_get_node - Get ACPI device node for the given physical device.
* @dev: Device to get the ACPI node for.
*/
-static struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
+struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
{
acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
struct acpi_device *adev;
@@ -665,3 +958,59 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
}
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
+
+/**
+ * acpi_dev_pm_add_dependent - Add a physical device that depends on an ACPI node for PM.
+ * @handle: Handle of ACPI device node.
+ * @depdev: Device depending on that node for PM.
+ */
+void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
+{
+ struct acpi_device_physical_node *dep;
+ struct acpi_device *adev;
+
+ if (!depdev || acpi_bus_get_device(handle, &adev))
+ return;
+
+ mutex_lock(&adev->physical_node_lock);
+
+ list_for_each_entry(dep, &adev->power_dependent, node)
+ if (dep->dev == depdev)
+ goto out;
+
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
+ if (dep) {
+ dep->dev = depdev;
+ list_add_tail(&dep->node, &adev->power_dependent);
+ }
+
+ out:
+ mutex_unlock(&adev->physical_node_lock);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
+
+/**
+ * acpi_dev_pm_remove_dependent - Remove a physical device that depends on an ACPI node for PM.
+ * @handle: Handle of ACPI device node.
+ * @depdev: Device depending on that node for PM.
+ */
+void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
+{
+ struct acpi_device_physical_node *dep;
+ struct acpi_device *adev;
+
+ if (!depdev || acpi_bus_get_device(handle, &adev))
+ return;
+
+ mutex_lock(&adev->physical_node_lock);
+
+ list_for_each_entry(dep, &adev->power_dependent, node)
+ if (dep->dev == depdev) {
+ list_del(&dep->node);
+ kfree(dep);
+ break;
+ }
+
+ mutex_unlock(&adev->physical_node_lock);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index f32bd47b35e0..4fdea381ef21 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -310,8 +310,6 @@ static int dock_present(struct dock_station *ds)
static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
{
struct acpi_device *device;
- struct acpi_device *parent_device;
- acpi_handle parent;
int ret;
if (acpi_bus_get_device(handle, &device)) {
@@ -319,16 +317,11 @@ static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
* no device created for this object,
* so we should create one.
*/
- acpi_get_parent(handle, &parent);
- if (acpi_bus_get_device(parent, &parent_device))
- parent_device = NULL;
-
- ret = acpi_bus_add(&device, parent_device, handle,
- ACPI_BUS_TYPE_DEVICE);
- if (ret) {
+ ret = acpi_bus_scan(handle);
+ if (ret)
pr_debug("error adding bus, %x\n", -ret);
- return NULL;
- }
+
+ acpi_bus_get_device(handle, &device);
}
return device;
}
@@ -343,13 +336,9 @@ static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
static void dock_remove_acpi_device(acpi_handle handle)
{
struct acpi_device *device;
- int ret;
- if (!acpi_bus_get_device(handle, &device)) {
- ret = acpi_bus_trim(device, 1);
- if (ret)
- pr_debug("error removing bus, %x\n", -ret);
- }
+ if (!acpi_bus_get_device(handle, &device))
+ acpi_bus_trim(device);
}
/**
@@ -755,7 +744,9 @@ static void acpi_dock_deferred_cb(void *context)
{
struct dock_data *data = context;
+ acpi_scan_lock_acquire();
dock_notify(data->handle, data->event, data->ds);
+ acpi_scan_lock_release();
kfree(data);
}
@@ -768,20 +759,31 @@ static int acpi_dock_notifier_call(struct notifier_block *this,
if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
&& event != ACPI_NOTIFY_EJECT_REQUEST)
return 0;
+
+ acpi_scan_lock_acquire();
+
list_for_each_entry(dock_station, &dock_stations, sibling) {
if (dock_station->handle == handle) {
struct dock_data *dd;
+ acpi_status status;
dd = kmalloc(sizeof(*dd), GFP_KERNEL);
if (!dd)
- return 0;
+ break;
+
dd->handle = handle;
dd->event = event;
dd->ds = dock_station;
- acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
- return 0 ;
+ status = acpi_os_hotplug_execute(acpi_dock_deferred_cb,
+ dd);
+ if (ACPI_FAILURE(status))
+ kfree(dd);
+
+ break;
}
}
+
+ acpi_scan_lock_release();
return 0;
}
@@ -836,7 +838,7 @@ static ssize_t show_docked(struct device *dev,
struct dock_station *dock_station = dev->platform_data;
- if (ACPI_SUCCESS(acpi_bus_get_device(dock_station->handle, &tmp)))
+ if (!acpi_bus_get_device(dock_station->handle, &tmp))
return snprintf(buf, PAGE_SIZE, "1\n");
return snprintf(buf, PAGE_SIZE, "0\n");
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 354007d490d1..d45b2871d33b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -852,7 +852,7 @@ static int acpi_ec_add(struct acpi_device *device)
return ret;
}
-static int acpi_ec_remove(struct acpi_device *device, int type)
+static int acpi_ec_remove(struct acpi_device *device)
{
struct acpi_ec *ec;
struct acpi_ec_query_handler *handler, *tmp;
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 3bd6a54702d6..f815da82c765 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -45,7 +45,7 @@ MODULE_DESCRIPTION("ACPI Fan Driver");
MODULE_LICENSE("GPL");
static int acpi_fan_add(struct acpi_device *device);
-static int acpi_fan_remove(struct acpi_device *device, int type);
+static int acpi_fan_remove(struct acpi_device *device);
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
@@ -172,7 +172,7 @@ static int acpi_fan_add(struct acpi_device *device)
return result;
}
-static int acpi_fan_remove(struct acpi_device *device, int type)
+static int acpi_fan_remove(struct acpi_device *device)
{
struct thermal_cooling_device *cdev = acpi_driver_data(device);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 95af6f674a6c..ef6f155469b5 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -68,6 +68,9 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
{
struct acpi_bus_type *tmp, *ret = NULL;
+ if (!type)
+ return NULL;
+
down_read(&bus_type_sem);
list_for_each_entry(tmp, &bus_type_list, list) {
if (tmp->bus == type) {
@@ -95,40 +98,31 @@ static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
return ret;
}
-/* Get device's handler per its address under its parent */
-struct acpi_find_child {
- acpi_handle handle;
- u64 address;
-};
-
-static acpi_status
-do_acpi_find_child(acpi_handle handle, u32 lvl, void *context, void **rv)
+static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
+ void *addr_p, void **ret_p)
{
+ unsigned long long addr;
acpi_status status;
- struct acpi_device_info *info;
- struct acpi_find_child *find = context;
-
- status = acpi_get_object_info(handle, &info);
- if (ACPI_SUCCESS(status)) {
- if ((info->address == find->address)
- && (info->valid & ACPI_VALID_ADR))
- find->handle = handle;
- kfree(info);
+
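+	/* Terminate the walk as soon as a child whose _ADR matches the requested address is found. */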
+ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
+ if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
+ *ret_p = handle;
+ return AE_CTRL_TERMINATE;
}
return AE_OK;
}
acpi_handle acpi_get_child(acpi_handle parent, u64 address)
{
- struct acpi_find_child find = { NULL, address };
+ void *ret = NULL;
if (!parent)
return NULL;
- acpi_walk_namespace(ACPI_TYPE_DEVICE, parent,
- 1, do_acpi_find_child, NULL, &find, NULL);
- return find.handle;
-}
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
+ do_acpi_find_child, &address, &ret);
+ return (acpi_handle)ret;
+}
EXPORT_SYMBOL(acpi_get_child);
static int acpi_bind_one(struct device *dev, acpi_handle handle)
@@ -269,35 +263,46 @@ static int acpi_platform_notify(struct device *dev)
{
struct acpi_bus_type *type;
acpi_handle handle;
- int ret = -EINVAL;
+ int ret;
ret = acpi_bind_one(dev, NULL);
- if (!ret)
- goto out;
-
- if (!dev->bus || !dev->parent) {
+ if (ret && (!dev->bus || !dev->parent)) {
		/* bridge devices generally don't have a bus or parent */
ret = acpi_find_bridge_device(dev, &handle);
- goto end;
+ if (!ret) {
+ ret = acpi_bind_one(dev, handle);
+ if (ret)
+ goto out;
+ }
}
+
type = acpi_get_bus_type(dev->bus);
- if (!type) {
- DBG("No ACPI bus support for %s\n", dev_name(dev));
- ret = -EINVAL;
- goto end;
+ if (ret) {
+ if (!type || !type->find_device) {
+ DBG("No ACPI bus support for %s\n", dev_name(dev));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = type->find_device(dev, &handle);
+ if (ret) {
+ DBG("Unable to get handle for %s\n", dev_name(dev));
+ goto out;
+ }
+ ret = acpi_bind_one(dev, handle);
+ if (ret)
+ goto out;
}
- if ((ret = type->find_device(dev, &handle)) != 0)
- DBG("Can't get handler for %s\n", dev_name(dev));
- end:
- if (!ret)
- acpi_bind_one(dev, handle);
+
+ if (type && type->setup)
+ type->setup(dev);
out:
#if ACPI_GLUE_DEBUG
if (!ret) {
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_get_name(dev->acpi_handle, ACPI_FULL_PATHNAME, &buffer);
+ acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
kfree(buffer.pointer);
} else
@@ -309,6 +314,12 @@ static int acpi_platform_notify(struct device *dev)
static int acpi_platform_notify_remove(struct device *dev)
{
+ struct acpi_bus_type *type;
+
+ type = acpi_get_bus_type(dev->bus);
+ if (type && type->cleanup)
+ type->cleanup(dev);
+
acpi_unbind_one(dev);
return 0;
}
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index a0cc796932f7..13b1d39d7cdf 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -70,7 +70,7 @@ static int acpi_hed_add(struct acpi_device *device)
return 0;
}
-static int acpi_hed_remove(struct acpi_device *device, int type)
+static int acpi_hed_remove(struct acpi_device *device)
{
hed_handle = NULL;
return 0;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 3c407cdc1ec1..79092328cf06 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -25,7 +25,16 @@
int init_acpi_device_notify(void);
int acpi_scan_init(void);
+void acpi_pci_root_init(void);
+void acpi_pci_link_init(void);
+void acpi_platform_init(void);
int acpi_sysfs_init(void);
+void acpi_csrt_init(void);
+#ifdef CONFIG_ACPI_CONTAINER
+void acpi_container_init(void);
+#else
+static inline void acpi_container_init(void) {}
+#endif
#ifdef CONFIG_DEBUG_FS
extern struct dentry *acpi_debugfs_dir;
@@ -35,15 +44,33 @@ static inline void acpi_debugfs_init(void) { return; }
#endif
/* --------------------------------------------------------------------------
+ Device Node Initialization / Removal
+ -------------------------------------------------------------------------- */
+#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
+ ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
+
+int acpi_device_add(struct acpi_device *device,
+ void (*release)(struct device *));
+void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
+ int type, unsigned long long sta);
+void acpi_device_add_finalize(struct acpi_device *device);
+void acpi_free_ids(struct acpi_device *device);
+
+/* --------------------------------------------------------------------------
Power Resource
-------------------------------------------------------------------------- */
int acpi_power_init(void);
+void acpi_power_resources_list_free(struct list_head *list);
+int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
+ struct list_head *list);
+int acpi_add_power_resource(acpi_handle handle);
+void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
+int acpi_power_min_system_level(struct list_head *list);
int acpi_device_sleep_wake(struct acpi_device *dev,
int enable, int sleep_state, int dev_state);
int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
-int acpi_bus_init_power(struct acpi_device *device);
int acpi_wakeup_device_init(void);
void acpi_early_processor_set_pdc(void);
@@ -98,6 +125,4 @@ static inline void suspend_nvs_restore(void) {}
-------------------------------------------------------------------------- */
struct platform_device;
-struct platform_device *acpi_create_platform_device(struct acpi_device *adev);
-
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index cb31298ca684..59844ee149be 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -116,14 +116,16 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
struct acpi_srat_mem_affinity *p =
(struct acpi_srat_mem_affinity *)header;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s\n",
+ "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
(unsigned long)p->base_address,
(unsigned long)p->length,
p->proximity_domain,
(p->flags & ACPI_SRAT_MEM_ENABLED)?
"enabled" : "disabled",
(p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
- " hot-pluggable" : ""));
+ " hot-pluggable" : "",
+ (p->flags & ACPI_SRAT_MEM_NON_VOLATILE)?
+ " non-volatile" : ""));
}
#endif /* ACPI_DEBUG_OUTPUT */
break;
@@ -273,17 +275,17 @@ static int __init acpi_parse_srat(struct acpi_table_header *table)
static int __init
acpi_table_parse_srat(enum acpi_srat_type id,
- acpi_table_entry_handler handler, unsigned int max_entries)
+ acpi_tbl_entry_handler handler, unsigned int max_entries)
{
return acpi_table_parse_entries(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat), id,
handler, max_entries);
}
-int __init acpi_numa_init(void)
-{
- int cnt = 0;
+static int srat_mem_cnt;
+void __init early_parse_srat(void)
+{
/*
* Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
* SRAT cpu entries could have different order with that in MADT.
@@ -293,21 +295,24 @@ int __init acpi_numa_init(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, 0);
+ acpi_parse_x2apic_affinity, 0);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, 0);
- cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
- acpi_parse_memory_affinity,
- NR_NODE_MEMBLKS);
+ acpi_parse_processor_affinity, 0);
+ srat_mem_cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+ acpi_parse_memory_affinity,
+ NR_NODE_MEMBLKS);
}
+}
+int __init acpi_numa_init(void)
+{
/* SLIT: System Locality Information Table */
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
acpi_numa_arch_fixup();
- if (cnt < 0)
- return cnt;
+ if (srat_mem_cnt < 0)
+ return srat_mem_cnt;
else if (!parsed_numa_memblks)
return -ENOENT;
return 0;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 3ff267861541..908b02d5da1b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -250,7 +250,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
return acpi_rsdp;
#endif
- if (efi_enabled) {
+ if (efi_enabled(EFI_CONFIG_TABLES)) {
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
return efi.acpi20;
else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
@@ -787,7 +787,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
acpi_irq_handler = handler;
acpi_irq_context = context;
- if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
+ if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) {
printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
deleted file mode 100644
index a1dee29beed3..000000000000
--- a/drivers/acpi/pci_bind.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * pci_bind.c - ACPI PCI Device Binding ($Revision: 2 $)
- *
- * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/pci-acpi.h>
-#include <linux/acpi.h>
-#include <linux/pm_runtime.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
-#define _COMPONENT ACPI_PCI_COMPONENT
-ACPI_MODULE_NAME("pci_bind");
-
-static int acpi_pci_unbind(struct acpi_device *device)
-{
- struct pci_dev *dev;
-
- dev = acpi_get_pci_dev(device->handle);
- if (!dev)
- goto out;
-
- device_set_run_wake(&dev->dev, false);
- pci_acpi_remove_pm_notifier(device);
- acpi_power_resource_unregister_device(&dev->dev, device->handle);
-
- if (!dev->subordinate)
- goto out;
-
- acpi_pci_irq_del_prt(pci_domain_nr(dev->bus), dev->subordinate->number);
-
- device->ops.bind = NULL;
- device->ops.unbind = NULL;
-
-out:
- pci_dev_put(dev);
- return 0;
-}
-
-static int acpi_pci_bind(struct acpi_device *device)
-{
- acpi_status status;
- acpi_handle handle;
- unsigned char bus;
- struct pci_dev *dev;
-
- dev = acpi_get_pci_dev(device->handle);
- if (!dev)
- return 0;
-
- pci_acpi_add_pm_notifier(device, dev);
- acpi_power_resource_register_device(&dev->dev, device->handle);
- if (device->wakeup.flags.run_wake)
- device_set_run_wake(&dev->dev, true);
-
- /*
- * Install the 'bind' function to facilitate callbacks for
- * children of the P2P bridge.
- */
- if (dev->subordinate) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device %04x:%02x:%02x.%d is a PCI bridge\n",
- pci_domain_nr(dev->bus), dev->bus->number,
- PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)));
- device->ops.bind = acpi_pci_bind;
- device->ops.unbind = acpi_pci_unbind;
- }
-
- /*
- * Evaluate and parse _PRT, if exists. This code allows parsing of
- * _PRT objects within the scope of non-bridge devices. Note that
- * _PRTs within the scope of a PCI bridge assume the bridge's
- * subordinate bus number.
- *
- * TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
- */
- status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
- if (ACPI_FAILURE(status))
- goto out;
-
- if (dev->subordinate)
- bus = dev->subordinate->number;
- else
- bus = dev->bus->number;
-
- acpi_pci_irq_add_prt(device->handle, pci_domain_nr(dev->bus), bus);
-
-out:
- pci_dev_put(dev);
- return 0;
-}
-
-int acpi_pci_bind_root(struct acpi_device *device)
-{
- device->ops.bind = acpi_pci_bind;
- device->ops.unbind = acpi_pci_unbind;
-
- return 0;
-}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index a12808259dfb..ab764ed34a50 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -53,23 +53,19 @@ ACPI_MODULE_NAME("pci_link");
#define ACPI_PCI_LINK_FILE_STATUS "state"
#define ACPI_PCI_LINK_MAX_POSSIBLE 16
-static int acpi_pci_link_add(struct acpi_device *device);
-static int acpi_pci_link_remove(struct acpi_device *device, int type);
+static int acpi_pci_link_add(struct acpi_device *device,
+ const struct acpi_device_id *not_used);
+static void acpi_pci_link_remove(struct acpi_device *device);
static const struct acpi_device_id link_device_ids[] = {
{"PNP0C0F", 0},
{"", 0},
};
-MODULE_DEVICE_TABLE(acpi, link_device_ids);
-static struct acpi_driver acpi_pci_link_driver = {
- .name = "pci_link",
- .class = ACPI_PCI_LINK_CLASS,
+static struct acpi_scan_handler pci_link_handler = {
.ids = link_device_ids,
- .ops = {
- .add = acpi_pci_link_add,
- .remove = acpi_pci_link_remove,
- },
+ .attach = acpi_pci_link_add,
+ .detach = acpi_pci_link_remove,
};
/*
@@ -692,7 +688,8 @@ int acpi_pci_link_free_irq(acpi_handle handle)
Driver Interface
-------------------------------------------------------------------------- */
-static int acpi_pci_link_add(struct acpi_device *device)
+static int acpi_pci_link_add(struct acpi_device *device,
+ const struct acpi_device_id *not_used)
{
int result;
struct acpi_pci_link *link;
@@ -746,7 +743,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
if (result)
kfree(link);
- return result;
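+	/* A positive return tells the scan core that this handler has claimed the device. */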
+ return result < 0 ? result : 1;
}
static int acpi_pci_link_resume(struct acpi_pci_link *link)
@@ -766,7 +763,7 @@ static void irqrouter_resume(void)
}
}
-static int acpi_pci_link_remove(struct acpi_device *device, int type)
+static void acpi_pci_link_remove(struct acpi_device *device)
{
struct acpi_pci_link *link;
@@ -777,7 +774,6 @@ static int acpi_pci_link_remove(struct acpi_device *device, int type)
mutex_unlock(&acpi_link_lock);
kfree(link);
- return 0;
}
/*
@@ -874,20 +870,10 @@ static struct syscore_ops irqrouter_syscore_ops = {
.resume = irqrouter_resume,
};
-static int __init irqrouter_init_ops(void)
-{
- if (!acpi_disabled && !acpi_noirq)
- register_syscore_ops(&irqrouter_syscore_ops);
-
- return 0;
-}
-
-device_initcall(irqrouter_init_ops);
-
-static int __init acpi_pci_link_init(void)
+void __init acpi_pci_link_init(void)
{
if (acpi_noirq)
- return 0;
+ return;
if (acpi_irq_balance == -1) {
/* no command line switch: enable balancing in IOAPIC mode */
@@ -896,11 +882,6 @@ static int __init acpi_pci_link_init(void)
else
acpi_irq_balance = 0;
}
-
- if (acpi_bus_register_driver(&acpi_pci_link_driver) < 0)
- return -ENODEV;
-
- return 0;
+ register_syscore_ops(&irqrouter_syscore_ops);
+ acpi_scan_add_handler(&pci_link_handler);
}
-
-subsys_initcall(acpi_pci_link_init);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 7928d4dc7056..b3cc69c5caf1 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -45,9 +45,9 @@
ACPI_MODULE_NAME("pci_root");
#define ACPI_PCI_ROOT_CLASS "pci_bridge"
#define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge"
-static int acpi_pci_root_add(struct acpi_device *device);
-static int acpi_pci_root_remove(struct acpi_device *device, int type);
-static int acpi_pci_root_start(struct acpi_device *device);
+static int acpi_pci_root_add(struct acpi_device *device,
+ const struct acpi_device_id *not_used);
+static void acpi_pci_root_remove(struct acpi_device *device);
#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
| OSC_ACTIVE_STATE_PWR_SUPPORT \
@@ -58,17 +58,11 @@ static const struct acpi_device_id root_device_ids[] = {
{"PNP0A03", 0},
{"", 0},
};
-MODULE_DEVICE_TABLE(acpi, root_device_ids);
-static struct acpi_driver acpi_pci_root_driver = {
- .name = "pci_root",
- .class = ACPI_PCI_ROOT_CLASS,
+static struct acpi_scan_handler pci_root_handler = {
.ids = root_device_ids,
- .ops = {
- .add = acpi_pci_root_add,
- .remove = acpi_pci_root_remove,
- .start = acpi_pci_root_start,
- },
+ .attach = acpi_pci_root_add,
+ .detach = acpi_pci_root_remove,
};
/* Lock to protect both acpi_pci_roots and acpi_pci_drivers lists */
@@ -188,21 +182,6 @@ static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
return AE_OK;
}
-static void acpi_pci_bridge_scan(struct acpi_device *device)
-{
- int status;
- struct acpi_device *child = NULL;
-
- if (device->flags.bus_address)
- if (device->parent && device->parent->ops.bind) {
- status = device->parent->ops.bind(device);
- if (!status) {
- list_for_each_entry(child, &device->children, node)
- acpi_pci_bridge_scan(child);
- }
- }
-}
-
static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
static acpi_status acpi_pci_run_osc(acpi_handle handle,
@@ -445,14 +424,15 @@ out:
}
EXPORT_SYMBOL(acpi_pci_osc_control_set);
-static int acpi_pci_root_add(struct acpi_device *device)
+static int acpi_pci_root_add(struct acpi_device *device,
+ const struct acpi_device_id *not_used)
{
unsigned long long segment, bus;
acpi_status status;
int result;
struct acpi_pci_root *root;
acpi_handle handle;
- struct acpi_device *child;
+ struct acpi_pci_driver *driver;
u32 flags, base_flags;
bool is_osc_granted = false;
@@ -603,21 +583,6 @@ static int acpi_pci_root_add(struct acpi_device *device)
goto out_del_root;
}
- /*
- * Attach ACPI-PCI Context
- * -----------------------
- * Thus binding the ACPI and PCI devices.
- */
- result = acpi_pci_bind_root(device);
- if (result)
- goto out_del_root;
-
- /*
- * Scan and bind all _ADR-Based Devices
- */
- list_for_each_entry(child, &device->children, node)
- acpi_pci_bridge_scan(child);
-
/* ASPM setting */
if (is_osc_granted) {
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM)
@@ -632,24 +597,6 @@ static int acpi_pci_root_add(struct acpi_device *device)
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
- return 0;
-
-out_del_root:
- mutex_lock(&acpi_pci_root_lock);
- list_del(&root->node);
- mutex_unlock(&acpi_pci_root_lock);
-
- acpi_pci_irq_del_prt(root->segment, root->secondary.start);
-end:
- kfree(root);
- return result;
-}
-
-static int acpi_pci_root_start(struct acpi_device *device)
-{
- struct acpi_pci_root *root = acpi_driver_data(device);
- struct acpi_pci_driver *driver;
-
if (system_state != SYSTEM_BOOTING)
pci_assign_unassigned_bus_resources(root->bus);
@@ -664,11 +611,20 @@ static int acpi_pci_root_start(struct acpi_device *device)
pci_enable_bridges(root->bus);
pci_bus_add_devices(root->bus);
+ return 1;
- return 0;
+out_del_root:
+ mutex_lock(&acpi_pci_root_lock);
+ list_del(&root->node);
+ mutex_unlock(&acpi_pci_root_lock);
+
+ acpi_pci_irq_del_prt(root->segment, root->secondary.start);
+end:
+ kfree(root);
+ return result;
}
-static int acpi_pci_root_remove(struct acpi_device *device, int type)
+static void acpi_pci_root_remove(struct acpi_device *device)
{
acpi_status status;
acpi_handle handle;
@@ -696,21 +652,14 @@ static int acpi_pci_root_remove(struct acpi_device *device, int type)
list_del(&root->node);
mutex_unlock(&acpi_pci_root_lock);
kfree(root);
- return 0;
}
-static int __init acpi_pci_root_init(void)
+void __init acpi_pci_root_init(void)
{
acpi_hest_init();
- if (acpi_pci_disabled)
- return 0;
-
- pci_acpi_crs_quirks();
- if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
- return -ENODEV;
-
- return 0;
+ if (!acpi_pci_disabled) {
+ pci_acpi_crs_quirks();
+ acpi_scan_add_handler(&pci_root_handler);
+ }
}
-
-subsys_initcall(acpi_pci_root_init);
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index d22585f21aeb..2c630c006c2f 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -50,13 +50,12 @@ module_param(debug, bool, 0644);
ACPI_MODULE_NAME("pci_slot");
#define MY_NAME "pci_slot"
-#define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg)
+#define err(format, arg...) pr_err("%s: " format , MY_NAME , ## arg)
+#define info(format, arg...) pr_info("%s: " format , MY_NAME , ## arg)
#define dbg(format, arg...) \
do { \
if (debug) \
- printk(KERN_DEBUG "%s: " format, \
- MY_NAME , ## arg); \
+ pr_debug("%s: " format, MY_NAME , ## arg); \
} while (0)
#define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 6e7b9d523812..b820528a5fa3 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -41,6 +41,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include "sleep.h"
@@ -58,88 +59,121 @@ ACPI_MODULE_NAME("power");
#define ACPI_POWER_RESOURCE_STATE_ON 0x01
#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
-static int acpi_power_add(struct acpi_device *device);
-static int acpi_power_remove(struct acpi_device *device, int type);
-
-static const struct acpi_device_id power_device_ids[] = {
- {ACPI_POWER_HID, 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, power_device_ids);
-
-#ifdef CONFIG_PM_SLEEP
-static int acpi_power_resume(struct device *dev);
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume);
-
-static struct acpi_driver acpi_power_driver = {
- .name = "power",
- .class = ACPI_POWER_CLASS,
- .ids = power_device_ids,
- .ops = {
- .add = acpi_power_add,
- .remove = acpi_power_remove,
- },
- .drv.pm = &acpi_power_pm,
-};
-
-/*
- * A power managed device
- * A device may rely on multiple power resources.
- * */
-struct acpi_power_managed_device {
- struct device *dev; /* The physical device */
- acpi_handle *handle;
-};
-
-struct acpi_power_resource_device {
- struct acpi_power_managed_device *device;
- struct acpi_power_resource_device *next;
+struct acpi_power_dependent_device {
+ struct list_head node;
+ struct acpi_device *adev;
+ struct work_struct work;
};
struct acpi_power_resource {
- struct acpi_device * device;
- acpi_bus_id name;
+ struct acpi_device device;
+ struct list_head list_node;
+ struct list_head dependent;
+ char *name;
u32 system_level;
u32 order;
unsigned int ref_count;
struct mutex resource_lock;
+};
- /* List of devices relying on this power resource */
- struct acpi_power_resource_device *devices;
- struct mutex devices_lock;
+struct acpi_power_resource_entry {
+ struct list_head node;
+ struct acpi_power_resource *resource;
};
-static struct list_head acpi_power_resource_list;
+static LIST_HEAD(acpi_power_resource_list);
+static DEFINE_MUTEX(power_resource_list_lock);
/* --------------------------------------------------------------------------
Power Resource Management
-------------------------------------------------------------------------- */
-static int
-acpi_power_get_context(acpi_handle handle,
- struct acpi_power_resource **resource)
+static inline
+struct acpi_power_resource *to_power_resource(struct acpi_device *device)
{
- int result = 0;
- struct acpi_device *device = NULL;
+ return container_of(device, struct acpi_power_resource, device);
+}
+
+static struct acpi_power_resource *acpi_power_get_context(acpi_handle handle)
+{
+ struct acpi_device *device;
+ if (acpi_bus_get_device(handle, &device))
+ return NULL;
- if (!resource)
- return -ENODEV;
+ return to_power_resource(device);
+}
- result = acpi_bus_get_device(handle, &device);
- if (result) {
- printk(KERN_WARNING PREFIX "Getting context [%p]\n", handle);
- return result;
- }
+static int acpi_power_resources_list_add(acpi_handle handle,
+ struct list_head *list)
+{
+ struct acpi_power_resource *resource = acpi_power_get_context(handle);
+ struct acpi_power_resource_entry *entry;
- *resource = acpi_driver_data(device);
- if (!*resource)
- return -ENODEV;
+ if (!resource || !list)
+ return -EINVAL;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->resource = resource;
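+	/* Insert the entry so the list stays sorted by ascending resource order. */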
+ if (!list_empty(list)) {
+ struct acpi_power_resource_entry *e;
+ list_for_each_entry(e, list, node)
+ if (e->resource->order > resource->order) {
+ list_add_tail(&entry->node, &e->node);
+ return 0;
+ }
+ }
+ list_add_tail(&entry->node, list);
return 0;
}
+void acpi_power_resources_list_free(struct list_head *list)
+{
+ struct acpi_power_resource_entry *entry, *e;
+
+ list_for_each_entry_safe(entry, e, list, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
+ struct list_head *list)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = start; i < package->package.count; i++) {
+ union acpi_object *element = &package->package.elements[i];
+ acpi_handle rhandle;
+
+ if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
+ err = -ENODATA;
+ break;
+ }
+ rhandle = element->reference.handle;
+ if (!rhandle) {
+ err = -ENODEV;
+ break;
+ }
+ err = acpi_add_power_resource(rhandle);
+ if (err)
+ break;
+
+ err = acpi_power_resources_list_add(rhandle, list);
+ if (err)
+ break;
+ }
+ if (err)
+ acpi_power_resources_list_free(list);
+
+ return err;
+}
+
static int acpi_power_get_state(acpi_handle handle, int *state)
{
acpi_status status = AE_OK;
@@ -167,31 +201,23 @@ static int acpi_power_get_state(acpi_handle handle, int *state)
return 0;
}
-static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
+static int acpi_power_get_list_state(struct list_head *list, int *state)
{
+ struct acpi_power_resource_entry *entry;
int cur_state;
- int i = 0;
if (!list || !state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
-
- for (i = 0; i < list->count; i++) {
- struct acpi_power_resource *resource;
- acpi_handle handle = list->handles[i];
+ list_for_each_entry(entry, list, node) {
+ struct acpi_power_resource *resource = entry->resource;
+ acpi_handle handle = resource->device.handle;
int result;
- result = acpi_power_get_context(handle, &resource);
- if (result)
- return result;
-
mutex_lock(&resource->resource_lock);
-
result = acpi_power_get_state(handle, &cur_state);
-
mutex_unlock(&resource->resource_lock);
-
if (result)
return result;
@@ -203,54 +229,52 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
cur_state ? "on" : "off"));
*state = cur_state;
-
return 0;
}
-/* Resume the device when all power resources in _PR0 are on */
-static void acpi_power_on_device(struct acpi_power_managed_device *device)
+static void acpi_power_resume_dependent(struct work_struct *work)
{
- struct acpi_device *acpi_dev;
- acpi_handle handle = device->handle;
+ struct acpi_power_dependent_device *dep;
+ struct acpi_device_physical_node *pn;
+ struct acpi_device *adev;
int state;
- if (acpi_bus_get_device(handle, &acpi_dev))
+ dep = container_of(work, struct acpi_power_dependent_device, work);
+ adev = dep->adev;
+ if (acpi_power_get_inferred_state(adev, &state))
return;
- if(acpi_power_get_inferred_state(acpi_dev, &state))
+ if (state > ACPI_STATE_D0)
return;
- if (state == ACPI_STATE_D0 && pm_runtime_suspended(device->dev))
- pm_request_resume(device->dev);
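+ /* All resources required for D0 are "on", so resume the dependent devices. */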
+ mutex_lock(&adev->physical_node_lock);
+
+ list_for_each_entry(pn, &adev->physical_node_list, node)
+ pm_request_resume(pn->dev);
+
+ list_for_each_entry(pn, &adev->power_dependent, node)
+ pm_request_resume(pn->dev);
+
+ mutex_unlock(&adev->physical_node_lock);
}
static int __acpi_power_on(struct acpi_power_resource *resource)
{
acpi_status status = AE_OK;
- status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
+ status = acpi_evaluate_object(resource->device.handle, "_ON", NULL, NULL);
if (ACPI_FAILURE(status))
return -ENODEV;
- /* Update the power resource's _device_ power state */
- resource->device->power.state = ACPI_STATE_D0;
-
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
resource->name));
return 0;
}
-static int acpi_power_on(acpi_handle handle)
+static int acpi_power_on(struct acpi_power_resource *resource)
{
- int result = 0;
- bool resume_device = false;
- struct acpi_power_resource *resource = NULL;
- struct acpi_power_resource_device *device_list;
-
- result = acpi_power_get_context(handle, &resource);
- if (result)
- return result;
+ int result = 0;
mutex_lock(&resource->resource_lock);
@@ -260,39 +284,38 @@ static int acpi_power_on(acpi_handle handle)
resource->name));
} else {
result = __acpi_power_on(resource);
- if (result)
+ if (result) {
resource->ref_count--;
- else
- resume_device = true;
+ } else {
+ struct acpi_power_dependent_device *dep;
+
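+ /* The resource has just been turned on, so let dependent devices resume. */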
+ list_for_each_entry(dep, &resource->dependent, node)
+ schedule_work(&dep->work);
+ }
}
mutex_unlock(&resource->resource_lock);
- if (!resume_device)
- return result;
-
- mutex_lock(&resource->devices_lock);
+ return result;
+}
- device_list = resource->devices;
- while (device_list) {
- acpi_power_on_device(device_list->device);
- device_list = device_list->next;
- }
+static int __acpi_power_off(struct acpi_power_resource *resource)
+{
+ acpi_status status;
- mutex_unlock(&resource->devices_lock);
+ status = acpi_evaluate_object(resource->device.handle, "_OFF",
+ NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
- return result;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned off\n",
+ resource->name));
+ return 0;
}
-static int acpi_power_off(acpi_handle handle)
+static int acpi_power_off(struct acpi_power_resource *resource)
{
int result = 0;
- acpi_status status = AE_OK;
- struct acpi_power_resource *resource = NULL;
-
- result = acpi_power_get_context(handle, &resource);
- if (result)
- return result;
mutex_lock(&resource->resource_lock);
@@ -307,19 +330,10 @@ static int acpi_power_off(acpi_handle handle)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Power resource [%s] still in use\n",
resource->name));
- goto unlock;
- }
-
- status = acpi_evaluate_object(resource->device->handle, "_OFF", NULL, NULL);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
} else {
- /* Update the power resource's _device_ power state */
- resource->device->power.state = ACPI_STATE_D3;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Power resource [%s] turned off\n",
- resource->name));
+ result = __acpi_power_off(resource);
+ if (result)
+ resource->ref_count++;
}
unlock:
@@ -328,148 +342,202 @@ static int acpi_power_off(acpi_handle handle)
return result;
}
-static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res)
+static int acpi_power_off_list(struct list_head *list)
{
- int i;
+ struct acpi_power_resource_entry *entry;
+ int result = 0;
- for (i = num_res - 1; i >= 0 ; i--)
- acpi_power_off(list->handles[i]);
-}
+ list_for_each_entry_reverse(entry, list, node) {
+ result = acpi_power_off(entry->resource);
+ if (result)
+ goto err;
+ }
+ return 0;
-static void acpi_power_off_list(struct acpi_handle_list *list)
-{
- __acpi_power_off_list(list, list->count);
+ err:
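+ /* Turn the resources that were already switched off back on. */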
+ list_for_each_entry_continue(entry, list, node)
+ acpi_power_on(entry->resource);
+
+ return result;
}
-static int acpi_power_on_list(struct acpi_handle_list *list)
+static int acpi_power_on_list(struct list_head *list)
{
+ struct acpi_power_resource_entry *entry;
int result = 0;
- int i;
- for (i = 0; i < list->count; i++) {
- result = acpi_power_on(list->handles[i]);
- if (result) {
- __acpi_power_off_list(list, i);
- break;
- }
+ list_for_each_entry(entry, list, node) {
+ result = acpi_power_on(entry->resource);
+ if (result)
+ goto err;
}
+ return 0;
+
+ err:
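+ /* Switch off the resources that have already been turned on. */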
+ list_for_each_entry_continue_reverse(entry, list, node)
+ acpi_power_off(entry->resource);
return result;
}
-static void __acpi_power_resource_unregister_device(struct device *dev,
- acpi_handle res_handle)
+static void acpi_power_add_dependent(struct acpi_power_resource *resource,
+ struct acpi_device *adev)
{
- struct acpi_power_resource *resource = NULL;
- struct acpi_power_resource_device *prev, *curr;
+ struct acpi_power_dependent_device *dep;
- if (acpi_power_get_context(res_handle, &resource))
- return;
+ mutex_lock(&resource->resource_lock);
+
+ list_for_each_entry(dep, &resource->dependent, node)
+ if (dep->adev == adev)
+ goto out;
+
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
+ if (!dep)
+ goto out;
+
+ dep->adev = adev;
+ INIT_WORK(&dep->work, acpi_power_resume_dependent);
+ list_add_tail(&dep->node, &resource->dependent);
- mutex_lock(&resource->devices_lock);
- prev = NULL;
- curr = resource->devices;
- while (curr) {
- if (curr->device->dev == dev) {
- if (!prev)
- resource->devices = curr->next;
- else
- prev->next = curr->next;
-
- kfree(curr);
+ out:
+ mutex_unlock(&resource->resource_lock);
+}
+
+static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
+ struct acpi_device *adev)
+{
+ struct acpi_power_dependent_device *dep;
+ struct work_struct *work = NULL;
+
+ mutex_lock(&resource->resource_lock);
+
+ list_for_each_entry(dep, &resource->dependent, node)
+ if (dep->adev == adev) {
+ list_del(&dep->node);
+ work = &dep->work;
break;
}
- prev = curr;
- curr = curr->next;
+ mutex_unlock(&resource->resource_lock);
+
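+ /* If an entry was removed, make sure its work item is no longer running before freeing it. */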
+ if (work) {
+ cancel_work_sync(work);
+ kfree(dep);
}
- mutex_unlock(&resource->devices_lock);
}
-/* Unlink dev from all power resources in _PR0 */
-void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle)
-{
- struct acpi_device *acpi_dev;
- struct acpi_handle_list *list;
- int i;
+static struct attribute *attrs[] = {
+ NULL,
+};
- if (!dev || !handle)
- return;
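+/*
+ * These attribute groups are created empty; acpi_power_expose_list() fills
+ * them with links to the power resource devices required in each D-state.
+ */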
+static struct attribute_group attr_groups[] = {
+ [ACPI_STATE_D0] = {
+ .name = "power_resources_D0",
+ .attrs = attrs,
+ },
+ [ACPI_STATE_D1] = {
+ .name = "power_resources_D1",
+ .attrs = attrs,
+ },
+ [ACPI_STATE_D2] = {
+ .name = "power_resources_D2",
+ .attrs = attrs,
+ },
+ [ACPI_STATE_D3_HOT] = {
+ .name = "power_resources_D3hot",
+ .attrs = attrs,
+ },
+};
- if (acpi_bus_get_device(handle, &acpi_dev))
+static void acpi_power_hide_list(struct acpi_device *adev, int state)
+{
+ struct acpi_device_power_state *ps = &adev->power.states[state];
+ struct acpi_power_resource_entry *entry;
+
+ if (list_empty(&ps->resources))
return;
- list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
+ list_for_each_entry_reverse(entry, &ps->resources, node) {
+ struct acpi_device *res_dev = &entry->resource->device;
- for (i = 0; i < list->count; i++)
- __acpi_power_resource_unregister_device(dev,
- list->handles[i]);
+ sysfs_remove_link_from_group(&adev->dev.kobj,
+ attr_groups[state].name,
+ dev_name(&res_dev->dev));
+ }
+ sysfs_remove_group(&adev->dev.kobj, &attr_groups[state]);
}
-EXPORT_SYMBOL_GPL(acpi_power_resource_unregister_device);
-static int __acpi_power_resource_register_device(
- struct acpi_power_managed_device *powered_device, acpi_handle handle)
+static void acpi_power_expose_list(struct acpi_device *adev, int state)
{
- struct acpi_power_resource *resource = NULL;
- struct acpi_power_resource_device *power_resource_device;
- int result;
-
- result = acpi_power_get_context(handle, &resource);
- if (result)
- return result;
+ struct acpi_device_power_state *ps = &adev->power.states[state];
+ struct acpi_power_resource_entry *entry;
+ int ret;
- power_resource_device = kzalloc(
- sizeof(*power_resource_device), GFP_KERNEL);
- if (!power_resource_device)
- return -ENOMEM;
+ if (list_empty(&ps->resources))
+ return;
- power_resource_device->device = powered_device;
+ ret = sysfs_create_group(&adev->dev.kobj, &attr_groups[state]);
+ if (ret)
+ return;
- mutex_lock(&resource->devices_lock);
- power_resource_device->next = resource->devices;
- resource->devices = power_resource_device;
- mutex_unlock(&resource->devices_lock);
+ list_for_each_entry(entry, &ps->resources, node) {
+ struct acpi_device *res_dev = &entry->resource->device;
- return 0;
+ ret = sysfs_add_link_to_group(&adev->dev.kobj,
+ attr_groups[state].name,
+ &res_dev->dev.kobj,
+ dev_name(&res_dev->dev));
+ if (ret) {
+ acpi_power_hide_list(adev, state);
+ break;
+ }
+ }
}
-/* Link dev to all power resources in _PR0 */
-int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
+void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
{
- struct acpi_device *acpi_dev;
- struct acpi_handle_list *list;
- struct acpi_power_managed_device *powered_device;
- int i, ret;
+ struct acpi_device_power_state *ps;
+ struct acpi_power_resource_entry *entry;
+ int state;
- if (!dev || !handle)
- return -ENODEV;
+ if (!adev->power.flags.power_resources)
+ return;
- ret = acpi_bus_get_device(handle, &acpi_dev);
- if (ret || !acpi_dev->power.flags.power_resources)
- return -ENODEV;
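+ /* Devices are registered as dependent on their _PR0 (D0) power resources. */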
+ ps = &adev->power.states[ACPI_STATE_D0];
+ list_for_each_entry(entry, &ps->resources, node) {
+ struct acpi_power_resource *resource = entry->resource;
- powered_device = kzalloc(sizeof(*powered_device), GFP_KERNEL);
- if (!powered_device)
- return -ENOMEM;
+ if (add)
+ acpi_power_add_dependent(resource, adev);
+ else
+ acpi_power_remove_dependent(resource, adev);
+ }
- powered_device->dev = dev;
- powered_device->handle = handle;
+ for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) {
+ if (add)
+ acpi_power_expose_list(adev, state);
+ else
+ acpi_power_hide_list(adev, state);
+ }
+}
- list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
+int acpi_power_min_system_level(struct list_head *list)
+{
+ struct acpi_power_resource_entry *entry;
+ int system_level = 5;
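+ /* Start from S5 and take the lowest system level among the resources. */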
- for (i = 0; i < list->count; i++) {
- ret = __acpi_power_resource_register_device(powered_device,
- list->handles[i]);
+ list_for_each_entry(entry, list, node) {
+ struct acpi_power_resource *resource = entry->resource;
- if (ret) {
- acpi_power_resource_unregister_device(dev, handle);
- break;
- }
+ if (system_level > resource->system_level)
+ system_level = resource->system_level;
}
-
- return ret;
+ return system_level;
}
-EXPORT_SYMBOL_GPL(acpi_power_resource_register_device);
+
+/* --------------------------------------------------------------------------
+ Device Power Management
+ -------------------------------------------------------------------------- */
/**
* acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
@@ -542,7 +610,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
*/
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
{
- int i, err = 0;
+ int err = 0;
if (!dev || !dev->wakeup.flags.valid)
return -EINVAL;
@@ -552,24 +620,17 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
if (dev->wakeup.prepare_count++)
goto out;
- /* Open power resource */
- for (i = 0; i < dev->wakeup.resources.count; i++) {
- int ret = acpi_power_on(dev->wakeup.resources.handles[i]);
- if (ret) {
- printk(KERN_ERR PREFIX "Transition power state\n");
- dev->wakeup.flags.valid = 0;
- err = -ENODEV;
- goto err_out;
- }
+ err = acpi_power_on_list(&dev->wakeup.resources);
+ if (err) {
+ dev_err(&dev->dev, "Cannot turn wakeup power resources on\n");
+ dev->wakeup.flags.valid = 0;
+ } else {
+ /*
+ * Passing 3 as the third argument below means the device may be
+ * put into an arbitrary power state afterward.
+ */
+ err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
}
-
- /*
- * Passing 3 as the third argument below means the device may be placed
- * in arbitrary power state afterwards.
- */
- err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
-
- err_out:
if (err)
dev->wakeup.prepare_count = 0;
@@ -586,7 +647,7 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
*/
int acpi_disable_wakeup_device_power(struct acpi_device *dev)
{
- int i, err = 0;
+ int err = 0;
if (!dev || !dev->wakeup.flags.valid)
return -EINVAL;
@@ -607,15 +668,10 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
if (err)
goto out;
- /* Close power resource */
- for (i = 0; i < dev->wakeup.resources.count; i++) {
- int ret = acpi_power_off(dev->wakeup.resources.handles[i]);
- if (ret) {
- printk(KERN_ERR PREFIX "Transition power state\n");
- dev->wakeup.flags.valid = 0;
- err = -ENODEV;
- goto out;
- }
+ err = acpi_power_off_list(&dev->wakeup.resources);
+ if (err) {
+ dev_err(&dev->dev, "Cannot turn wakeup power resources off\n");
+ dev->wakeup.flags.valid = 0;
}
out:
@@ -623,14 +679,9 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
return err;
}
-/* --------------------------------------------------------------------------
- Device Power Management
- -------------------------------------------------------------------------- */
-
int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
{
int result = 0;
- struct acpi_handle_list *list = NULL;
int list_state = 0;
int i = 0;
@@ -642,8 +693,9 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
* required for a given D-state are 'on'.
*/
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
- list = &device->power.states[i].resources;
- if (list->count < 1)
+ struct list_head *list = &device->power.states[i].resources;
+
+ if (list_empty(list))
continue;
result = acpi_power_get_list_state(list, &list_state);
@@ -662,7 +714,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
int acpi_power_on_resources(struct acpi_device *device, int state)
{
- if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3)
+ if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3_HOT)
return -EINVAL;
return acpi_power_on_list(&device->power.states[state].resources);
@@ -675,7 +727,7 @@ int acpi_power_transition(struct acpi_device *device, int state)
if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
- if (device->power.state == state)
+ if (device->power.state == state || !device->flags.power_manageable)
return 0;
if ((device->power.state < ACPI_STATE_D0)
@@ -703,118 +755,126 @@ int acpi_power_transition(struct acpi_device *device, int state)
return result;
}
-/* --------------------------------------------------------------------------
- Driver Interface
- -------------------------------------------------------------------------- */
+static void acpi_release_power_resource(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct acpi_power_resource *resource;
+
+ resource = container_of(device, struct acpi_power_resource, device);
+
+ mutex_lock(&power_resource_list_lock);
+ list_del(&resource->list_node);
+ mutex_unlock(&power_resource_list_lock);
+
+ acpi_free_ids(device);
+ kfree(resource);
+}
-static int acpi_power_add(struct acpi_device *device)
+static ssize_t acpi_power_in_use_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_power_resource *resource;
+
+ resource = to_power_resource(to_acpi_device(dev));
+ return sprintf(buf, "%u\n", !!resource->ref_count);
+}
+static DEVICE_ATTR(resource_in_use, 0444, acpi_power_in_use_show, NULL);
+
+static void acpi_power_sysfs_remove(struct acpi_device *device)
{
- int result = 0, state;
- acpi_status status = AE_OK;
- struct acpi_power_resource *resource = NULL;
+ device_remove_file(&device->dev, &dev_attr_resource_in_use);
+}
+
+int acpi_add_power_resource(acpi_handle handle)
+{
+ struct acpi_power_resource *resource;
+ struct acpi_device *device = NULL;
union acpi_object acpi_object;
struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
+ acpi_status status;
+ int state, result = -ENODEV;
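+ /* If a device object already exists, the power resource has been added. */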
+ acpi_bus_get_device(handle, &device);
+ if (device)
+ return 0;
- if (!device)
- return -EINVAL;
-
- resource = kzalloc(sizeof(struct acpi_power_resource), GFP_KERNEL);
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
if (!resource)
return -ENOMEM;
- resource->device = device;
+ device = &resource->device;
+ acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
+ ACPI_STA_DEFAULT);
mutex_init(&resource->resource_lock);
- mutex_init(&resource->devices_lock);
- strcpy(resource->name, device->pnp.bus_id);
+ INIT_LIST_HEAD(&resource->dependent);
+ resource->name = device->pnp.bus_id;
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
- device->driver_data = resource;
+ device->power.state = ACPI_STATE_UNKNOWN;
/* Evalute the object to get the system level and resource order. */
- status = acpi_evaluate_object(device->handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
+ status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ goto err;
+
resource->system_level = acpi_object.power_resource.system_level;
resource->order = acpi_object.power_resource.resource_order;
- result = acpi_power_get_state(device->handle, &state);
+ result = acpi_power_get_state(handle, &state);
if (result)
- goto end;
-
- switch (state) {
- case ACPI_POWER_RESOURCE_STATE_ON:
- device->power.state = ACPI_STATE_D0;
- break;
- case ACPI_POWER_RESOURCE_STATE_OFF:
- device->power.state = ACPI_STATE_D3;
- break;
- default:
- device->power.state = ACPI_STATE_UNKNOWN;
- break;
- }
+ goto err;
printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device),
acpi_device_bid(device), state ? "on" : "off");
- end:
+ device->flags.match_driver = true;
+ result = acpi_device_add(device, acpi_release_power_resource);
if (result)
- kfree(resource);
+ goto err;
- return result;
-}
-
-static int acpi_power_remove(struct acpi_device *device, int type)
-{
- struct acpi_power_resource *resource;
-
- if (!device)
- return -EINVAL;
-
- resource = acpi_driver_data(device);
- if (!resource)
- return -EINVAL;
-
- kfree(resource);
+ if (!device_create_file(&device->dev, &dev_attr_resource_in_use))
+ device->remove = acpi_power_sysfs_remove;
+ mutex_lock(&power_resource_list_lock);
+ list_add(&resource->list_node, &acpi_power_resource_list);
+ mutex_unlock(&power_resource_list_lock);
+ acpi_device_add_finalize(device);
return 0;
+
+ err:
+ acpi_release_power_resource(&device->dev);
+ return result;
}
-#ifdef CONFIG_PM_SLEEP
-static int acpi_power_resume(struct device *dev)
+#ifdef CONFIG_ACPI_SLEEP
+void acpi_resume_power_resources(void)
{
- int result = 0, state;
- struct acpi_device *device;
struct acpi_power_resource *resource;
- if (!dev)
- return -EINVAL;
+ mutex_lock(&power_resource_list_lock);
- device = to_acpi_device(dev);
- resource = acpi_driver_data(device);
- if (!resource)
- return -EINVAL;
+ list_for_each_entry(resource, &acpi_power_resource_list, list_node) {
+ int result, state;
- mutex_lock(&resource->resource_lock);
+ mutex_lock(&resource->resource_lock);
- result = acpi_power_get_state(device->handle, &state);
- if (result)
- goto unlock;
+ result = acpi_power_get_state(resource->device.handle, &state);
+ if (result)
+ continue;
- if (state == ACPI_POWER_RESOURCE_STATE_OFF && resource->ref_count)
- result = __acpi_power_on(resource);
+ if (state == ACPI_POWER_RESOURCE_STATE_OFF
+ && resource->ref_count) {
+ dev_info(&resource->device.dev, "Turning ON\n");
+ __acpi_power_on(resource);
+ } else if (state == ACPI_POWER_RESOURCE_STATE_ON
+ && !resource->ref_count) {
+ dev_info(&resource->device.dev, "Turning OFF\n");
+ __acpi_power_off(resource);
+ }
- unlock:
- mutex_unlock(&resource->resource_lock);
+ mutex_unlock(&resource->resource_lock);
+ }
- return result;
+ mutex_unlock(&power_resource_list_lock);
}
#endif
-
-int __init acpi_power_init(void)
-{
- INIT_LIST_HEAD(&acpi_power_resource_list);
- return acpi_bus_register_driver(&acpi_power_driver);
-}
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index ef98796b3824..52ce76725c20 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,11 +311,12 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
dev->pnp.bus_id,
(u32) dev->wakeup.sleep_state);
- if (!dev->physical_node_count)
+ if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
- dev->wakeup.flags.run_wake ?
- '*' : ' ', "disabled");
- else {
+ dev->wakeup.flags.run_wake ? '*' : ' ',
+ device_may_wakeup(&dev->dev) ?
+ "enabled" : "disabled");
+ } else {
struct device *ldev;
list_for_each_entry(entry, &dev->physical_node_list,
node) {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index e83311bf1ebd..df34bd04ae62 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -45,6 +45,7 @@
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/memory_hotplug.h>
#include <asm/io.h>
#include <asm/cpu.h>
@@ -81,7 +82,7 @@ MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
-static int acpi_processor_remove(struct acpi_device *device, int type);
+static int acpi_processor_remove(struct acpi_device *device);
static void acpi_processor_notify(struct acpi_device *device, u32 event);
static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
@@ -610,7 +611,7 @@ err_free_pr:
return result;
}
-static int acpi_processor_remove(struct acpi_device *device, int type)
+static int acpi_processor_remove(struct acpi_device *device)
{
struct acpi_processor *pr = NULL;
@@ -623,7 +624,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
if (pr->id >= nr_cpu_ids)
goto free;
- if (type == ACPI_BUS_REMOVAL_EJECT) {
+ if (device->removal_type == ACPI_BUS_REMOVAL_EJECT) {
if (acpi_processor_handle_eject(pr))
return -EINVAL;
}
@@ -641,6 +642,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
per_cpu(processors, pr->id) = NULL;
per_cpu(processor_device_array, pr->id) = NULL;
+ try_offline_node(cpu_to_node(pr->id));
free:
free_cpumask_var(pr->throttling.shared_cpu_map);
@@ -677,36 +679,17 @@ static int is_processor_present(acpi_handle handle)
return 0;
}
-static
-int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
-{
- acpi_handle phandle;
- struct acpi_device *pdev;
-
-
- if (acpi_get_parent(handle, &phandle)) {
- return -ENODEV;
- }
-
- if (acpi_bus_get_device(phandle, &pdev)) {
- return -ENODEV;
- }
-
- if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
- return -ENODEV;
- }
-
- return 0;
-}
-
static void acpi_processor_hotplug_notify(acpi_handle handle,
u32 event, void *data)
{
struct acpi_device *device = NULL;
struct acpi_eject_event *ej_event = NULL;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
+ acpi_status status;
int result;
+ acpi_scan_lock_acquire();
+
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
@@ -721,12 +704,16 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
if (!acpi_bus_get_device(handle, &device))
break;
- result = acpi_processor_device_add(handle, &device);
+ result = acpi_bus_scan(handle);
if (result) {
acpi_handle_err(handle, "Unable to add the device\n");
break;
}
-
+ result = acpi_bus_get_device(handle, &device);
+ if (result) {
+ acpi_handle_err(handle, "Missing device object\n");
+ break;
+ }
ost_code = ACPI_OST_SC_SUCCESS;
break;
@@ -751,25 +738,32 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
break;
}
- ej_event->handle = handle;
+ get_device(&device->dev);
+ ej_event->device = device;
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
- acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
- (void *)ej_event);
-
- /* eject is performed asynchronously */
- return;
+ /* The eject is carried out asynchronously. */
+ status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
+ ej_event);
+ if (ACPI_FAILURE(status)) {
+ put_device(&device->dev);
+ kfree(ej_event);
+ break;
+ }
+ goto out;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
/* non-hotplug event; possibly handled by other handler */
- return;
+ goto out;
}
/* Inform firmware that the hotplug operation has completed */
(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
- return;
+
+ out:
+ acpi_scan_lock_release();
}
static acpi_status is_processor_device(acpi_handle handle)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f1a5da44591d..fc95308e9a11 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -28,19 +28,12 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
-#include <linux/moduleparam.h>
-#include <linux/sched.h> /* need_resched() */
-#include <linux/pm_qos.h>
+#include <linux/sched.h> /* need_resched() */
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
-#include <linux/irqflags.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
@@ -52,22 +45,14 @@
#include <asm/apic.h>
#endif
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
-#include <asm/processor.h>
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
-#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
-#define C2_OVERHEAD 1 /* 1us */
-#define C3_OVERHEAD 1 /* 1us */
-#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
@@ -81,10 +66,11 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
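+/* ACPI C-state data for each cpuidle state index, shared by all CPUs. */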
+static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX];
+
static int disabled_by_idle_boot_param(void)
{
return boot_option_idle_override == IDLE_POLL ||
- boot_option_idle_override == IDLE_FORCE_MWAIT ||
boot_option_idle_override == IDLE_HALT;
}
@@ -736,8 +722,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
pr = __this_cpu_read(processors);
@@ -760,8 +745,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
*/
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
ACPI_FLUSH_CPU_CACHE();
@@ -791,8 +775,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
pr = __this_cpu_read(processors);
@@ -850,8 +833,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
pr = __this_cpu_read(processors);
@@ -943,13 +925,13 @@ struct cpuidle_driver acpi_idle_driver = {
* device i.e. per-cpu data
*
* @pr: the ACPI processor
+ * @dev: the cpuidle device
*/
-static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
+static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
+ struct cpuidle_device *dev)
{
int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
- struct cpuidle_state_usage *state_usage;
- struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
if (!pr->flags.power_setup_done)
return -EINVAL;
@@ -958,6 +940,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
return -EINVAL;
}
+ if (!dev)
+ return -EINVAL;
+
dev->cpu = pr->id;
if (max_cstate == 0)
@@ -965,7 +950,6 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
cx = &pr->power.states[i];
- state_usage = &dev->states_usage[count];
if (!cx->valid)
continue;
@@ -976,8 +960,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
continue;
#endif
-
- cpuidle_set_statedata(state_usage, cx);
+ acpi_cstate[count] = cx;
count++;
if (count == CPUIDLE_STATE_MAX)
@@ -1101,7 +1084,7 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
cpuidle_disable_device(dev);
acpi_processor_get_power_info(pr);
if (pr->flags.power) {
- acpi_processor_setup_cpuidle_cx(pr);
+ acpi_processor_setup_cpuidle_cx(pr, dev);
ret = cpuidle_enable_device(dev);
}
cpuidle_resume_and_unlock();
@@ -1149,6 +1132,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
}
/* Populate Updated C-state information */
+ acpi_processor_get_power_info(pr);
acpi_processor_setup_cpuidle_states(pr);
/* Enable all cpuidle devices */
@@ -1158,8 +1142,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
continue;
acpi_processor_get_power_info(_pr);
if (_pr->flags.power) {
- acpi_processor_setup_cpuidle_cx(_pr);
dev = per_cpu(acpi_cpuidle_device, cpu);
+ acpi_processor_setup_cpuidle_cx(_pr, dev);
cpuidle_enable_device(dev);
}
}
@@ -1228,7 +1212,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
return -ENOMEM;
per_cpu(acpi_cpuidle_device, pr->id) = dev;
- acpi_processor_setup_cpuidle_cx(pr);
+ acpi_processor_setup_cpuidle_cx(pr, dev);
/* Register per-cpu cpuidle_device. Cpuidle driver
* must already be registered before registering device
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 836bfe069042..53e7ac9403a7 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -340,6 +340,13 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
|| boot_cpu_data.x86 == 0x11) {
rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
+ /*
+ * MSR C001_0064+:
+ * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
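+ * (rdmsr() returns the high 32 bits in "hi", so bit 63 is BIT(31) of "hi".)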
+ */
+ if (!(hi & BIT(31)))
+ return;
+
fid = lo & 0x3f;
did = (lo >> 6) & 7;
if (boot_cpu_data.x86 == 0x10)
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index ff0740e0a9c2..e523245643ac 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -130,7 +130,7 @@ struct acpi_sbs {
#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger)
-static int acpi_sbs_remove(struct acpi_device *device, int type);
+static int acpi_sbs_remove(struct acpi_device *device);
static int acpi_battery_get_state(struct acpi_battery *battery);
static inline int battery_scale(int log)
@@ -949,11 +949,11 @@ static int acpi_sbs_add(struct acpi_device *device)
acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs);
end:
if (result)
- acpi_sbs_remove(device, 0);
+ acpi_sbs_remove(device);
return result;
}
-static int acpi_sbs_remove(struct acpi_device *device, int type)
+static int acpi_sbs_remove(struct acpi_device *device)
{
struct acpi_sbs *sbs;
int id;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index cf6129a8af7c..b78bc605837e 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -33,7 +33,7 @@ struct acpi_smb_hc {
};
static int acpi_smbus_hc_add(struct acpi_device *device);
-static int acpi_smbus_hc_remove(struct acpi_device *device, int type);
+static int acpi_smbus_hc_remove(struct acpi_device *device);
static const struct acpi_device_id sbs_device_ids[] = {
{"ACPI0001", 0},
@@ -296,7 +296,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
-static int acpi_smbus_hc_remove(struct acpi_device *device, int type)
+static int acpi_smbus_hc_remove(struct acpi_device *device)
{
struct acpi_smb_hc *hc;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index c88be6c37c30..daee7497efd3 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -29,29 +29,10 @@ extern struct acpi_device *acpi_root;
static const char *dummy_hid = "device";
-/*
- * The following ACPI IDs are known to be suitable for representing as
- * platform devices.
- */
-static const struct acpi_device_id acpi_platform_device_ids[] = {
-
- { "PNP0D40" },
-
- /* Haswell LPSS devices */
- { "INT33C0", 0 },
- { "INT33C1", 0 },
- { "INT33C2", 0 },
- { "INT33C3", 0 },
- { "INT33C4", 0 },
- { "INT33C5", 0 },
- { "INT33C6", 0 },
- { "INT33C7", 0 },
-
- { }
-};
-
static LIST_HEAD(acpi_device_list);
static LIST_HEAD(acpi_bus_id_list);
+static DEFINE_MUTEX(acpi_scan_lock);
+static LIST_HEAD(acpi_scan_handlers_list);
DEFINE_MUTEX(acpi_device_lock);
LIST_HEAD(acpi_wakeup_device_list);
@@ -61,6 +42,27 @@ struct acpi_device_bus_id{
struct list_head node;
};
+void acpi_scan_lock_acquire(void)
+{
+ mutex_lock(&acpi_scan_lock);
+}
+EXPORT_SYMBOL_GPL(acpi_scan_lock_acquire);
+
+void acpi_scan_lock_release(void)
+{
+ mutex_unlock(&acpi_scan_lock);
+}
+EXPORT_SYMBOL_GPL(acpi_scan_lock_release);
+
+int acpi_scan_add_handler(struct acpi_scan_handler *handler)
+{
+ if (!handler || !handler->attach)
+ return -EINVAL;
+
+ list_add_tail(&handler->list_node, &acpi_scan_handlers_list);
+ return 0;
+}
+
/*
* Creates hid/cid(s) string needed for modalias and uevent
* e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
@@ -115,39 +117,32 @@ static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
*/
void acpi_bus_hot_remove_device(void *context)
{
- struct acpi_eject_event *ej_event = (struct acpi_eject_event *) context;
- struct acpi_device *device;
- acpi_handle handle = ej_event->handle;
+ struct acpi_eject_event *ej_event = context;
+ struct acpi_device *device = ej_event->device;
+ acpi_handle handle = device->handle;
acpi_handle temp;
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status = AE_OK;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
- if (acpi_bus_get_device(handle, &device))
- goto err_out;
+ mutex_lock(&acpi_scan_lock);
- if (!device)
- goto err_out;
+ /* If there is no handle, the device node has been unregistered. */
+ if (!device->handle) {
+ dev_dbg(&device->dev, "ACPI handle missing\n");
+ put_device(&device->dev);
+ goto out;
+ }
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Hot-removing device %s...\n", dev_name(&device->dev)));
- if (acpi_bus_trim(device, 1)) {
- printk(KERN_ERR PREFIX
- "Removing device failed\n");
- goto err_out;
- }
-
- /* device has been freed */
+ acpi_bus_trim(device);
+ /* Device node has been unregistered. */
+ put_device(&device->dev);
device = NULL;
- /* power off device */
- status = acpi_evaluate_object(handle, "_PS3", NULL, NULL);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
- printk(KERN_WARNING PREFIX
- "Power-off device failed\n");
-
if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &temp))) {
arg_list.count = 1;
arg_list.pointer = &arg;
@@ -167,23 +162,46 @@ void acpi_bus_hot_remove_device(void *context)
status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
- printk(KERN_WARNING PREFIX
- "Eject device failed\n");
- goto err_out;
- }
+ acpi_handle_warn(handle, "Eject failed\n");
- kfree(context);
- return;
+ /* Tell the firmware the hot-remove operation has failed. */
+ acpi_evaluate_hotplug_ost(handle, ej_event->event,
+ ost_code, NULL);
+ }
-err_out:
- /* Inform firmware the hot-remove operation has completed w/ error */
- (void) acpi_evaluate_hotplug_ost(handle,
- ej_event->event, ost_code, NULL);
+ out:
+ mutex_unlock(&acpi_scan_lock);
kfree(context);
return;
}
EXPORT_SYMBOL(acpi_bus_hot_remove_device);
+static ssize_t real_power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+ int state;
+ int ret;
+
+ ret = acpi_device_get_power(adev, &state);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%s\n", acpi_power_state_string(state));
+}
+
+static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
+
+static ssize_t power_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+
+ return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
+}
+
+static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
+
static ssize_t
acpi_eject_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
@@ -197,12 +215,10 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
if ((!count) || (buf[0] != '1')) {
return -EINVAL;
}
-#ifndef FORCE_EJECT
- if (acpi_device->driver == NULL) {
+ if (!acpi_device->driver && !acpi_device->handler) {
ret = -ENODEV;
goto err;
}
-#endif
status = acpi_get_type(acpi_device->handle, &type);
if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) {
ret = -ENODEV;
@@ -215,7 +231,8 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
goto err;
}
- ej_event->handle = acpi_device->handle;
+ get_device(&acpi_device->dev);
+ ej_event->device = acpi_device;
if (acpi_device->flags.eject_pending) {
/* event originated from ACPI eject notification */
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
@@ -223,11 +240,15 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
} else {
/* event originated from user */
ej_event->event = ACPI_OST_EC_OSPM_EJECT;
- (void) acpi_evaluate_hotplug_ost(ej_event->handle,
+ (void) acpi_evaluate_hotplug_ost(acpi_device->handle,
ej_event->event, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
}
- acpi_os_hotplug_execute(acpi_bus_hot_remove_device, (void *)ej_event);
+ status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
+ if (ACPI_FAILURE(status)) {
+ put_device(&acpi_device->dev);
+ kfree(ej_event);
+ }
err:
return ret;
}
@@ -375,8 +396,22 @@ static int acpi_device_setup_files(struct acpi_device *dev)
* hot-removal function from userland.
*/
status = acpi_get_handle(dev->handle, "_EJ0", &temp);
- if (ACPI_SUCCESS(status))
+ if (ACPI_SUCCESS(status)) {
result = device_create_file(&dev->dev, &dev_attr_eject);
+ if (result)
+ return result;
+ }
+
+ if (dev->flags.power_manageable) {
+ result = device_create_file(&dev->dev, &dev_attr_power_state);
+ if (result)
+ return result;
+
+ if (dev->power.flags.power_resources)
+ result = device_create_file(&dev->dev,
+ &dev_attr_real_power_state);
+ }
+
end:
return result;
}
@@ -386,6 +421,13 @@ static void acpi_device_remove_files(struct acpi_device *dev)
acpi_status status;
acpi_handle temp;
+ if (dev->flags.power_manageable) {
+ device_remove_file(&dev->dev, &dev_attr_power_state);
+ if (dev->power.flags.power_resources)
+ device_remove_file(&dev->dev,
+ &dev_attr_real_power_state);
+ }
+
/*
* If device has _STR, remove 'description' file
*/
@@ -454,9 +496,9 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev)
{
struct acpi_device *adev;
+ acpi_handle handle = ACPI_HANDLE(dev);
- if (!ids || !ACPI_HANDLE(dev)
- || ACPI_FAILURE(acpi_bus_get_device(ACPI_HANDLE(dev), &adev)))
+ if (!ids || !handle || acpi_bus_get_device(handle, &adev))
return NULL;
return __acpi_match_device(adev, ids);
@@ -470,7 +512,7 @@ int acpi_match_device_ids(struct acpi_device *device,
}
EXPORT_SYMBOL(acpi_match_device_ids);
-static void acpi_free_ids(struct acpi_device *device)
+void acpi_free_ids(struct acpi_device *device)
{
struct acpi_hardware_id *id, *tmp;
@@ -478,6 +520,23 @@ static void acpi_free_ids(struct acpi_device *device)
kfree(id->id);
kfree(id);
}
+ kfree(device->pnp.unique_id);
+}
+
+static void acpi_free_power_resources_lists(struct acpi_device *device)
+{
+ int i;
+
+ if (device->wakeup.flags.valid)
+ acpi_power_resources_list_free(&device->wakeup.resources);
+
+ if (!device->flags.power_manageable)
+ return;
+
+ for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
+ struct acpi_device_power_state *ps = &device->power.states[i];
+ acpi_power_resources_list_free(&ps->resources);
+ }
}
static void acpi_device_release(struct device *dev)
@@ -485,7 +544,7 @@ static void acpi_device_release(struct device *dev)
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_free_ids(acpi_dev);
- kfree(acpi_dev->pnp.unique_id);
+ acpi_free_power_resources_lists(acpi_dev);
kfree(acpi_dev);
}
@@ -494,7 +553,8 @@ static int acpi_bus_match(struct device *dev, struct device_driver *drv)
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = to_acpi_driver(drv);
- return !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
+ return acpi_dev->flags.match_driver
+ && !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
}
static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -570,7 +630,6 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
}
static int acpi_bus_driver_init(struct acpi_device *, struct acpi_driver *);
-static int acpi_start_single_object(struct acpi_device *);
static int acpi_device_probe(struct device * dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -579,15 +638,13 @@ static int acpi_device_probe(struct device * dev)
ret = acpi_bus_driver_init(acpi_dev, acpi_drv);
if (!ret) {
- if (acpi_dev->bus_ops.acpi_op_start)
- acpi_start_single_object(acpi_dev);
-
if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev);
if (ret) {
if (acpi_drv->ops.remove)
- acpi_drv->ops.remove(acpi_dev,
- acpi_dev->removal_type);
+ acpi_drv->ops.remove(acpi_dev);
+ acpi_dev->driver = NULL;
+ acpi_dev->driver_data = NULL;
return ret;
}
}
@@ -609,7 +666,7 @@ static int acpi_device_remove(struct device * dev)
if (acpi_drv->ops.notify)
acpi_device_remove_notify_handler(acpi_dev);
if (acpi_drv->ops.remove)
- acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type);
+ acpi_drv->ops.remove(acpi_dev);
}
acpi_dev->driver = NULL;
acpi_dev->driver_data = NULL;
@@ -626,12 +683,25 @@ struct bus_type acpi_bus_type = {
.uevent = acpi_device_uevent,
};
-static int acpi_device_register(struct acpi_device *device)
+int acpi_device_add(struct acpi_device *device,
+ void (*release)(struct device *))
{
int result;
struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
int found = 0;
+ if (device->handle) {
+ acpi_status status;
+
+ status = acpi_attach_data(device->handle, acpi_bus_data_handler,
+ device);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(device->handle,
+ "Unable to attach device data\n");
+ return -ENODEV;
+ }
+ }
+
/*
* Linkage
* -------
@@ -642,11 +712,13 @@ static int acpi_device_register(struct acpi_device *device)
INIT_LIST_HEAD(&device->wakeup_list);
INIT_LIST_HEAD(&device->physical_node_list);
mutex_init(&device->physical_node_lock);
+ INIT_LIST_HEAD(&device->power_dependent);
new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
if (!new_bus_id) {
- printk(KERN_ERR PREFIX "Memory allocation error\n");
- return -ENOMEM;
+ pr_err(PREFIX "Memory allocation error\n");
+ result = -ENOMEM;
+ goto err_detach;
}
mutex_lock(&acpi_device_lock);
@@ -681,11 +753,11 @@ static int acpi_device_register(struct acpi_device *device)
if (device->parent)
device->dev.parent = &device->parent->dev;
device->dev.bus = &acpi_bus_type;
- device->dev.release = &acpi_device_release;
- result = device_register(&device->dev);
+ device->dev.release = release;
+ result = device_add(&device->dev);
if (result) {
dev_err(&device->dev, "Error registering device\n");
- goto end;
+ goto err;
}
result = acpi_device_setup_files(device);
@@ -695,16 +767,20 @@ static int acpi_device_register(struct acpi_device *device)
device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
return 0;
-end:
+
+ err:
mutex_lock(&acpi_device_lock);
if (device->parent)
list_del(&device->node);
list_del(&device->wakeup_list);
mutex_unlock(&acpi_device_lock);
+
+ err_detach:
+ acpi_detach_data(device->handle, acpi_bus_data_handler);
return result;
}
-static void acpi_device_unregister(struct acpi_device *device, int type)
+static void acpi_device_unregister(struct acpi_device *device)
{
mutex_lock(&acpi_device_lock);
if (device->parent)
@@ -715,8 +791,20 @@ static void acpi_device_unregister(struct acpi_device *device, int type)
acpi_detach_data(device->handle, acpi_bus_data_handler);
+ acpi_power_add_remove_device(device, false);
acpi_device_remove_files(device);
- device_unregister(&device->dev);
+ if (device->remove)
+ device->remove(device);
+
+ device_del(&device->dev);
+ /*
+ * Transition the device to D3cold to drop the reference counts of all
+ * power resources the device depends on and turn off the ones that have
+ * no more references.
+ */
+ acpi_device_set_power(device, ACPI_STATE_D3_COLD);
+ device->handle = NULL;
+ put_device(&device->dev);
}
/* --------------------------------------------------------------------------
@@ -760,24 +848,6 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
return 0;
}
-static int acpi_start_single_object(struct acpi_device *device)
-{
- int result = 0;
- struct acpi_driver *driver;
-
-
- if (!(driver = device->driver))
- return 0;
-
- if (driver->ops.start) {
- result = driver->ops.start(device);
- if (result && driver->ops.remove)
- driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
- }
-
- return result;
-}
-
/**
* acpi_bus_register_driver - register a driver with the ACPI bus
* @driver: driver being registered
@@ -821,29 +891,23 @@ EXPORT_SYMBOL(acpi_bus_unregister_driver);
-------------------------------------------------------------------------- */
static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
{
+ struct acpi_device *device = NULL;
acpi_status status;
- int ret;
- struct acpi_device *device;
/*
* Fixed hardware devices do not appear in the namespace and do not
* have handles, but we fabricate acpi_devices for them, so we have
* to deal with them specially.
*/
- if (handle == NULL)
+ if (!handle)
return acpi_root;
do {
status = acpi_get_parent(handle, &handle);
- if (status == AE_NULL_ENTRY)
- return NULL;
if (ACPI_FAILURE(status))
- return acpi_root;
-
- ret = acpi_bus_get_device(handle, &device);
- if (ret == 0)
- return device;
- } while (1);
+ return status == AE_NULL_ENTRY ? NULL : acpi_root;
+ } while (acpi_bus_get_device(handle, &device));
+ return device;
}
acpi_status
@@ -877,52 +941,43 @@ void acpi_bus_data_handler(acpi_handle handle, void *context)
return;
}
-static int acpi_bus_get_perf_flags(struct acpi_device *device)
-{
- device->performance.state = ACPI_STATE_UNKNOWN;
- return 0;
-}
-
-static acpi_status
-acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
- struct acpi_device_wakeup *wakeup)
+static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
+ struct acpi_device_wakeup *wakeup)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL;
union acpi_object *element = NULL;
acpi_status status;
- int i = 0;
+ int err = -ENODATA;
if (!wakeup)
- return AE_BAD_PARAMETER;
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&wakeup->resources);
/* _PRW */
status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
- return status;
+ return err;
}
package = (union acpi_object *)buffer.pointer;
- if (!package || (package->package.count < 2)) {
- status = AE_BAD_DATA;
+ if (!package || package->package.count < 2)
goto out;
- }
element = &(package->package.elements[0]);
- if (!element) {
- status = AE_BAD_DATA;
+ if (!element)
goto out;
- }
+
if (element->type == ACPI_TYPE_PACKAGE) {
if ((element->package.count < 2) ||
(element->package.elements[0].type !=
ACPI_TYPE_LOCAL_REFERENCE)
- || (element->package.elements[1].type != ACPI_TYPE_INTEGER)) {
- status = AE_BAD_DATA;
+ || (element->package.elements[1].type != ACPI_TYPE_INTEGER))
goto out;
- }
+
wakeup->gpe_device =
element->package.elements[0].reference.handle;
wakeup->gpe_number =
@@ -931,38 +986,35 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
wakeup->gpe_device = NULL;
wakeup->gpe_number = element->integer.value;
} else {
- status = AE_BAD_DATA;
goto out;
}
element = &(package->package.elements[1]);
- if (element->type != ACPI_TYPE_INTEGER) {
- status = AE_BAD_DATA;
+ if (element->type != ACPI_TYPE_INTEGER)
goto out;
- }
+
wakeup->sleep_state = element->integer.value;
- if ((package->package.count - 2) > ACPI_MAX_HANDLES) {
- status = AE_NO_MEMORY;
+ err = acpi_extract_power_resources(package, 2, &wakeup->resources);
+ if (err)
goto out;
- }
- wakeup->resources.count = package->package.count - 2;
- for (i = 0; i < wakeup->resources.count; i++) {
- element = &(package->package.elements[i + 2]);
- if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
- status = AE_BAD_DATA;
- goto out;
- }
- wakeup->resources.handles[i] = element->reference.handle;
- }
+ if (!list_empty(&wakeup->resources)) {
+ int sleep_state;
+ sleep_state = acpi_power_min_system_level(&wakeup->resources);
+ if (sleep_state < wakeup->sleep_state) {
+ acpi_handle_warn(handle, "Overriding _PRW sleep state "
+ "(S%d) by S%d from power resources\n",
+ (int)wakeup->sleep_state, sleep_state);
+ wakeup->sleep_state = sleep_state;
+ }
+ }
acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number);
out:
kfree(buffer.pointer);
-
- return status;
+ return err;
}
static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
@@ -1002,17 +1054,17 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
{
acpi_handle temp;
acpi_status status = 0;
- int psw_error;
+ int err;
/* Presence of _PRW indicates wake capable */
status = acpi_get_handle(device->handle, "_PRW", &temp);
if (ACPI_FAILURE(status))
return;
- status = acpi_bus_extract_wakeup_device_power_package(device->handle,
- &device->wakeup);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package"));
+ err = acpi_bus_extract_wakeup_device_power_package(device->handle,
+ &device->wakeup);
+ if (err) {
+ dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
return;
}
@@ -1025,20 +1077,73 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
* So it is necessary to call _DSW object first. Only when it is not
* present will the _PSW object used.
*/
- psw_error = acpi_device_sleep_wake(device, 0, 0, 0);
- if (psw_error)
+ err = acpi_device_sleep_wake(device, 0, 0, 0);
+ if (err)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"error in _DSW or _PSW evaluation\n"));
}
-static void acpi_bus_add_power_resource(acpi_handle handle);
+static void acpi_bus_init_power_state(struct acpi_device *device, int state)
+{
+ struct acpi_device_power_state *ps = &device->power.states[state];
+ char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle handle;
+ acpi_status status;
+
+ INIT_LIST_HEAD(&ps->resources);
+
+ /* Evaluate "_PRx" to get referenced power resources */
+ status = acpi_evaluate_object(device->handle, pathname, NULL, &buffer);
+ if (ACPI_SUCCESS(status)) {
+ union acpi_object *package = buffer.pointer;
+
+ if (buffer.length && package
+ && package->type == ACPI_TYPE_PACKAGE
+ && package->package.count) {
+ int err = acpi_extract_power_resources(package, 0,
+ &ps->resources);
+ if (!err)
+ device->power.flags.power_resources = 1;
+ }
+ ACPI_FREE(buffer.pointer);
+ }
+
+ /* Evaluate "_PSx" to see if we can do explicit sets */
+ pathname[2] = 'S';
+ status = acpi_get_handle(device->handle, pathname, &handle);
+ if (ACPI_SUCCESS(status))
+ ps->flags.explicit_set = 1;
+
+ /*
+ * State is valid if there are means to put the device into it.
+ * D3hot is only valid if _PR3 is present.
+ */
+ if (!list_empty(&ps->resources)
+ || (ps->flags.explicit_set && state < ACPI_STATE_D3_HOT)) {
+ ps->flags.valid = 1;
+ ps->flags.os_accessible = 1;
+ }
+
+ ps->power = -1; /* Unknown - driver assigned */
+ ps->latency = -1; /* Unknown - driver assigned */
+}
-static int acpi_bus_get_power_flags(struct acpi_device *device)
+static void acpi_bus_get_power_flags(struct acpi_device *device)
{
- acpi_status status = 0;
- acpi_handle handle = NULL;
- u32 i = 0;
+ acpi_status status;
+ acpi_handle handle;
+ u32 i;
+
+ /* Presence of _PS0|_PR0 indicates 'power manageable' */
+ status = acpi_get_handle(device->handle, "_PS0", &handle);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_get_handle(device->handle, "_PR0", &handle);
+ if (ACPI_FAILURE(status))
+ return;
+ }
+ device->flags.power_manageable = 1;
/*
* Power Management Flags
@@ -1053,40 +1158,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
/*
* Enumerate supported power management states
*/
- for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
- struct acpi_device_power_state *ps = &device->power.states[i];
- char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
-
- /* Evaluate "_PRx" to se if power resources are referenced */
- acpi_evaluate_reference(device->handle, object_name, NULL,
- &ps->resources);
- if (ps->resources.count) {
- int j;
-
- device->power.flags.power_resources = 1;
- for (j = 0; j < ps->resources.count; j++)
- acpi_bus_add_power_resource(ps->resources.handles[j]);
- }
+ for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++)
+ acpi_bus_init_power_state(device, i);
- /* Evaluate "_PSx" to see if we can do explicit sets */
- object_name[2] = 'S';
- status = acpi_get_handle(device->handle, object_name, &handle);
- if (ACPI_SUCCESS(status))
- ps->flags.explicit_set = 1;
-
- /*
- * State is valid if there are means to put the device into it.
- * D3hot is only valid if _PR3 present.
- */
- if (ps->resources.count ||
- (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) {
- ps->flags.valid = 1;
- ps->flags.os_accessible = 1;
- }
-
- ps->power = -1; /* Unknown - driver assigned */
- ps->latency = -1; /* Unknown - driver assigned */
- }
+ INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
/* Set defaults for D0 and D3 states (always valid) */
device->power.states[ACPI_STATE_D0].flags.valid = 1;
@@ -1103,17 +1178,17 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
device->power.flags.power_resources)
device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
- acpi_bus_init_power(device);
-
- return 0;
+ if (acpi_bus_init_power(device)) {
+ acpi_free_power_resources_lists(device);
+ device->flags.power_manageable = 0;
+ }
}
-static int acpi_bus_get_flags(struct acpi_device *device)
+static void acpi_bus_get_flags(struct acpi_device *device)
{
acpi_status status = AE_OK;
acpi_handle temp = NULL;
-
/* Presence of _STA indicates 'dynamic_status' */
status = acpi_get_handle(device->handle, "_STA", &temp);
if (ACPI_SUCCESS(status))
@@ -1133,21 +1208,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
if (ACPI_SUCCESS(status))
device->flags.ejectable = 1;
}
-
- /* Power resources cannot be power manageable. */
- if (device->device_type == ACPI_BUS_TYPE_POWER)
- return 0;
-
- /* Presence of _PS0|_PR0 indicates 'power manageable' */
- status = acpi_get_handle(device->handle, "_PS0", &temp);
- if (ACPI_FAILURE(status))
- status = acpi_get_handle(device->handle, "_PR0", &temp);
- if (ACPI_SUCCESS(status))
- device->flags.power_manageable = 1;
-
- /* TBD: Performance management */
-
- return 0;
}
static void acpi_device_get_busid(struct acpi_device *device)
@@ -1372,56 +1432,32 @@ static void acpi_device_set_id(struct acpi_device *device)
}
}
-static int acpi_device_set_context(struct acpi_device *device)
+void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
+ int type, unsigned long long sta)
{
- acpi_status status;
-
- /*
- * Context
- * -------
- * Attach this 'struct acpi_device' to the ACPI object. This makes
- * resolutions from handle->device very efficient. Fixed hardware
- * devices have no handles, so we skip them.
- */
- if (!device->handle)
- return 0;
-
- status = acpi_attach_data(device->handle,
- acpi_bus_data_handler, device);
- if (ACPI_SUCCESS(status))
- return 0;
-
- printk(KERN_ERR PREFIX "Error attaching device data\n");
- return -ENODEV;
+ INIT_LIST_HEAD(&device->pnp.ids);
+ device->device_type = type;
+ device->handle = handle;
+ device->parent = acpi_bus_get_parent(handle);
+ STRUCT_TO_INT(device->status) = sta;
+ acpi_device_get_busid(device);
+ acpi_device_set_id(device);
+ acpi_bus_get_flags(device);
+ device->flags.match_driver = false;
+ device_initialize(&device->dev);
+ dev_set_uevent_suppress(&device->dev, true);
}
-static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
+void acpi_device_add_finalize(struct acpi_device *device)
{
- if (!dev)
- return -EINVAL;
-
- dev->removal_type = ACPI_BUS_REMOVAL_EJECT;
- device_release_driver(&dev->dev);
-
- if (!rmdevice)
- return 0;
-
- /*
- * unbind _ADR-Based Devices when hot removal
- */
- if (dev->flags.bus_address) {
- if ((dev->parent) && (dev->parent->ops.unbind))
- dev->parent->ops.unbind(dev);
- }
- acpi_device_unregister(dev, ACPI_BUS_REMOVAL_EJECT);
-
- return 0;
+ device->flags.match_driver = true;
+ dev_set_uevent_suppress(&device->dev, false);
+ kobject_uevent(&device->dev.kobj, KOBJ_ADD);
}
static int acpi_add_single_object(struct acpi_device **child,
acpi_handle handle, int type,
- unsigned long long sta,
- struct acpi_bus_ops *ops)
+ unsigned long long sta)
{
int result;
struct acpi_device *device;
@@ -1433,102 +1469,25 @@ static int acpi_add_single_object(struct acpi_device **child,
return -ENOMEM;
}
- INIT_LIST_HEAD(&device->pnp.ids);
- device->device_type = type;
- device->handle = handle;
- device->parent = acpi_bus_get_parent(handle);
- device->bus_ops = *ops; /* workround for not call .start */
- STRUCT_TO_INT(device->status) = sta;
-
- acpi_device_get_busid(device);
-
- /*
- * Flags
- * -----
- * Note that we only look for object handles -- cannot evaluate objects
- * until we know the device is present and properly initialized.
- */
- result = acpi_bus_get_flags(device);
- if (result)
- goto end;
-
- /*
- * Initialize Device
- * -----------------
- * TBD: Synch with Core's enumeration/initialization process.
- */
- acpi_device_set_id(device);
-
- /*
- * Power Management
- * ----------------
- */
- if (device->flags.power_manageable) {
- result = acpi_bus_get_power_flags(device);
- if (result)
- goto end;
- }
-
- /*
- * Wakeup device management
- *-----------------------
- */
+ acpi_init_device_object(device, handle, type, sta);
+ acpi_bus_get_power_flags(device);
acpi_bus_get_wakeup_device_flags(device);
- /*
- * Performance Management
- * ----------------------
- */
- if (device->flags.performance_manageable) {
- result = acpi_bus_get_perf_flags(device);
- if (result)
- goto end;
- }
-
- if ((result = acpi_device_set_context(device)))
- goto end;
-
- result = acpi_device_register(device);
-
- /*
- * Bind _ADR-Based Devices when hot add
- */
- if (device->flags.bus_address) {
- if (device->parent && device->parent->ops.bind)
- device->parent->ops.bind(device);
- }
-
-end:
- if (!result) {
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Adding %s [%s] parent %s\n", dev_name(&device->dev),
- (char *) buffer.pointer,
- device->parent ? dev_name(&device->parent->dev) :
- "(null)"));
- kfree(buffer.pointer);
- *child = device;
- } else
+ result = acpi_device_add(device, acpi_device_release);
+ if (result) {
acpi_device_release(&device->dev);
+ return result;
+ }
- return result;
-}
-
-#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
- ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
-
-static void acpi_bus_add_power_resource(acpi_handle handle)
-{
- struct acpi_bus_ops ops = {
- .acpi_op_add = 1,
- .acpi_op_start = 1,
- };
- struct acpi_device *device = NULL;
-
- acpi_bus_get_device(handle, &device);
- if (!device)
- acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER,
- ACPI_STA_DEFAULT, &ops);
+ acpi_power_add_remove_device(device, true);
+ acpi_device_add_finalize(device);
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Added %s [%s] parent %s\n",
+ dev_name(&device->dev), (char *) buffer.pointer,
+ device->parent ? dev_name(&device->parent->dev) : "(null)"));
+ kfree(buffer.pointer);
+ *child = device;
+ return 0;
}
static int acpi_bus_type_and_status(acpi_handle handle, int *type,
@@ -1570,218 +1529,248 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
return 0;
}
-static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
- void *context, void **return_value)
+static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **return_value)
{
- struct acpi_bus_ops *ops = context;
+ struct acpi_device *device = NULL;
int type;
unsigned long long sta;
- struct acpi_device *device;
acpi_status status;
int result;
+ acpi_bus_get_device(handle, &device);
+ if (device)
+ goto out;
+
result = acpi_bus_type_and_status(handle, &type, &sta);
if (result)
return AE_OK;
+ if (type == ACPI_BUS_TYPE_POWER) {
+ acpi_add_power_resource(handle);
+ return AE_OK;
+ }
+
if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
!(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
struct acpi_device_wakeup wakeup;
acpi_handle temp;
status = acpi_get_handle(handle, "_PRW", &temp);
- if (ACPI_SUCCESS(status))
+ if (ACPI_SUCCESS(status)) {
acpi_bus_extract_wakeup_device_power_package(handle,
&wakeup);
+ acpi_power_resources_list_free(&wakeup.resources);
+ }
return AE_CTRL_DEPTH;
}
- /*
- * We may already have an acpi_device from a previous enumeration. If
- * so, we needn't add it again, but we may still have to start it.
- */
- device = NULL;
- acpi_bus_get_device(handle, &device);
- if (ops->acpi_op_add && !device) {
- acpi_add_single_object(&device, handle, type, sta, ops);
- /* Is the device a known good platform device? */
- if (device
- && !acpi_match_device_ids(device, acpi_platform_device_ids))
- acpi_create_platform_device(device);
- }
-
+ acpi_add_single_object(&device, handle, type, sta);
if (!device)
return AE_CTRL_DEPTH;
- if (ops->acpi_op_start && !(ops->acpi_op_add)) {
- status = acpi_start_single_object(device);
- if (ACPI_FAILURE(status))
- return AE_CTRL_DEPTH;
- }
-
+ out:
if (!*return_value)
*return_value = device;
+
return AE_OK;
}
-static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
- struct acpi_device **child)
+static int acpi_scan_do_attach_handler(struct acpi_device *device, char *id)
{
- acpi_status status;
- void *device = NULL;
+ struct acpi_scan_handler *handler;
- status = acpi_bus_check_add(handle, 0, ops, &device);
- if (ACPI_SUCCESS(status))
- acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
- acpi_bus_check_add, NULL, ops, &device);
+ list_for_each_entry(handler, &acpi_scan_handlers_list, list_node) {
+ const struct acpi_device_id *devid;
- if (child)
- *child = device;
+ for (devid = handler->ids; devid->id[0]; devid++) {
+ int ret;
- if (device)
- return 0;
- else
- return -ENODEV;
-}
+ if (strcmp((char *)devid->id, id))
+ continue;
-/*
- * acpi_bus_add and acpi_bus_start
- *
- * scan a given ACPI tree and (probably recently hot-plugged)
- * create and add or starts found devices.
- *
- * If no devices were found -ENODEV is returned which does not
- * mean that this is a real error, there just have been no suitable
- * ACPI objects in the table trunk from which the kernel could create
- * a device and add/start an appropriate driver.
- */
+ ret = handler->attach(device, devid);
+ if (ret > 0) {
+ device->handler = handler;
+ return ret;
+ } else if (ret < 0) {
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
-int
-acpi_bus_add(struct acpi_device **child,
- struct acpi_device *parent, acpi_handle handle, int type)
+static int acpi_scan_attach_handler(struct acpi_device *device)
{
- struct acpi_bus_ops ops;
+ struct acpi_hardware_id *hwid;
+ int ret = 0;
- memset(&ops, 0, sizeof(ops));
- ops.acpi_op_add = 1;
+ list_for_each_entry(hwid, &device->pnp.ids, list) {
+ ret = acpi_scan_do_attach_handler(device, hwid->id);
+ if (ret)
+ break;
- return acpi_bus_scan(handle, &ops, child);
+ }
+ return ret;
}
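
For orientation, here is a minimal sketch of the kind of handler the matching loop above iterates over; every name in it (the IDs, the functions, and the registration helper acpi_scan_add_handler() assumed from the rest of this series) is illustrative and not part of this hunk:

/* Illustrative sketch only -- all names below are hypothetical. */
static const struct acpi_device_id example_scan_ids[] = {
	{"EXPL0001", 0},
	{"", 0},
};

static int example_scan_attach(struct acpi_device *adev,
			       const struct acpi_device_id *id)
{
	/* A positive return value claims the device node for this handler. */
	return 1;
}

static struct acpi_scan_handler example_scan_handler = {
	.ids = example_scan_ids,
	.attach = example_scan_attach,
	/* .detach may be left NULL; acpi_bus_device_detach() checks for it. */
};

Such a handler would be registered (e.g. via the assumed acpi_scan_add_handler() helper) before the namespace is scanned, so that acpi_scan_attach_handler() can bind it by hardware ID.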
-EXPORT_SYMBOL(acpi_bus_add);
-int acpi_bus_start(struct acpi_device *device)
+static acpi_status acpi_bus_device_attach(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_not_used)
{
- struct acpi_bus_ops ops;
- int result;
-
- if (!device)
- return -EINVAL;
+ struct acpi_device *device;
+ unsigned long long sta_not_used;
+ int ret;
- memset(&ops, 0, sizeof(ops));
- ops.acpi_op_start = 1;
+ /*
+	 * Ignore the same errors that acpi_bus_check_add() ignores to avoid
+	 * terminating namespace walks prematurely.
+ */
+ if (acpi_bus_type_and_status(handle, &ret, &sta_not_used))
+ return AE_OK;
- result = acpi_bus_scan(device->handle, &ops, NULL);
+ if (acpi_bus_get_device(handle, &device))
+ return AE_CTRL_DEPTH;
- acpi_update_all_gpes();
+ ret = acpi_scan_attach_handler(device);
+ if (ret)
+ return ret > 0 ? AE_OK : AE_CTRL_DEPTH;
- return result;
+ ret = device_attach(&device->dev);
+ return ret >= 0 ? AE_OK : AE_CTRL_DEPTH;
}
-EXPORT_SYMBOL(acpi_bus_start);
-int acpi_bus_trim(struct acpi_device *start, int rmdevice)
+/**
+ * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
+ * @handle: Root of the namespace scope to scan.
+ *
+ * Scan a given ACPI tree (probably recently hot-plugged) and create and add
+ * device node objects for the devices found in it.
+ *
+ * If no devices were found, -ENODEV is returned, but that does not mean a
+ * real error has occurred.  There simply were no suitable ACPI objects in
+ * the scanned namespace scope from which the kernel could create a device
+ * and bind an appropriate driver.
+ *
+ * Must be called under acpi_scan_lock.
+ */
+int acpi_bus_scan(acpi_handle handle)
{
- acpi_status status;
- struct acpi_device *parent, *child;
- acpi_handle phandle, chandle;
- acpi_object_type type;
- u32 level = 1;
- int err = 0;
+ void *device = NULL;
+ int error = 0;
- parent = start;
- phandle = start->handle;
- child = chandle = NULL;
+ if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device)))
+ acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ acpi_bus_check_add, NULL, NULL, &device);
- while ((level > 0) && parent && (!err)) {
- status = acpi_get_next_object(ACPI_TYPE_ANY, phandle,
- chandle, &chandle);
+ if (!device)
+ error = -ENODEV;
+ else if (ACPI_SUCCESS(acpi_bus_device_attach(handle, 0, NULL, NULL)))
+ acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ acpi_bus_device_attach, NULL, NULL, NULL);
- /*
- * If this scope is exhausted then move our way back up.
- */
- if (ACPI_FAILURE(status)) {
- level--;
- chandle = phandle;
- acpi_get_parent(phandle, &phandle);
- child = parent;
- parent = parent->parent;
-
- if (level == 0)
- err = acpi_bus_remove(child, rmdevice);
- else
- err = acpi_bus_remove(child, 1);
+ return error;
+}
+EXPORT_SYMBOL(acpi_bus_scan);
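
As a usage sketch for the new entry point, a hot-add path might look like the following; the notify-handler context and the acpi_scan_lock_acquire()/release() helpers are assumptions based on the surrounding series, not taken from this hunk:

/* Illustrative only: rescanning a namespace scope after a hot-add event. */
static void example_hot_add(acpi_handle handle)
{
	acpi_scan_lock_acquire();	/* assumed wrapper around acpi_scan_lock */
	if (acpi_bus_scan(handle))
		pr_debug("no device nodes created for this scope\n");
	acpi_scan_lock_release();
}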
- continue;
- }
+static acpi_status acpi_bus_device_detach(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_not_used)
+{
+ struct acpi_device *device = NULL;
- status = acpi_get_type(chandle, &type);
- if (ACPI_FAILURE(status)) {
- continue;
- }
- /*
- * If there is a device corresponding to chandle then
- * parse it (depth-first).
- */
- if (acpi_bus_get_device(chandle, &child) == 0) {
- level++;
- phandle = chandle;
- chandle = NULL;
- parent = child;
+ if (!acpi_bus_get_device(handle, &device)) {
+ struct acpi_scan_handler *dev_handler = device->handler;
+
+ device->removal_type = ACPI_BUS_REMOVAL_EJECT;
+ if (dev_handler) {
+ if (dev_handler->detach)
+ dev_handler->detach(device);
+
+ device->handler = NULL;
+ } else {
+ device_release_driver(&device->dev);
}
- continue;
}
- return err;
+ return AE_OK;
+}
+
+static acpi_status acpi_bus_remove(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_not_used)
+{
+ struct acpi_device *device = NULL;
+
+ if (!acpi_bus_get_device(handle, &device))
+ acpi_device_unregister(device);
+
+ return AE_OK;
+}
+
+/**
+ * acpi_bus_trim - Remove ACPI device node and all of its descendants
+ * @start: Root of the ACPI device nodes subtree to remove.
+ *
+ * Must be called under acpi_scan_lock.
+ */
+void acpi_bus_trim(struct acpi_device *start)
+{
+ /*
+ * Execute acpi_bus_device_detach() as a post-order callback to detach
+ * all ACPI drivers from the device nodes being removed.
+ */
+ acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL,
+ acpi_bus_device_detach, NULL, NULL);
+ acpi_bus_device_detach(start->handle, 0, NULL, NULL);
+ /*
+ * Execute acpi_bus_remove() as a post-order callback to remove device
+ * nodes in the given namespace scope.
+ */
+ acpi_walk_namespace(ACPI_TYPE_ANY, start->handle, ACPI_UINT32_MAX, NULL,
+ acpi_bus_remove, NULL, NULL);
+ acpi_bus_remove(start->handle, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
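
The removal side, again as a hedged sketch under the same assumptions as the hot-add example above:

/* Illustrative only: tearing down a subtree before a physical eject. */
static void example_hot_remove(acpi_handle handle)
{
	struct acpi_device *device = NULL;

	if (acpi_bus_get_device(handle, &device) || !device)
		return;

	acpi_scan_lock_acquire();	/* assumed helper, as above */
	acpi_bus_trim(device);
	acpi_scan_lock_release();
}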
static int acpi_bus_scan_fixed(void)
{
int result = 0;
- struct acpi_device *device = NULL;
- struct acpi_bus_ops ops;
-
- memset(&ops, 0, sizeof(ops));
- ops.acpi_op_add = 1;
- ops.acpi_op_start = 1;
/*
* Enumerate all fixed-feature devices.
*/
- if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
+ struct acpi_device *device = NULL;
+
result = acpi_add_single_object(&device, NULL,
ACPI_BUS_TYPE_POWER_BUTTON,
- ACPI_STA_DEFAULT,
- &ops);
+ ACPI_STA_DEFAULT);
+ if (result)
+ return result;
+
+ result = device_attach(&device->dev);
+ if (result < 0)
+ return result;
+
device_init_wakeup(&device->dev, true);
}
- if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) {
+ struct acpi_device *device = NULL;
+
result = acpi_add_single_object(&device, NULL,
ACPI_BUS_TYPE_SLEEP_BUTTON,
- ACPI_STA_DEFAULT,
- &ops);
+ ACPI_STA_DEFAULT);
+ if (result)
+ return result;
+
+ result = device_attach(&device->dev);
}
- return result;
+ return result < 0 ? result : 0;
}
int __init acpi_scan_init(void)
{
int result;
- struct acpi_bus_ops ops;
-
- memset(&ops, 0, sizeof(ops));
- ops.acpi_op_add = 1;
- ops.acpi_op_start = 1;
result = bus_register(&acpi_bus_type);
if (result) {
@@ -1789,20 +1778,33 @@ int __init acpi_scan_init(void)
printk(KERN_ERR PREFIX "Could not register bus type\n");
}
- acpi_power_init();
+ acpi_pci_root_init();
+ acpi_pci_link_init();
+ acpi_platform_init();
+ acpi_csrt_init();
+ acpi_container_init();
+ mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
*/
- result = acpi_bus_scan(ACPI_ROOT_OBJECT, &ops, &acpi_root);
-
- if (!result)
- result = acpi_bus_scan_fixed();
+ result = acpi_bus_scan(ACPI_ROOT_OBJECT);
+ if (result)
+ goto out;
+ result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root);
if (result)
- acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
- else
- acpi_update_all_gpes();
+ goto out;
+
+ result = acpi_bus_scan_fixed();
+ if (result) {
+ acpi_device_unregister(acpi_root);
+ goto out;
+ }
+ acpi_update_all_gpes();
+
+ out:
+ mutex_unlock(&acpi_scan_lock);
return result;
}
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 2fcc67d34b11..6d3a06a629a1 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -177,6 +177,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
{
.callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-FW41E_H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-FW21E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
@@ -386,6 +394,8 @@ static void acpi_pm_finish(void)
acpi_target_sleep_state = ACPI_STATE_S0;
+ acpi_resume_power_resources();
+
/* If we were woken with the fixed power button, provide a small
* hint to userspace in the form of a wakeup event on the fixed power
* button device (if it can be found).
@@ -577,7 +587,28 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
.end = acpi_pm_end,
.recover = acpi_pm_finish,
};
-#endif /* CONFIG_SUSPEND */
+
+static void acpi_sleep_suspend_setup(void)
+{
+ int i;
+
+ for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
+ acpi_status status;
+ u8 type_a, type_b;
+
+ status = acpi_get_sleep_type_data(i, &type_a, &type_b);
+ if (ACPI_SUCCESS(status)) {
+ sleep_states[i] = 1;
+ pr_cont(" S%d", i);
+ }
+ }
+
+ suspend_set_ops(old_suspend_ordering ?
+ &acpi_suspend_ops_old : &acpi_suspend_ops);
+}
+#else /* !CONFIG_SUSPEND */
+static inline void acpi_sleep_suspend_setup(void) {}
+#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
@@ -698,7 +729,30 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
.restore_cleanup = acpi_pm_thaw,
.recover = acpi_pm_finish,
};
-#endif /* CONFIG_HIBERNATION */
+
+static void acpi_sleep_hibernate_setup(void)
+{
+ acpi_status status;
+ u8 type_a, type_b;
+
+ status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
+ if (ACPI_FAILURE(status))
+ return;
+
+ hibernation_set_ops(old_suspend_ordering ?
+ &acpi_hibernation_ops_old : &acpi_hibernation_ops);
+ sleep_states[ACPI_STATE_S4] = 1;
+	pr_cont(" S4");
+ if (nosigcheck)
+ return;
+
+ acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
+ if (facs)
+ s4_hardware_signature = facs->hardware_signature;
+}
+#else /* !CONFIG_HIBERNATION */
+static inline void acpi_sleep_hibernate_setup(void) {}
+#endif /* !CONFIG_HIBERNATION */
int acpi_suspend(u32 acpi_state)
{
@@ -734,9 +788,6 @@ int __init acpi_sleep_init(void)
{
acpi_status status;
u8 type_a, type_b;
-#ifdef CONFIG_SUSPEND
- int i = 0;
-#endif
if (acpi_disabled)
return 0;
@@ -744,45 +795,19 @@ int __init acpi_sleep_init(void)
acpi_sleep_dmi_check();
sleep_states[ACPI_STATE_S0] = 1;
- printk(KERN_INFO PREFIX "(supports S0");
-
-#ifdef CONFIG_SUSPEND
- for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
- status = acpi_get_sleep_type_data(i, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
- sleep_states[i] = 1;
- printk(KERN_CONT " S%d", i);
- }
- }
+ pr_info(PREFIX "(supports S0");
- suspend_set_ops(old_suspend_ordering ?
- &acpi_suspend_ops_old : &acpi_suspend_ops);
-#endif
+ acpi_sleep_suspend_setup();
+ acpi_sleep_hibernate_setup();
-#ifdef CONFIG_HIBERNATION
- status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
- hibernation_set_ops(old_suspend_ordering ?
- &acpi_hibernation_ops_old : &acpi_hibernation_ops);
- sleep_states[ACPI_STATE_S4] = 1;
- printk(KERN_CONT " S4");
- if (!nosigcheck) {
- acpi_get_table(ACPI_SIG_FACS, 1,
- (struct acpi_table_header **)&facs);
- if (facs)
- s4_hardware_signature =
- facs->hardware_signature;
- }
- }
-#endif
status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
if (ACPI_SUCCESS(status)) {
sleep_states[ACPI_STATE_S5] = 1;
- printk(KERN_CONT " S5");
+ pr_cont(" S5");
pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off;
}
- printk(KERN_CONT ")\n");
+ pr_cont(")\n");
/*
* Register the tts_notifier to reboot notifier list so that the _TTS
* object can also be evaluated when the system enters S5.
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 74d59c8f4678..0143540a2519 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -6,3 +6,5 @@ extern void acpi_disable_wakeup_devices(u8 sleep_state);
extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;
+
+extern void acpi_resume_power_resources(void);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index ea61ca9129cd..41c0504470db 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -498,7 +498,7 @@ static int get_status(u32 index, acpi_event_status *status,
result = acpi_get_gpe_device(index, handle);
if (result) {
ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
- "Invalid GPE 0x%x\n", index));
+ "Invalid GPE 0x%x", index));
goto end;
}
result = acpi_get_gpe_status(*handle, index, status);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 2572d9715bda..d67a1fe07f0e 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -204,7 +204,7 @@ int __init
acpi_table_parse_entries(char *id,
unsigned long table_size,
int entry_id,
- acpi_table_entry_handler handler,
+ acpi_tbl_entry_handler handler,
unsigned int max_entries)
{
struct acpi_table_header *table_header = NULL;
@@ -269,7 +269,7 @@ err:
int __init
acpi_table_parse_madt(enum acpi_madt_type id,
- acpi_table_entry_handler handler, unsigned int max_entries)
+ acpi_tbl_entry_handler handler, unsigned int max_entries)
{
return acpi_table_parse_entries(ACPI_SIG_MADT,
sizeof(struct acpi_table_madt), id,
@@ -285,7 +285,7 @@ acpi_table_parse_madt(enum acpi_madt_type id,
* Scan the ACPI System Descriptor Table (STD) for a table matching @id,
* run @handler on it. Return 0 if table found, return on if not.
*/
-int __init acpi_table_parse(char *id, acpi_table_handler handler)
+int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
{
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 506fbd4b5733..8470771e5eae 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -97,7 +97,7 @@ module_param(psv, int, 0644);
MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
static int acpi_thermal_add(struct acpi_device *device);
-static int acpi_thermal_remove(struct acpi_device *device, int type);
+static int acpi_thermal_remove(struct acpi_device *device);
static void acpi_thermal_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id thermal_device_ids[] = {
@@ -288,7 +288,7 @@ do { \
if (flags != ACPI_TRIPS_INIT) \
ACPI_EXCEPTION((AE_INFO, AE_ERROR, \
"ACPI thermal trip point %s changed\n" \
- "Please send acpidump to linux-acpi@vger.kernel.org\n", str)); \
+ "Please send acpidump to linux-acpi@vger.kernel.org", str)); \
} while (0)
static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
@@ -531,6 +531,10 @@ static void acpi_thermal_check(void *data)
{
struct acpi_thermal *tz = data;
+ if (!tz->tz_enabled) {
+ pr_warn("thermal zone is disabled \n");
+ return;
+ }
thermal_zone_device_update(tz->thermal_zone);
}
@@ -1111,7 +1115,7 @@ end:
return result;
}
-static int acpi_thermal_remove(struct acpi_device *device, int type)
+static int acpi_thermal_remove(struct acpi_device *device)
{
struct acpi_thermal *tz = NULL;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index ac9a69cd45f5..313f959413dc 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -88,7 +88,7 @@ module_param(use_bios_initial_backlight, bool, 0644);
static int register_count = 0;
static int acpi_video_bus_add(struct acpi_device *device);
-static int acpi_video_bus_remove(struct acpi_device *device, int type);
+static int acpi_video_bus_remove(struct acpi_device *device);
static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id video_device_ids[] = {
@@ -673,7 +673,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
br->levels[i] = br->levels[i - level_ac_battery];
count += level_ac_battery;
} else if (level_ac_battery > 2)
- ACPI_ERROR((AE_INFO, "Too many duplicates in _BCL package\n"));
+ ACPI_ERROR((AE_INFO, "Too many duplicates in _BCL package"));
/* Check if the _BCL package is in a reversed order */
if (max_level == br->levels[2]) {
@@ -682,7 +682,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
acpi_video_cmp_level, NULL);
} else if (max_level != br->levels[count - 1])
ACPI_ERROR((AE_INFO,
- "Found unordered _BCL package\n"));
+ "Found unordered _BCL package"));
br->count = count;
device->brightness = br;
@@ -1740,7 +1740,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
return error;
}
-static int acpi_video_bus_remove(struct acpi_device *device, int type)
+static int acpi_video_bus_remove(struct acpi_device *device)
{
struct acpi_video_bus *video = NULL;
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index 536c166f4253..ab92785f54dc 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -20,6 +20,7 @@
*
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -257,9 +258,9 @@ static int tegra_ahb_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- ahb->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!ahb->regs)
- return -EBUSY;
+ ahb->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ahb->regs))
+ return PTR_ERR(ahb->regs);
ahb->dev = &pdev->dev;
platform_set_drvdata(pdev, ahb);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e08d322d01d7..3e751b74615e 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -14,7 +14,7 @@ menuconfig ATA
tristate "Serial ATA and Parallel ATA drivers"
depends on HAS_IOMEM
depends on BLOCK
- depends on !(M32R || M68K) || BROKEN
+ depends on !(M32R || M68K || S390) || BROKEN
select SCSI
---help---
If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or
@@ -58,6 +58,19 @@ config ATA_ACPI
You can disable this at kernel boot time by using the
option libata.noacpi=1
+config SATA_ZPODD
+ bool "SATA Zero Power ODD Support"
+ depends on ATA_ACPI
+ default n
+ help
+	  This option adds support for SATA ZPODD. It requires both ODD and
+	  platform support and, if enabled, will automatically power the ODD
+	  on and off when certain conditions are satisfied. This does not
+	  affect the user's experience of the ODD; only power is saved when
+	  the ODD is not in use (i.e. no disc inside).
+
+ If unsure, say N.
+
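
For reference, a minimal configuration fragment that satisfies the dependencies stated above; this is purely illustrative and not part of the patch:

CONFIG_ACPI=y
CONFIG_ATA=y
CONFIG_ATA_ACPI=y
CONFIG_SATA_ZPODD=y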
config SATA_PMP
bool "SATA Port Multiplier support"
default y
@@ -163,7 +176,7 @@ config SATA_QSTOR
config SATA_SX4
tristate "Promise SATA SX4 support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables support for Promise Serial ATA SX4.
@@ -247,6 +260,14 @@ config SATA_PROMISE
If unsure, say N.
+config SATA_RCAR
+ tristate "Renesas R-Car SATA support"
+ depends on ARCH_SHMOBILE && ARCH_R8A7779
+ help
+ This option enables support for Renesas R-Car Serial ATA.
+
+ If unsure, say N.
+
config SATA_SIL
tristate "Silicon Image SATA support"
depends on PCI
@@ -390,7 +411,7 @@ config PATA_CS5530
config PATA_CS5535
tristate "CS5535 PATA support (Experimental)"
- depends on PCI && X86 && !X86_64 && EXPERIMENTAL
+ depends on PCI && X86 && !X86_64
help
This option enables support for the NatSemi/AMD CS5535
companion chip used with the Geode processor family.
@@ -408,7 +429,7 @@ config PATA_CS5536
config PATA_CYPRESS
tristate "Cypress CY82C693 PATA support (Very Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables support for the Cypress/Contaq CY82C693
chipset found in some Alpha systems
@@ -496,7 +517,7 @@ config PATA_IMX
config PATA_IT8213
tristate "IT8213 PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables support for the ITE 821 PATA
controllers via the new ATA layer.
@@ -589,7 +610,7 @@ config PATA_OLDPIIX
config PATA_OPTIDMA
tristate "OPTI FireStar PATA support (Very Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables DMA/PIO support for the later OPTi
controllers found on some old motherboards and in some
@@ -616,7 +637,7 @@ config PATA_PDC_OLD
config PATA_RADISYS
tristate "RADISYS 82600 PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables support for the RADISYS 82600
PATA controllers via the new ATA layer
@@ -687,7 +708,7 @@ config PATA_SIS
config PATA_TOSHIBA
tristate "Toshiba Piccolo support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
Support for the Toshiba Piccolo controllers. Currently only the
primary channel is supported by this driver.
@@ -738,7 +759,7 @@ comment "PIO-only SFF controllers"
config PATA_AT32
tristate "Atmel AVR32 PATA support (Experimental)"
- depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
+ depends on AVR32 && PLATFORM_AT32AP
help
This option enables support for the IDE devices on the
Atmel AT32AP platform.
@@ -755,7 +776,7 @@ config PATA_AT91
config PATA_CMD640_PCI
tristate "CMD640 PCI PATA support (Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables support for the CMD640 PCI IDE
interface chip. Only the primary channel is currently
@@ -801,7 +822,7 @@ config PATA_NS87410
config PATA_OPTI
tristate "OPTI621/6215 PATA support (Very Experimental)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
This option enables full PIO support for the early Opti ATA
controllers found on some old motherboards.
@@ -881,7 +902,7 @@ config PATA_SAMSUNG_CF
config PATA_WINBOND_VLB
tristate "Winbond W83759A VLB PATA support (Experimental)"
- depends on ISA && EXPERIMENTAL
+ depends on ISA
select PATA_LEGACY
help
Support for the Winbond W83759A controller on Vesa Local Bus
@@ -909,7 +930,7 @@ config ATA_GENERIC
config PATA_LEGACY
tristate "Legacy ISA PATA support (Experimental)"
- depends on (ISA || PCI) && EXPERIMENTAL
+ depends on (ISA || PCI)
help
This option enables support for ISA/VLB/PCI bus legacy PATA
ports and allows them to be accessed via the new ATA layer.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 9329dafba91b..c04d0fd038a3 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_ATA_PIIX) += ata_piix.o
obj-$(CONFIG_SATA_MV) += sata_mv.o
obj-$(CONFIG_SATA_NV) += sata_nv.o
obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
+obj-$(CONFIG_SATA_RCAR) += sata_rcar.o
obj-$(CONFIG_SATA_SIL) += sata_sil.o
obj-$(CONFIG_SATA_SIS) += sata_sis.o
obj-$(CONFIG_SATA_SVW) += sata_svw.o
@@ -107,3 +108,4 @@ libata-y := libata-core.o libata-scsi.o libata-eh.o libata-transport.o
libata-$(CONFIG_ATA_SFF) += libata-sff.o
libata-$(CONFIG_SATA_PMP) += libata-pmp.o
libata-$(CONFIG_ATA_ACPI) += libata-acpi.o
+libata-$(CONFIG_SATA_ZPODD) += libata-zpodd.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 7862d17976b7..a99112cfd8b1 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -53,6 +53,7 @@
enum {
AHCI_PCI_BAR_STA2X11 = 0,
+ AHCI_PCI_BAR_ENMOTUS = 2,
AHCI_PCI_BAR_STANDARD = 5,
};
@@ -264,6 +265,30 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -410,6 +435,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+ /* Enmotus */
+ { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+
/* Generic, PCI class code for AHCI */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
@@ -1057,6 +1085,86 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
{}
#endif
+int ahci_init_interrupts(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
+{
+ int rc;
+ unsigned int maxvec;
+
+ if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) {
+ rc = pci_enable_msi_block_auto(pdev, &maxvec);
+ if (rc > 0) {
+ if ((rc == maxvec) || (rc == 1))
+ return rc;
+ /*
+			 * Assume that the advantage of multiple MSIs is negated,
+			 * so fall back to single MSI mode to save resources.
+ */
+ pci_disable_msi(pdev);
+ if (!pci_enable_msi(pdev))
+ return 1;
+ }
+ }
+
+ pci_intx(pdev, 1);
+ return 0;
+}
+
+/**
+ * ahci_host_activate - start AHCI host, request IRQs and register it
+ * @host: target ATA host
+ * @irq: base IRQ number to request
+ * @n_msis: number of MSIs allocated for this host
+ * @irq_handler: irq_handler used when requesting IRQs
+ * @irq_flags: irq_flags used when requesting IRQs
+ *
+ * Similar to ata_host_activate, but requests IRQs according to AHCI-1.1
+ * when multiple MSIs were allocated, that is, one MSI per port, starting
+ * from @irq.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
+{
+ int i, rc;
+
+ /* Sharing Last Message among several ports is not supported */
+ if (n_msis < host->n_ports)
+ return -EINVAL;
+
+ rc = ata_host_start(host);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < host->n_ports; i++) {
+ rc = devm_request_threaded_irq(host->dev,
+ irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
+ dev_driver_string(host->dev), host->ports[i]);
+ if (rc)
+ goto out_free_irqs;
+ }
+
+ for (i = 0; i < host->n_ports; i++)
+ ata_port_desc(host->ports[i], "irq %d", irq + i);
+
+ rc = ata_host_register(host, &ahci_sht);
+ if (rc)
+ goto out_free_all_irqs;
+
+ return 0;
+
+out_free_all_irqs:
+ i = host->n_ports;
+out_free_irqs:
+ for (i--; i >= 0; i--)
+ devm_free_irq(host->dev, irq + i, host->ports[i]);
+
+ return rc;
+}
+
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int board_id = ent->driver_data;
@@ -1065,7 +1173,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ata_host *host;
- int n_ports, i, rc;
+ int n_ports, n_msis, i, rc;
int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
VPRINTK("ENTER\n");
@@ -1098,9 +1206,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"PDC42819 can only drive SATA devices with this driver\n");
- /* The Connext uses non-standard BAR */
+ /* Both Connext and Enmotus devices use non-standard BARs */
if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
+ else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
+ ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
/* acquire resources */
rc = pcim_enable_device(pdev);
@@ -1150,11 +1260,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_sb600_enable_64bit(pdev))
hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
- if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
- pci_intx(pdev, 1);
-
hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+ n_msis = ahci_init_interrupts(pdev, hpriv);
+ if (n_msis > 1)
+ hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
@@ -1250,6 +1361,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_pci_print_info(host);
pci_set_master(pdev);
+
+ if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
+ return ahci_host_activate(host, pdev->irq, n_msis);
+
return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
&ahci_sht);
}
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 9be471200a07..b830e6c9fe49 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -231,6 +231,7 @@ enum {
AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on
port start (wait until
error-handling stage) */
+ AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
/* ap->flags bits */
@@ -297,6 +298,8 @@ struct ahci_port_priv {
unsigned int ncq_saw_d2h:1;
unsigned int ncq_saw_dmas:1;
unsigned int ncq_saw_sdb:1;
+ u32 intr_status; /* interrupts to handle */
+ spinlock_t lock; /* protects parent ata_port */
u32 intr_mask; /* interrupts to enable */
bool fbs_supported; /* set iff FBS is supported */
bool fbs_enabled; /* set iff FBS is enabled */
@@ -359,7 +362,10 @@ void ahci_set_em_messages(struct ahci_host_priv *hpriv,
struct ata_port_info *pi);
int ahci_reset_em(struct ata_host *host);
irqreturn_t ahci_interrupt(int irq, void *dev_instance);
+irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance);
+irqreturn_t ahci_thread_fn(int irq, void *dev_instance);
void ahci_print_info(struct ata_host *host, const char *scc_s);
+int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis);
static inline void __iomem *__ahci_port_base(struct ata_host *host,
unsigned int port_no)
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 174eca609b42..d2ba439cfe54 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -317,6 +317,23 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (DH89xxCC) */
{ 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+
{ } /* terminate list */
};
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 320712a7b9ea..34c82167b962 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1655,19 +1655,16 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
ata_port_abort(ap);
}
-static void ahci_port_intr(struct ata_port *ap)
+static void ahci_handle_port_interrupt(struct ata_port *ap,
+ void __iomem *port_mmio, u32 status)
{
- void __iomem *port_mmio = ahci_port_base(ap);
struct ata_eh_info *ehi = &ap->link.eh_info;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
- u32 status, qc_active = 0;
+ u32 qc_active = 0;
int rc;
- status = readl(port_mmio + PORT_IRQ_STAT);
- writel(status, port_mmio + PORT_IRQ_STAT);
-
/* ignore BAD_PMP while resetting */
if (unlikely(resetting))
status &= ~PORT_IRQ_BAD_PMP;
@@ -1743,6 +1740,107 @@ static void ahci_port_intr(struct ata_port *ap)
}
}
+void ahci_port_intr(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 status;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+
+ ahci_handle_port_interrupt(ap, port_mmio, status);
+}
+
+irqreturn_t ahci_thread_fn(int irq, void *dev_instance)
+{
+ struct ata_port *ap = dev_instance;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&ap->host->lock, flags);
+ status = pp->intr_status;
+ if (status)
+ pp->intr_status = 0;
+ spin_unlock_irqrestore(&ap->host->lock, flags);
+
+ spin_lock_bh(ap->lock);
+ ahci_handle_port_interrupt(ap, port_mmio, status);
+ spin_unlock_bh(ap->lock);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(ahci_thread_fn);
+
+void ahci_hw_port_interrupt(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 status;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+
+ pp->intr_status |= status;
+}
+
+irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance)
+{
+ struct ata_port *ap_this = dev_instance;
+ struct ahci_port_priv *pp = ap_this->private_data;
+ struct ata_host *host = ap_this->host;
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ unsigned int i;
+ u32 irq_stat, irq_masked;
+
+ VPRINTK("ENTER\n");
+
+ spin_lock(&host->lock);
+
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+
+ if (!irq_stat) {
+ u32 status = pp->intr_status;
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return status ? IRQ_WAKE_THREAD : IRQ_NONE;
+ }
+
+ irq_masked = irq_stat & hpriv->port_map;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap;
+
+ if (!(irq_masked & (1 << i)))
+ continue;
+
+ ap = host->ports[i];
+ if (ap) {
+ ahci_hw_port_interrupt(ap);
+ VPRINTK("port %u\n", i);
+ } else {
+ VPRINTK("port %u (no irq)\n", i);
+ if (ata_ratelimit())
+ dev_warn(host->dev,
+ "interrupt on disabled port %u\n", i);
+ }
+ }
+
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_WAKE_THREAD;
+}
+EXPORT_SYMBOL_GPL(ahci_hw_interrupt);
+
irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
@@ -1951,13 +2049,13 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
/* Use the nominal value 10 ms if the read MDAT is zero,
* the nominal value of DETO is 20 ms.
*/
- if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] &
+ if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &
ATA_LOG_DEVSLP_VALID_MASK) {
- mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] &
+ mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
ATA_LOG_DEVSLP_MDAT_MASK;
if (!mdat)
mdat = 10;
- deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO];
+ deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];
if (!deto)
deto = 20;
} else {
@@ -2196,6 +2294,14 @@ static int ahci_port_start(struct ata_port *ap)
*/
pp->intr_mask = DEF_PORT_IRQ;
+ /*
+ * Switch to per-port locking in case each port has its own MSI vector.
+ */
+ if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) {
+ spin_lock_init(&pp->lock);
+ ap->lock = &pp->lock;
+ }
+
ap->private_data = pp;
/* engage engines, captain */
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ef01ac07502e..0ea1018280bd 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -17,6 +17,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
#include <scsi/scsi_device.h>
#include "libata.h"
@@ -835,50 +836,95 @@ void ata_acpi_on_resume(struct ata_port *ap)
}
}
-/**
- * ata_acpi_set_state - set the port power state
- * @ap: target ATA port
- * @state: state, on/off
- *
- * This function executes the _PS0/_PS3 ACPI method to set the power state.
- * ACPI spec requires _PS0 when IDE power on and _PS3 when power off
- */
-void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+static int ata_acpi_choose_suspend_state(struct ata_device *dev, bool runtime)
{
+ int d_max_in = ACPI_STATE_D3_COLD;
+ if (!runtime)
+ goto out;
+
+ /*
+ * For ATAPI, runtime D3 cold is only allowed
+ * for ZPODD in zero power ready state
+ */
+ if (dev->class == ATA_DEV_ATAPI &&
+ !(zpodd_dev_enabled(dev) && zpodd_zpready(dev)))
+ d_max_in = ACPI_STATE_D3_HOT;
+
+out:
+ return acpi_pm_device_sleep_state(&dev->sdev->sdev_gendev,
+ NULL, d_max_in);
+}
+
+static void sata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+ bool runtime = PMSG_IS_AUTO(state);
struct ata_device *dev;
acpi_handle handle;
int acpi_state;
- /* channel first and then drives for power on and vica versa
- for power off */
- handle = ata_ap_acpi_handle(ap);
- if (handle && state.event == PM_EVENT_ON)
- acpi_bus_set_power(handle, ACPI_STATE_D0);
-
ata_for_each_dev(dev, &ap->link, ENABLED) {
handle = ata_dev_acpi_handle(dev);
if (!handle)
continue;
- if (state.event != PM_EVENT_ON) {
- acpi_state = acpi_pm_device_sleep_state(
- &dev->sdev->sdev_gendev, NULL, ACPI_STATE_D3);
- if (acpi_state > 0)
- acpi_bus_set_power(handle, acpi_state);
- /* TBD: need to check if it's runtime pm request */
- acpi_pm_device_run_wake(
- &dev->sdev->sdev_gendev, true);
+ if (!(state.event & PM_EVENT_RESUME)) {
+ acpi_state = ata_acpi_choose_suspend_state(dev, runtime);
+ if (acpi_state == ACPI_STATE_D0)
+ continue;
+ if (runtime && zpodd_dev_enabled(dev) &&
+ acpi_state == ACPI_STATE_D3_COLD)
+ zpodd_enable_run_wake(dev);
+ acpi_bus_set_power(handle, acpi_state);
} else {
- /* Ditto */
- acpi_pm_device_run_wake(
- &dev->sdev->sdev_gendev, false);
+ if (runtime && zpodd_dev_enabled(dev))
+ zpodd_disable_run_wake(dev);
acpi_bus_set_power(handle, ACPI_STATE_D0);
}
}
+}
+
+/* ACPI spec requires _PS0 when powering on IDE devices and _PS3 when powering them off */
+static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+ struct ata_device *dev;
+ acpi_handle port_handle;
+
+ port_handle = ata_ap_acpi_handle(ap);
+ if (!port_handle)
+ return;
+
+	/* power on the channel first and then the drives, and vice
+	   versa for power off */
+ if (state.event & PM_EVENT_RESUME)
+ acpi_bus_set_power(port_handle, ACPI_STATE_D0);
+
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ acpi_handle dev_handle = ata_dev_acpi_handle(dev);
+ if (!dev_handle)
+ continue;
+
+ acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ?
+ ACPI_STATE_D0 : ACPI_STATE_D3);
+ }
- handle = ata_ap_acpi_handle(ap);
- if (handle && state.event != PM_EVENT_ON)
- acpi_bus_set_power(handle, ACPI_STATE_D3);
+ if (!(state.event & PM_EVENT_RESUME))
+ acpi_bus_set_power(port_handle, ACPI_STATE_D3);
+}
+
+/**
+ * ata_acpi_set_state - set the port power state
+ * @ap: target ATA port
+ * @state: state, on/off
+ *
+ * This function sets a proper ACPI D state for the devices on the port
+ * during system and runtime PM operations.
+ */
+void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+ if (ap->flags & ATA_FLAG_ACPI_SATA)
+ sata_acpi_set_state(ap, state);
+ else
+ pata_acpi_set_state(ap, state);
}
/**
@@ -974,96 +1020,35 @@ void ata_acpi_on_disable(struct ata_device *dev)
ata_acpi_clear_gtf(dev);
}
-static void ata_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
-{
- struct ata_device *ata_dev = context;
-
- if (event == ACPI_NOTIFY_DEVICE_WAKE && ata_dev &&
- pm_runtime_suspended(&ata_dev->sdev->sdev_gendev))
- scsi_autopm_get_device(ata_dev->sdev);
-}
-
-static void ata_acpi_add_pm_notifier(struct ata_device *dev)
-{
- struct acpi_device *acpi_dev;
- acpi_handle handle;
- acpi_status status;
-
- handle = ata_dev_acpi_handle(dev);
- if (!handle)
- return;
-
- status = acpi_bus_get_device(handle, &acpi_dev);
- if (ACPI_FAILURE(status))
- return;
-
- if (dev->sdev->can_power_off) {
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- ata_acpi_wake_dev, dev);
- device_set_run_wake(&dev->sdev->sdev_gendev, true);
- }
-}
-
-static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
-{
- struct acpi_device *acpi_dev;
- acpi_handle handle;
- acpi_status status;
-
- handle = ata_dev_acpi_handle(dev);
- if (!handle)
- return;
-
- status = acpi_bus_get_device(handle, &acpi_dev);
- if (ACPI_FAILURE(status))
- return;
-
- if (dev->sdev->can_power_off) {
- device_set_run_wake(&dev->sdev->sdev_gendev, false);
- acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- ata_acpi_wake_dev);
- }
-}
-
static void ata_acpi_register_power_resource(struct ata_device *dev)
{
struct scsi_device *sdev = dev->sdev;
acpi_handle handle;
- struct device *device;
handle = ata_dev_acpi_handle(dev);
- if (!handle)
- return;
-
- device = &sdev->sdev_gendev;
-
- acpi_power_resource_register_device(device, handle);
+ if (handle)
+ acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev);
}
static void ata_acpi_unregister_power_resource(struct ata_device *dev)
{
struct scsi_device *sdev = dev->sdev;
acpi_handle handle;
- struct device *device;
handle = ata_dev_acpi_handle(dev);
- if (!handle)
- return;
-
- device = &sdev->sdev_gendev;
-
- acpi_power_resource_unregister_device(device, handle);
+ if (handle)
+ acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev);
}
void ata_acpi_bind(struct ata_device *dev)
{
- ata_acpi_add_pm_notifier(dev);
ata_acpi_register_power_resource(dev);
+ if (zpodd_dev_enabled(dev))
+ dev_pm_qos_expose_flags(&dev->sdev->sdev_gendev, 0);
}
void ata_acpi_unbind(struct ata_device *dev)
{
- ata_acpi_remove_pm_notifier(dev);
ata_acpi_unregister_power_resource(dev);
}
@@ -1105,9 +1090,6 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
acpi_handle *handle)
{
struct ata_device *ata_dev;
- acpi_status status;
- struct acpi_device *acpi_dev;
- struct acpi_device_power_state *states;
if (ap->flags & ATA_FLAG_ACPI_SATA) {
if (!sata_pmp_attached(ap))
@@ -1124,21 +1106,6 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
if (!*handle)
return -ENODEV;
- status = acpi_bus_get_device(*handle, &acpi_dev);
- if (ACPI_FAILURE(status))
- return 0;
-
- /*
- * If firmware has _PS3 or _PR3 for this device,
- * and this ata ODD device support device attention,
- * it means this device can be powered off
- */
- states = acpi_dev->power.states;
- if ((states[ACPI_STATE_D3_HOT].flags.valid ||
- states[ACPI_STATE_D3_COLD].flags.explicit_set) &&
- ata_dev->flags & ATA_DFLAG_DA)
- sdev->can_power_off = 1;
-
return 0;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9e8b99af400d..497adea1f0d6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2325,24 +2325,28 @@ int ata_dev_configure(struct ata_device *dev)
}
}
- /* check and mark DevSlp capability */
- if (ata_id_has_devslp(dev->id))
- dev->flags |= ATA_DFLAG_DEVSLP;
-
- /* Obtain SATA Settings page from Identify Device Data Log,
- * which contains DevSlp timing variables etc.
- * Exclude old devices with ata_id_has_ncq()
+ /* Check and mark DevSlp capability. Get DevSlp timing variables
+ * from SATA Settings page of Identify Device Data Log.
*/
- if (ata_id_has_ncq(dev->id)) {
+ if (ata_id_has_devslp(dev->id)) {
+ u8 sata_setting[ATA_SECT_SIZE];
+ int i, j;
+
+ dev->flags |= ATA_DFLAG_DEVSLP;
err_mask = ata_read_log_page(dev,
ATA_LOG_SATA_ID_DEV_DATA,
ATA_LOG_SATA_SETTINGS,
- dev->sata_settings,
+ sata_setting,
1);
if (err_mask)
ata_dev_dbg(dev,
"failed to get Identify Device Data, Emask 0x%x\n",
err_mask);
+ else
+ for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
+ j = ATA_LOG_DEVSLP_OFFSET + i;
+ dev->devslp_timing[i] = sata_setting[j];
+ }
}
dev->cdb_len = 16;
@@ -2396,8 +2400,10 @@ int ata_dev_configure(struct ata_device *dev)
dma_dir_string = ", DMADIR";
}
- if (ata_id_has_da(dev->id))
+ if (ata_id_has_da(dev->id)) {
dev->flags |= ATA_DFLAG_DA;
+ zpodd_init(dev);
+ }
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info)
@@ -5327,9 +5333,6 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
{
- unsigned int ehi_flags = ATA_EHI_QUIET;
- int rc;
-
/*
* On some hardware, device fails to respond after spun down
* for suspend. As the device won't be used before being
@@ -5338,11 +5341,9 @@ static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int
*
* http://thread.gmane.org/gmane.linux.ide/46764
*/
- if (mesg.event == PM_EVENT_SUSPEND)
- ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
-
- rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
- return rc;
+ unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
+ ATA_EHI_NO_RECOVERY;
+ return ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
}
static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
@@ -5363,40 +5364,38 @@ static int ata_port_suspend(struct device *dev)
static int ata_port_do_freeze(struct device *dev)
{
if (pm_runtime_suspended(dev))
- pm_runtime_resume(dev);
+ return 0;
return ata_port_suspend_common(dev, PMSG_FREEZE);
}
static int ata_port_poweroff(struct device *dev)
{
- if (pm_runtime_suspended(dev))
- return 0;
-
return ata_port_suspend_common(dev, PMSG_HIBERNATE);
}
-static int __ata_port_resume_common(struct ata_port *ap, int *async)
+static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg,
+ int *async)
{
int rc;
- rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
+ rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET,
ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
return rc;
}
-static int ata_port_resume_common(struct device *dev)
+static int ata_port_resume_common(struct device *dev, pm_message_t mesg)
{
struct ata_port *ap = to_ata_port(dev);
- return __ata_port_resume_common(ap, NULL);
+ return __ata_port_resume_common(ap, mesg, NULL);
}
static int ata_port_resume(struct device *dev)
{
int rc;
- rc = ata_port_resume_common(dev);
+ rc = ata_port_resume_common(dev, PMSG_RESUME);
if (!rc) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
@@ -5406,11 +5405,40 @@ static int ata_port_resume(struct device *dev)
return rc;
}
+/*
+ * For ODDs, the upper layer will poll for media change every few seconds,
+ * which will make the port enter and leave suspend state every few seconds.
+ * As each suspend triggers a hard/soft reset, the gain from runtime suspend
+ * is very small and the ODD may malfunction after being reset repeatedly.
+ * So the idle callback here will not proceed to suspend if a non-ZPODD
+ * capable ODD is attached to the port.
+ */
static int ata_port_runtime_idle(struct device *dev)
{
+ struct ata_port *ap = to_ata_port(dev);
+ struct ata_link *link;
+ struct ata_device *adev;
+
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(adev, link, ENABLED)
+ if (adev->class == ATA_DEV_ATAPI &&
+ !zpodd_dev_enabled(adev))
+ return -EBUSY;
+ }
+
return pm_runtime_suspend(dev);
}
+static int ata_port_runtime_suspend(struct device *dev)
+{
+ return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND);
+}
+
+static int ata_port_runtime_resume(struct device *dev)
+{
+ return ata_port_resume_common(dev, PMSG_AUTO_RESUME);
+}
+
static const struct dev_pm_ops ata_port_pm_ops = {
.suspend = ata_port_suspend,
.resume = ata_port_resume,
@@ -5419,8 +5447,8 @@ static const struct dev_pm_ops ata_port_pm_ops = {
.poweroff = ata_port_poweroff,
.restore = ata_port_resume,
- .runtime_suspend = ata_port_suspend,
- .runtime_resume = ata_port_resume_common,
+ .runtime_suspend = ata_port_runtime_suspend,
+ .runtime_resume = ata_port_runtime_resume,
.runtime_idle = ata_port_runtime_idle,
};
@@ -5437,7 +5465,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
int ata_sas_port_async_resume(struct ata_port *ap, int *async)
{
- return __ata_port_resume_common(ap, async);
+ return __ata_port_resume_common(ap, PMSG_RESUME, async);
}
EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index bf039b0e97b7..f9476fb3ac43 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1591,7 +1591,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
* RETURNS:
* 0 on success, AC_ERR_* mask on failure.
*/
-static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
+unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
struct ata_taskfile tf;
@@ -1624,7 +1624,7 @@ static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
* RETURNS:
* 0 on success, AC_ERR_* mask on failure
*/
-static unsigned int atapi_eh_request_sense(struct ata_device *dev,
+unsigned int atapi_eh_request_sense(struct ata_device *dev,
u8 *sense_buf, u8 dfl_sense_key)
{
u8 cdb[ATAPI_CDB_LEN] =
@@ -2094,7 +2094,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev,
*/
static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
{
- if (qc->flags & AC_ERR_MEDIA)
+ if (qc->err_mask & AC_ERR_MEDIA)
return 0; /* don't retry media errors */
if (qc->flags & ATA_QCFLAG_IO)
return 1; /* otherwise retry anything from fs stack */
@@ -3857,6 +3857,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
rc = atapi_eh_clear_ua(dev);
if (rc)
goto rest_fail;
+ if (zpodd_dev_enabled(dev))
+ zpodd_post_poweron(dev);
}
}
@@ -4022,11 +4024,12 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
unsigned long flags;
int rc = 0;
+ struct ata_device *dev;
/* are we suspending? */
spin_lock_irqsave(ap->lock, flags);
if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
- ap->pm_mesg.event == PM_EVENT_ON) {
+ ap->pm_mesg.event & PM_EVENT_RESUME) {
spin_unlock_irqrestore(ap->lock, flags);
return;
}
@@ -4034,6 +4037,18 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+ /*
+ * If we have a ZPODD attached, check its zero
+ * power ready status before the port is frozen.
+ * Only needed for runtime suspend.
+ */
+ if (PMSG_IS_AUTO(ap->pm_mesg)) {
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ if (zpodd_dev_enabled(dev))
+ zpodd_on_suspend(dev);
+ }
+ }
+
/* tell ACPI we're suspending */
rc = ata_acpi_on_suspend(ap);
if (rc)
@@ -4045,7 +4060,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
if (ap->ops->port_suspend)
rc = ap->ops->port_suspend(ap, ap->pm_mesg);
- ata_acpi_set_state(ap, PMSG_SUSPEND);
+ ata_acpi_set_state(ap, ap->pm_mesg);
out:
/* report result */
spin_lock_irqsave(ap->lock, flags);
@@ -4085,7 +4100,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
/* are we resuming? */
spin_lock_irqsave(ap->lock, flags);
if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
- ap->pm_mesg.event != PM_EVENT_ON) {
+ !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
spin_unlock_irqrestore(ap->lock, flags);
return;
}
@@ -4104,7 +4119,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
ata_for_each_dev(dev, link, ALL)
ata_ering_clear(&dev->ering);
- ata_acpi_set_state(ap, PMSG_ON);
+ ata_acpi_set_state(ap, ap->pm_mesg);
if (ap->ops->port_resume)
rc = ap->ops->port_resume(ap);
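The EH checks above switch from an equality test against PM_EVENT_ON to a bit test against PM_EVENT_RESUME because both PMSG_RESUME and PMSG_AUTO_RESUME carry the RESUME bit. A small illustrative helper, assuming the PM_EVENT_* definitions from <linux/pm.h> (not part of this patch):

#include <linux/pm.h>
#include <linux/types.h>

static bool foo_msg_is_resume(pm_message_t mesg)
{
        /*
         * PMSG_RESUME.event == PM_EVENT_RESUME and
         * PMSG_AUTO_RESUME.event == (PM_EVENT_AUTO | PM_EVENT_RESUME),
         * so the bit test matches both, while "== PM_EVENT_ON" matches neither.
         */
        return mesg.event & PM_EVENT_RESUME;
}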
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7c337e754dab..318b41358187 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -933,7 +933,11 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
* block specified for the ATA pass through commands. Regardless
* of whether the command errored or not, return a sense
* block. Copy all controller registers into the sense
- * block. Clear sense key, ASC & ASCQ if there is no error.
+ * block. If there was no error, we get the request from an ATA
+ * passthrough command, so we use the following sense data:
+ * sk = RECOVERED ERROR
+ * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
+ *
*
* LOCKING:
* None.
@@ -959,6 +963,10 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
&sb[1], &sb[2], &sb[3], verbose);
sb[1] &= 0x0f;
+ } else {
+ sb[1] = RECOVERED_ERROR;
+ sb[2] = 0;
+ sb[3] = 0x1D;
}
/*
@@ -1733,10 +1741,12 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
- * generate because the user forced us to, a check condition
- * is generated and the ATA register values are returned
+ * generate because the user forced us to [CK_COND =1], a check
+ * condition is generated and the ATA register values are returned
* whether the command completed successfully or not. If there
- * was no error, SK, ASC and ASCQ will all be zero.
+ * was no error, we use the following sense data:
+ * sk = RECOVERED ERROR
+ * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
*/
if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
((cdb[2] & 0x20) || need_sense)) {
@@ -3755,6 +3765,8 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
mutex_lock(&ap->scsi_host->scan_mutex);
spin_lock_irqsave(ap->lock, flags);
+ if (zpodd_dev_enabled(dev))
+ zpodd_exit(dev);
ata_acpi_unbind(dev);
/* clearing dev->sdev is protected by host lock */
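With the libata-scsi.c hunks above, a successful ATA pass-through command now reports sense key RECOVERED ERROR with ASC/ASCQ 0x00/0x1D (ATA PASS-THROUGH INFORMATION AVAILABLE) instead of an all-zero triplet. A hypothetical SAT-aware consumer could detect that case roughly as follows, going by the descriptor-format sense layout (byte 1 sense key, byte 2 ASC, byte 3 ASCQ); this helper is illustrative only:

#include <linux/types.h>

static bool foo_ata_passthru_info_available(const unsigned char *sense)
{
        /* byte 0: response code, byte 1: sense key, byte 2: ASC, byte 3: ASCQ */
        return (sense[0] & 0x7f) == 0x72 &&
               (sense[1] & 0x0f) == 0x01 &&     /* RECOVERED ERROR */
               sense[2] == 0x00 &&
               sense[3] == 0x1d;                /* ATA PASS-THROUGH INFORMATION AVAILABLE */
}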
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
new file mode 100644
index 000000000000..90b159b740b3
--- /dev/null
+++ b/drivers/ata/libata-zpodd.c
@@ -0,0 +1,299 @@
+#include <linux/libata.h>
+#include <linux/cdrom.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <scsi/scsi_device.h>
+
+#include "libata.h"
+
+static int zpodd_poweroff_delay = 30; /* 30 seconds for power off delay */
+module_param(zpodd_poweroff_delay, int, 0644);
+MODULE_PARM_DESC(zpodd_poweroff_delay, "Poweroff delay for ZPODD in seconds");
+
+enum odd_mech_type {
+ ODD_MECH_TYPE_SLOT,
+ ODD_MECH_TYPE_DRAWER,
+ ODD_MECH_TYPE_UNSUPPORTED,
+};
+
+struct zpodd {
+ enum odd_mech_type mech_type; /* init during probe, RO afterwards */
+ struct ata_device *dev;
+
+ /* The following fields are synchronized by PM core. */
+ bool from_notify; /* resumed as a result of
+ * acpi wake notification */
+ bool zp_ready; /* ZP ready state */
+ unsigned long last_ready; /* last ZP ready timestamp */
+ bool zp_sampled; /* ZP ready state sampled */
+ bool powered_off; /* ODD is powered off
+ * during suspend */
+};
+
+static int eject_tray(struct ata_device *dev)
+{
+ struct ata_taskfile tf = {};
+ const char cdb[] = { GPCMD_START_STOP_UNIT,
+ 0, 0, 0,
+ 0x02, /* LoEj */
+ 0, 0, 0, 0, 0, 0, 0,
+ };
+
+ tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.command = ATA_CMD_PACKET;
+ tf.protocol = ATAPI_PROT_NODATA;
+
+ return ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
+}
+
+/* Per the spec, only slot type and drawer type ODD can be supported */
+static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
+{
+ char buf[16];
+ unsigned int ret;
+ struct rm_feature_desc *desc = (void *)(buf + 8);
+ struct ata_taskfile tf = {};
+
+ char cdb[] = { GPCMD_GET_CONFIGURATION,
+ 2, /* only 1 feature descriptor requested */
+ 0, 3, /* 3, removable medium feature */
+ 0, 0, 0,/* reserved */
+ 0, sizeof(buf),
+ 0, 0, 0,
+ };
+
+ tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.command = ATA_CMD_PACKET;
+ tf.protocol = ATAPI_PROT_PIO;
+ tf.lbam = sizeof(buf);
+
+ ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
+ buf, sizeof(buf), 0);
+ if (ret)
+ return ODD_MECH_TYPE_UNSUPPORTED;
+
+ if (be16_to_cpu(desc->feature_code) != 3)
+ return ODD_MECH_TYPE_UNSUPPORTED;
+
+ if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+ return ODD_MECH_TYPE_SLOT;
+ else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+ return ODD_MECH_TYPE_DRAWER;
+ else
+ return ODD_MECH_TYPE_UNSUPPORTED;
+}
+
+static bool odd_can_poweroff(struct ata_device *ata_dev)
+{
+ acpi_handle handle;
+ acpi_status status;
+ struct acpi_device *acpi_dev;
+
+ handle = ata_dev_acpi_handle(ata_dev);
+ if (!handle)
+ return false;
+
+ status = acpi_bus_get_device(handle, &acpi_dev);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ return acpi_device_can_poweroff(acpi_dev);
+}
+
+/* Test if ODD is zero power ready by sense code */
+static bool zpready(struct ata_device *dev)
+{
+ u8 sense_key, *sense_buf;
+ unsigned int ret, asc, ascq, add_len;
+ struct zpodd *zpodd = dev->zpodd;
+
+ ret = atapi_eh_tur(dev, &sense_key);
+
+ if (!ret || sense_key != NOT_READY)
+ return false;
+
+ sense_buf = dev->link->ap->sector_buf;
+ ret = atapi_eh_request_sense(dev, sense_buf, sense_key);
+ if (ret)
+ return false;
+
+ /* sense valid */
+ if ((sense_buf[0] & 0x7f) != 0x70)
+ return false;
+
+ add_len = sense_buf[7];
+ /* has asc and ascq */
+ if (add_len < 6)
+ return false;
+
+ asc = sense_buf[12];
+ ascq = sense_buf[13];
+
+ if (zpodd->mech_type == ODD_MECH_TYPE_SLOT)
+ /* no media inside */
+ return asc == 0x3a;
+ else
+ /* no media inside and door closed */
+ return asc == 0x3a && ascq == 0x01;
+}
+
+/*
+ * Update the zpodd->zp_ready field. This field will only be set
+ * if the ODD has stayed in ZP ready state for zpodd_poweroff_delay
+ * time, and will be used to decide if power off is allowed. If it
+ * is set, it will be cleared during resume from powered off state.
+ */
+void zpodd_on_suspend(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+ unsigned long expires;
+
+ if (!zpready(dev)) {
+ zpodd->zp_sampled = false;
+ zpodd->zp_ready = false;
+ return;
+ }
+
+ if (!zpodd->zp_sampled) {
+ zpodd->zp_sampled = true;
+ zpodd->last_ready = jiffies;
+ return;
+ }
+
+ expires = zpodd->last_ready +
+ msecs_to_jiffies(zpodd_poweroff_delay * 1000);
+ if (time_before(jiffies, expires))
+ return;
+
+ zpodd->zp_ready = true;
+}
+
+bool zpodd_zpready(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+ return zpodd->zp_ready;
+}
+
+/*
+ * Enable runtime wake capability through ACPI and set the powered_off flag;
+ * this flag will be used during resume to decide which operations need to
+ * be taken.
+ *
+ * Also, media poll needs to be silenced, so that it doesn't bring the ODD
+ * back to full power state every few seconds.
+ */
+void zpodd_enable_run_wake(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+
+ sdev_disable_disk_events(dev->sdev);
+
+ zpodd->powered_off = true;
+ device_set_run_wake(&dev->sdev->sdev_gendev, true);
+ acpi_pm_device_run_wake(&dev->sdev->sdev_gendev, true);
+}
+
+/* Disable runtime wake capability if it is enabled */
+void zpodd_disable_run_wake(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+
+ if (zpodd->powered_off) {
+ acpi_pm_device_run_wake(&dev->sdev->sdev_gendev, false);
+ device_set_run_wake(&dev->sdev->sdev_gendev, false);
+ }
+}
+
+/*
+ * Post power on processing after the ODD has been recovered. If the
+ * ODD wasn't powered off during suspend, nothing needs to be done.
+ *
+ * For a drawer type ODD, if it was powered on because the user pressed
+ * the eject button, the tray needs to be ejected. This can only be done
+ * after the ODD has been recovered, i.e. the link is initialized and the
+ * device is able to process a NON_DATA PIO command, as eject needs to
+ * send a command for the ODD to process.
+ *
+ * The from_notify flag, set in the wake notification handler
+ * zpodd_wake_dev(), indicates whether power on was due to the user's action.
+ *
+ * For both types of ODD, several fields need to be reset.
+ */
+void zpodd_post_poweron(struct ata_device *dev)
+{
+ struct zpodd *zpodd = dev->zpodd;
+
+ if (!zpodd->powered_off)
+ return;
+
+ zpodd->powered_off = false;
+
+ if (zpodd->from_notify) {
+ zpodd->from_notify = false;
+ if (zpodd->mech_type == ODD_MECH_TYPE_DRAWER)
+ eject_tray(dev);
+ }
+
+ zpodd->zp_sampled = false;
+ zpodd->zp_ready = false;
+
+ sdev_enable_disk_events(dev->sdev);
+}
+
+static void zpodd_wake_dev(acpi_handle handle, u32 event, void *context)
+{
+ struct ata_device *ata_dev = context;
+ struct zpodd *zpodd = ata_dev->zpodd;
+ struct device *dev = &ata_dev->sdev->sdev_gendev;
+
+ if (event == ACPI_NOTIFY_DEVICE_WAKE && pm_runtime_suspended(dev)) {
+ zpodd->from_notify = true;
+ pm_runtime_resume(dev);
+ }
+}
+
+static void ata_acpi_add_pm_notifier(struct ata_device *dev)
+{
+ acpi_handle handle = ata_dev_acpi_handle(dev);
+ acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ zpodd_wake_dev, dev);
+}
+
+static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
+{
+ acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->sdev->sdev_gendev);
+ acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, zpodd_wake_dev);
+}
+
+void zpodd_init(struct ata_device *dev)
+{
+ enum odd_mech_type mech_type;
+ struct zpodd *zpodd;
+
+ if (dev->zpodd)
+ return;
+
+ if (!odd_can_poweroff(dev))
+ return;
+
+ mech_type = zpodd_get_mech_type(dev);
+ if (mech_type == ODD_MECH_TYPE_UNSUPPORTED)
+ return;
+
+ zpodd = kzalloc(sizeof(struct zpodd), GFP_KERNEL);
+ if (!zpodd)
+ return;
+
+ zpodd->mech_type = mech_type;
+
+ ata_acpi_add_pm_notifier(dev);
+ zpodd->dev = dev;
+ dev->zpodd = zpodd;
+}
+
+void zpodd_exit(struct ata_device *dev)
+{
+ ata_acpi_remove_pm_notifier(dev);
+ kfree(dev->zpodd);
+ dev->zpodd = NULL;
+}
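zpodd_on_suspend() above only declares the drive zero-power ready once the ready condition has persisted for zpodd_poweroff_delay seconds across consecutive suspend attempts. A stripped-down sketch of that debounce pattern, with hypothetical foo_* names that are not part of this patch:

#include <linux/jiffies.h>
#include <linux/types.h>

struct foo_debounce {
        bool sampled;
        unsigned long first_seen;       /* jiffies when the condition was first seen */
};

static bool foo_condition_persisted(struct foo_debounce *d, bool cond,
                                    unsigned int delay_secs)
{
        if (!cond) {
                d->sampled = false;
                return false;
        }
        if (!d->sampled) {
                d->sampled = true;
                d->first_seen = jiffies;
                return false;
        }
        return time_after_eq(jiffies,
                             d->first_seen + msecs_to_jiffies(delay_secs * 1000));
}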
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 7148a58020b9..c949dd311b2e 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -182,6 +182,9 @@ extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
int (*map_fn)(struct ata_ering_entry *, void *),
void *arg);
+extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key);
+extern unsigned int atapi_eh_request_sense(struct ata_device *dev,
+ u8 *sense_buf, u8 dfl_sense_key);
/* libata-pmp.c */
#ifdef CONFIG_SATA_PMP
@@ -230,4 +233,28 @@ static inline void ata_sff_exit(void)
{ }
#endif /* CONFIG_ATA_SFF */
+/* libata-zpodd.c */
+#ifdef CONFIG_SATA_ZPODD
+void zpodd_init(struct ata_device *dev);
+void zpodd_exit(struct ata_device *dev);
+static inline bool zpodd_dev_enabled(struct ata_device *dev)
+{
+ return dev->zpodd != NULL;
+}
+void zpodd_on_suspend(struct ata_device *dev);
+bool zpodd_zpready(struct ata_device *dev);
+void zpodd_enable_run_wake(struct ata_device *dev);
+void zpodd_disable_run_wake(struct ata_device *dev);
+void zpodd_post_poweron(struct ata_device *dev);
+#else /* CONFIG_SATA_ZPODD */
+static inline void zpodd_init(struct ata_device *dev) {}
+static inline void zpodd_exit(struct ata_device *dev) {}
+static inline bool zpodd_dev_enabled(struct ata_device *dev) { return false; }
+static inline void zpodd_on_suspend(struct ata_device *dev) {}
+static inline bool zpodd_zpready(struct ata_device *dev) { return false; }
+static inline void zpodd_enable_run_wake(struct ata_device *dev) {}
+static inline void zpodd_disable_run_wake(struct ata_device *dev) {}
+static inline void zpodd_post_poweron(struct ata_device *dev) {}
+#endif /* CONFIG_SATA_ZPODD */
+
#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 556222f04731..c1bfaf43d109 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -31,6 +31,7 @@
* Copyright (C) 2006 Tower Technologies
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -937,9 +938,9 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
goto err_rel_gpio;
}
- ide_base = devm_request_and_ioremap(&pdev->dev, mem_res);
- if (!ide_base) {
- err = -ENXIO;
+ ide_base = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(ide_base)) {
+ err = PTR_ERR(ide_base);
goto err_rel_gpio;
}
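devm_ioremap_resource() differs from the removed devm_request_and_ioremap() in that it returns an ERR_PTR() encoding the actual failure reason, so callers propagate PTR_ERR() instead of a blanket -ENXIO. A minimal sketch of the resulting probe pattern, with a hypothetical foo_probe() that is not part of this patch:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);   /* e.g. -EINVAL, -EBUSY or -ENOMEM */

        /* ... program the hardware through 'base' ... */
        return 0;
}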
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 652f57e83484..3a8fb28b71f2 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -26,9 +26,9 @@
#include <asm/prom.h>
#include <asm/mpc52xx.h>
-#include <sysdev/bestcomm/bestcomm.h>
-#include <sysdev/bestcomm/bestcomm_priv.h>
-#include <sysdev/bestcomm/ata.h>
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/ata.h>
#define DRV_NAME "mpc52xx_ata"
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 63ffb002ec67..70b0e01372b3 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -512,7 +512,7 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
return -ENOMEM;
}
- info->clk = clk_get(&pdev->dev, "cfcon");
+ info->clk = devm_clk_get(&pdev->dev, "cfcon");
if (IS_ERR(info->clk)) {
dev_err(dev, "failed to get access to cf controller clock\n");
ret = PTR_ERR(info->clk);
@@ -589,7 +589,6 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
stop_clk:
clk_disable(info->clk);
- clk_put(info->clk);
return ret;
}
@@ -601,7 +600,6 @@ static int __exit pata_s3c_remove(struct platform_device *pdev)
ata_host_detach(host);
clk_disable(info->clk);
- clk_put(info->clk);
return 0;
}
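The pata_samsung_cf.c hunks rely on the managed clock API: devm_clk_get() ties the clock reference to the device's lifetime, so the explicit clk_put() calls in the error and remove paths become unnecessary, while clk_disable() is still required for clocks the driver enabled. A rough sketch of the pattern with hypothetical names, not taken from this patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct clk *clk;
        int ret;

        clk = devm_clk_get(&pdev->dev, "cfcon");
        if (IS_ERR(clk))
                return PTR_ERR(clk);            /* reference is auto-released */

        ret = clk_enable(clk);
        if (ret)
                return ret;                     /* no clk_put() needed here */

        /* ... */
        return 0;
}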
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
new file mode 100644
index 000000000000..caf33f620c35
--- /dev/null
+++ b/drivers/ata/sata_rcar.c
@@ -0,0 +1,910 @@
+/*
+ * Renesas R-Car SATA driver
+ *
+ * Author: Vladimir Barinov <source@cogentembedded.com>
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define DRV_NAME "sata_rcar"
+
+/* SH-Navi2G/ATAPI-ATA compatible task registers */
+#define DATA_REG 0x100
+#define SDEVCON_REG 0x138
+
+/* SH-Navi2G/ATAPI module compatible control registers */
+#define ATAPI_CONTROL1_REG 0x180
+#define ATAPI_STATUS_REG 0x184
+#define ATAPI_INT_ENABLE_REG 0x188
+#define ATAPI_DTB_ADR_REG 0x198
+#define ATAPI_DMA_START_ADR_REG 0x19C
+#define ATAPI_DMA_TRANS_CNT_REG 0x1A0
+#define ATAPI_CONTROL2_REG 0x1A4
+#define ATAPI_SIG_ST_REG 0x1B0
+#define ATAPI_BYTE_SWAP_REG 0x1BC
+
+/* ATAPI control 1 register (ATAPI_CONTROL1) bits */
+#define ATAPI_CONTROL1_ISM BIT(16)
+#define ATAPI_CONTROL1_DTA32M BIT(11)
+#define ATAPI_CONTROL1_RESET BIT(7)
+#define ATAPI_CONTROL1_DESE BIT(3)
+#define ATAPI_CONTROL1_RW BIT(2)
+#define ATAPI_CONTROL1_STOP BIT(1)
+#define ATAPI_CONTROL1_START BIT(0)
+
+/* ATAPI status register (ATAPI_STATUS) bits */
+#define ATAPI_STATUS_SATAINT BIT(11)
+#define ATAPI_STATUS_DNEND BIT(6)
+#define ATAPI_STATUS_DEVTRM BIT(5)
+#define ATAPI_STATUS_DEVINT BIT(4)
+#define ATAPI_STATUS_ERR BIT(2)
+#define ATAPI_STATUS_NEND BIT(1)
+#define ATAPI_STATUS_ACT BIT(0)
+
+/* Interrupt enable register (ATAPI_INT_ENABLE) bits */
+#define ATAPI_INT_ENABLE_SATAINT BIT(11)
+#define ATAPI_INT_ENABLE_DNEND BIT(6)
+#define ATAPI_INT_ENABLE_DEVTRM BIT(5)
+#define ATAPI_INT_ENABLE_DEVINT BIT(4)
+#define ATAPI_INT_ENABLE_ERR BIT(2)
+#define ATAPI_INT_ENABLE_NEND BIT(1)
+#define ATAPI_INT_ENABLE_ACT BIT(0)
+
+/* Access control registers for physical layer control register */
+#define SATAPHYADDR_REG 0x200
+#define SATAPHYWDATA_REG 0x204
+#define SATAPHYACCEN_REG 0x208
+#define SATAPHYRESET_REG 0x20C
+#define SATAPHYRDATA_REG 0x210
+#define SATAPHYACK_REG 0x214
+
+/* Physical layer control address command register (SATAPHYADDR) bits */
+#define SATAPHYADDR_PHYRATEMODE BIT(10)
+#define SATAPHYADDR_PHYCMD_READ BIT(9)
+#define SATAPHYADDR_PHYCMD_WRITE BIT(8)
+
+/* Physical layer control enable register (SATAPHYACCEN) bits */
+#define SATAPHYACCEN_PHYLANE BIT(0)
+
+/* Physical layer control reset register (SATAPHYRESET) bits */
+#define SATAPHYRESET_PHYRST BIT(1)
+#define SATAPHYRESET_PHYSRES BIT(0)
+
+/* Physical layer control acknowledge register (SATAPHYACK) bits */
+#define SATAPHYACK_PHYACK BIT(0)
+
+/* Serial-ATA HOST control registers */
+#define BISTCONF_REG 0x102C
+#define SDATA_REG 0x1100
+#define SSDEVCON_REG 0x1204
+
+#define SCRSSTS_REG 0x1400
+#define SCRSERR_REG 0x1404
+#define SCRSCON_REG 0x1408
+#define SCRSACT_REG 0x140C
+
+#define SATAINTSTAT_REG 0x1508
+#define SATAINTMASK_REG 0x150C
+
+/* SATA INT status register (SATAINTSTAT) bits */
+#define SATAINTSTAT_SERR BIT(3)
+#define SATAINTSTAT_ATA BIT(0)
+
+/* SATA INT mask register (SATAINTSTAT) bits */
+#define SATAINTMASK_SERRMSK BIT(3)
+#define SATAINTMASK_ERRMSK BIT(2)
+#define SATAINTMASK_ERRCRTMSK BIT(1)
+#define SATAINTMASK_ATAMSK BIT(0)
+
+#define SATA_RCAR_INT_MASK (SATAINTMASK_SERRMSK | \
+ SATAINTMASK_ATAMSK)
+
+/* Physical Layer Control Registers */
+#define SATAPCTLR1_REG 0x43
+#define SATAPCTLR2_REG 0x52
+#define SATAPCTLR3_REG 0x5A
+#define SATAPCTLR4_REG 0x60
+
+/* Descriptor table word 0 bit (when DTA32M = 1) */
+#define SATA_RCAR_DTEND BIT(0)
+
+struct sata_rcar_priv {
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static void sata_rcar_phy_initialize(struct sata_rcar_priv *priv)
+{
+ /* idle state */
+ iowrite32(0, priv->base + SATAPHYADDR_REG);
+ /* reset */
+ iowrite32(SATAPHYRESET_PHYRST, priv->base + SATAPHYRESET_REG);
+ udelay(10);
+ /* deassert reset */
+ iowrite32(0, priv->base + SATAPHYRESET_REG);
+}
+
+static void sata_rcar_phy_write(struct sata_rcar_priv *priv, u16 reg, u32 val,
+ int group)
+{
+ int timeout;
+
+ /* deassert reset */
+ iowrite32(0, priv->base + SATAPHYRESET_REG);
+ /* lane 1 */
+ iowrite32(SATAPHYACCEN_PHYLANE, priv->base + SATAPHYACCEN_REG);
+ /* write phy register value */
+ iowrite32(val, priv->base + SATAPHYWDATA_REG);
+ /* set register group */
+ if (group)
+ reg |= SATAPHYADDR_PHYRATEMODE;
+ /* write command */
+ iowrite32(SATAPHYADDR_PHYCMD_WRITE | reg, priv->base + SATAPHYADDR_REG);
+ /* wait for ack */
+ for (timeout = 0; timeout < 100; timeout++) {
+ val = ioread32(priv->base + SATAPHYACK_REG);
+ if (val & SATAPHYACK_PHYACK)
+ break;
+ }
+ if (timeout >= 100)
+ pr_err("%s timeout\n", __func__);
+ /* idle state */
+ iowrite32(0, priv->base + SATAPHYADDR_REG);
+}
+
+static void sata_rcar_freeze(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* mask */
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+
+ ata_sff_freeze(ap);
+}
+
+static void sata_rcar_thaw(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* ack */
+ iowrite32(~SATA_RCAR_INT_MASK, priv->base + SATAINTSTAT_REG);
+
+ ata_sff_thaw(ap);
+
+ /* unmask */
+ iowrite32(0x7ff & ~SATA_RCAR_INT_MASK, priv->base + SATAINTMASK_REG);
+}
+
+static void sata_rcar_ioread16_rep(void __iomem *reg, void *buffer, int count)
+{
+ u16 *ptr = buffer;
+
+ while (count--) {
+ u16 data = ioread32(reg);
+
+ *ptr++ = data;
+ }
+}
+
+static void sata_rcar_iowrite16_rep(void __iomem *reg, void *buffer, int count)
+{
+ const u16 *ptr = buffer;
+
+ while (count--)
+ iowrite32(*ptr++, reg);
+}
+
+static u8 sata_rcar_check_status(struct ata_port *ap)
+{
+ return ioread32(ap->ioaddr.status_addr);
+}
+
+static u8 sata_rcar_check_altstatus(struct ata_port *ap)
+{
+ return ioread32(ap->ioaddr.altstatus_addr);
+}
+
+static void sata_rcar_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ iowrite32(ctl, ap->ioaddr.ctl_addr);
+}
+
+static void sata_rcar_dev_select(struct ata_port *ap, unsigned int device)
+{
+ iowrite32(ATA_DEVICE_OBS, ap->ioaddr.device_addr);
+ ata_sff_pause(ap); /* needed; also flushes, for mmio */
+}
+
+static unsigned int sata_rcar_ata_devchk(struct ata_port *ap,
+ unsigned int device)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u8 nsect, lbal;
+
+ sata_rcar_dev_select(ap, device);
+
+ iowrite32(0x55, ioaddr->nsect_addr);
+ iowrite32(0xaa, ioaddr->lbal_addr);
+
+ iowrite32(0xaa, ioaddr->nsect_addr);
+ iowrite32(0x55, ioaddr->lbal_addr);
+
+ iowrite32(0x55, ioaddr->nsect_addr);
+ iowrite32(0xaa, ioaddr->lbal_addr);
+
+ nsect = ioread32(ioaddr->nsect_addr);
+ lbal = ioread32(ioaddr->lbal_addr);
+
+ if (nsect == 0x55 && lbal == 0xaa)
+ return 1; /* found a device */
+
+ return 0; /* nothing found */
+}
+
+static int sata_rcar_wait_after_reset(struct ata_link *link,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+
+ ata_msleep(ap, ATA_WAIT_AFTER_RESET);
+
+ return ata_sff_wait_ready(link, deadline);
+}
+
+static int sata_rcar_bus_softreset(struct ata_port *ap, unsigned long deadline)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+ /* software reset. causes dev0 to be selected */
+ iowrite32(ap->ctl, ioaddr->ctl_addr);
+ udelay(20);
+ iowrite32(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+ udelay(20);
+ iowrite32(ap->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = ap->ctl;
+
+ /* wait the port to become ready */
+ return sata_rcar_wait_after_reset(&ap->link, deadline);
+}
+
+static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ unsigned int devmask = 0;
+ int rc;
+ u8 err;
+
+ /* determine if device 0 is present */
+ if (sata_rcar_ata_devchk(ap, 0))
+ devmask |= 1 << 0;
+
+ /* issue bus reset */
+ DPRINTK("about to softreset, devmask=%x\n", devmask);
+ rc = sata_rcar_bus_softreset(ap, deadline);
+ /* if link is occupied, -ENODEV too is an error */
+ if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
+ ata_link_err(link, "SRST failed (errno=%d)\n", rc);
+ return rc;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_sff_dev_classify(&link->device[0], devmask, &err);
+
+ DPRINTK("classes[0]=%u\n", classes[0]);
+ return 0;
+}
+
+static void sata_rcar_tf_load(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ if (tf->ctl != ap->last_ctl) {
+ iowrite32(tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ iowrite32(tf->hob_feature, ioaddr->feature_addr);
+ iowrite32(tf->hob_nsect, ioaddr->nsect_addr);
+ iowrite32(tf->hob_lbal, ioaddr->lbal_addr);
+ iowrite32(tf->hob_lbam, ioaddr->lbam_addr);
+ iowrite32(tf->hob_lbah, ioaddr->lbah_addr);
+ VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+ tf->hob_feature,
+ tf->hob_nsect,
+ tf->hob_lbal,
+ tf->hob_lbam,
+ tf->hob_lbah);
+ }
+
+ if (is_addr) {
+ iowrite32(tf->feature, ioaddr->feature_addr);
+ iowrite32(tf->nsect, ioaddr->nsect_addr);
+ iowrite32(tf->lbal, ioaddr->lbal_addr);
+ iowrite32(tf->lbam, ioaddr->lbam_addr);
+ iowrite32(tf->lbah, ioaddr->lbah_addr);
+ VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+ tf->feature,
+ tf->nsect,
+ tf->lbal,
+ tf->lbam,
+ tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE) {
+ iowrite32(tf->device, ioaddr->device_addr);
+ VPRINTK("device 0x%X\n", tf->device);
+ }
+
+ ata_wait_idle(ap);
+}
+
+static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ tf->command = sata_rcar_check_status(ap);
+ tf->feature = ioread32(ioaddr->error_addr);
+ tf->nsect = ioread32(ioaddr->nsect_addr);
+ tf->lbal = ioread32(ioaddr->lbal_addr);
+ tf->lbam = ioread32(ioaddr->lbam_addr);
+ tf->lbah = ioread32(ioaddr->lbah_addr);
+ tf->device = ioread32(ioaddr->device_addr);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ iowrite32(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+ tf->hob_feature = ioread32(ioaddr->error_addr);
+ tf->hob_nsect = ioread32(ioaddr->nsect_addr);
+ tf->hob_lbal = ioread32(ioaddr->lbal_addr);
+ tf->hob_lbam = ioread32(ioaddr->lbam_addr);
+ tf->hob_lbah = ioread32(ioaddr->lbah_addr);
+ iowrite32(tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ }
+}
+
+static void sata_rcar_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+ iowrite32(tf->command, ap->ioaddr.command_addr);
+ ata_sff_pause(ap);
+}
+
+static unsigned int sata_rcar_data_xfer(struct ata_device *dev,
+ unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ struct ata_port *ap = dev->link->ap;
+ void __iomem *data_addr = ap->ioaddr.data_addr;
+ unsigned int words = buflen >> 1;
+
+ /* Transfer multiple of 2 bytes */
+ if (rw == READ)
+ sata_rcar_ioread16_rep(data_addr, buf, words);
+ else
+ sata_rcar_iowrite16_rep(data_addr, buf, words);
+
+ /* Transfer trailing byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ unsigned char pad[2] = { };
+
+ /* Point buf to the tail of buffer */
+ buf += buflen - 1;
+
+ /*
+ * Use io*16_rep() accessors here as well to avoid pointlessly
+ * swapping bytes to and from on the big endian machines...
+ */
+ if (rw == READ) {
+ sata_rcar_ioread16_rep(data_addr, pad, 1);
+ *buf = pad[0];
+ } else {
+ pad[0] = *buf;
+ sata_rcar_iowrite16_rep(data_addr, pad, 1);
+ }
+ words++;
+ }
+
+ return words << 1;
+}
+
+static void sata_rcar_drain_fifo(struct ata_queued_cmd *qc)
+{
+ int count;
+ struct ata_port *ap;
+
+ /* We only need to flush incoming data when a command was running */
+ if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
+ return;
+
+ ap = qc->ap;
+ /* Drain up to 64K of data before we give up this recovery method */
+ for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) &&
+ count < 65536; count += 2)
+ ioread32(ap->ioaddr.data_addr);
+
+ /* Can become DEBUG later */
+ if (count)
+ ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
+}
+
+static int sata_rcar_scr_read(struct ata_link *link, unsigned int sc_reg,
+ u32 *val)
+{
+ if (sc_reg > SCR_ACTIVE)
+ return -EINVAL;
+
+ *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg << 2));
+ return 0;
+}
+
+static int sata_rcar_scr_write(struct ata_link *link, unsigned int sc_reg,
+ u32 val)
+{
+ if (sc_reg > SCR_ACTIVE)
+ return -EINVAL;
+
+ iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg << 2));
+ return 0;
+}
+
+static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_bmdma_prd *prd = ap->bmdma_prd;
+ struct scatterlist *sg;
+ unsigned int si, pi;
+
+ pi = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, sg_len, len;
+
+ /*
+ * Note: h/w doesn't support 64-bit, so we unconditionally
+ * truncate dma_addr_t to u32.
+ */
+ addr = (u32)sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ /* H/w transfer count is only 29 bits long, let's be careful */
+ while (sg_len) {
+ len = sg_len;
+ if (len > 0x1ffffffe)
+ len = 0x1ffffffe;
+
+ prd[pi].addr = cpu_to_le32(addr);
+ prd[pi].flags_len = cpu_to_le32(len);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ /* end-of-table flag */
+ prd[pi - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
+}
+
+static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ sata_rcar_bmdma_fill_sg(qc);
+}
+
+static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = qc->tf.flags & ATA_TFLAG_WRITE;
+ u32 dmactl;
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* load PRD table addr. */
+ mb(); /* make sure PRD table writes are visible to controller */
+ iowrite32(ap->bmdma_prd_dma, priv->base + ATAPI_DTB_ADR_REG);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ dmactl &= ~(ATAPI_CONTROL1_RW | ATAPI_CONTROL1_STOP);
+ if (dmactl & ATAPI_CONTROL1_START) {
+ dmactl &= ~ATAPI_CONTROL1_START;
+ dmactl |= ATAPI_CONTROL1_STOP;
+ }
+ if (!rw)
+ dmactl |= ATAPI_CONTROL1_RW;
+ iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
+
+ /* issue r/w command */
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void sata_rcar_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ u32 dmactl;
+ struct sata_rcar_priv *priv = ap->host->private_data;
+
+ /* start host DMA transaction */
+ dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ dmactl |= ATAPI_CONTROL1_START;
+ iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
+}
+
+static void sata_rcar_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct sata_rcar_priv *priv = ap->host->private_data;
+ u32 dmactl;
+
+ /* force termination of DMA transfer if active */
+ dmactl = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ if (dmactl & ATAPI_CONTROL1_START) {
+ dmactl &= ~ATAPI_CONTROL1_START;
+ dmactl |= ATAPI_CONTROL1_STOP;
+ iowrite32(dmactl, priv->base + ATAPI_CONTROL1_REG);
+ }
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
+}
+
+static u8 sata_rcar_bmdma_status(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+ u32 status;
+ u8 host_stat = 0;
+
+ status = ioread32(priv->base + ATAPI_STATUS_REG);
+ if (status & ATAPI_STATUS_DEVINT)
+ host_stat |= ATA_DMA_INTR;
+ if (status & ATAPI_STATUS_ACT)
+ host_stat |= ATA_DMA_ACTIVE;
+
+ return host_stat;
+}
+
+static struct scsi_host_template sata_rcar_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sata_rcar_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .freeze = sata_rcar_freeze,
+ .thaw = sata_rcar_thaw,
+ .softreset = sata_rcar_softreset,
+
+ .scr_read = sata_rcar_scr_read,
+ .scr_write = sata_rcar_scr_write,
+
+ .sff_dev_select = sata_rcar_dev_select,
+ .sff_set_devctl = sata_rcar_set_devctl,
+ .sff_check_status = sata_rcar_check_status,
+ .sff_check_altstatus = sata_rcar_check_altstatus,
+ .sff_tf_load = sata_rcar_tf_load,
+ .sff_tf_read = sata_rcar_tf_read,
+ .sff_exec_command = sata_rcar_exec_command,
+ .sff_data_xfer = sata_rcar_data_xfer,
+ .sff_drain_fifo = sata_rcar_drain_fifo,
+
+ .qc_prep = sata_rcar_qc_prep,
+
+ .bmdma_setup = sata_rcar_bmdma_setup,
+ .bmdma_start = sata_rcar_bmdma_start,
+ .bmdma_stop = sata_rcar_bmdma_stop,
+ .bmdma_status = sata_rcar_bmdma_status,
+};
+
+static int sata_rcar_serr_interrupt(struct ata_port *ap)
+{
+ struct sata_rcar_priv *priv = ap->host->private_data;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ int freeze = 0;
+ int handled = 0;
+ u32 serror;
+
+ serror = ioread32(priv->base + SCRSERR_REG);
+ if (!serror)
+ return 0;
+
+ DPRINTK("SError @host_intr: 0x%x\n", serror);
+
+ /* first, analyze and record host port events */
+ ata_ehi_clear_desc(ehi);
+
+ if (serror & (SERR_DEV_XCHG | SERR_PHYRDY_CHG)) {
+ /* Setup a soft-reset EH action */
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "%s", "hotplug");
+
+ freeze = serror & SERR_COMM_WAKE ? 0 : 1;
+ handled = 1;
+ }
+
+ /* freeze or abort */
+ if (freeze)
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
+
+ return handled;
+}
+
+static int sata_rcar_ata_interrupt(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ int handled = 0;
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc)
+ handled |= ata_bmdma_port_intr(ap, qc);
+
+ return handled;
+}
+
+static irqreturn_t sata_rcar_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct sata_rcar_priv *priv = host->private_data;
+ struct ata_port *ap;
+ unsigned int handled = 0;
+ u32 sataintstat;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ sataintstat = ioread32(priv->base + SATAINTSTAT_REG);
+ if (!sataintstat)
+ goto done;
+ /* ack */
+ iowrite32(sataintstat & ~SATA_RCAR_INT_MASK,
+ priv->base + SATAINTSTAT_REG);
+
+ ap = host->ports[0];
+
+ if (sataintstat & SATAINTSTAT_ATA)
+ handled |= sata_rcar_ata_interrupt(ap);
+
+ if (sataintstat & SATAINTSTAT_SERR)
+ handled |= sata_rcar_serr_interrupt(ap);
+
+done:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void sata_rcar_setup_port(struct ata_host *host)
+{
+ struct ata_port *ap = host->ports[0];
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ struct sata_rcar_priv *priv = host->private_data;
+
+ ap->ops = &sata_rcar_port_ops;
+ ap->pio_mask = ATA_PIO4;
+ ap->udma_mask = ATA_UDMA6;
+ ap->flags |= ATA_FLAG_SATA;
+
+ ioaddr->cmd_addr = priv->base + SDATA_REG;
+ ioaddr->ctl_addr = priv->base + SSDEVCON_REG;
+ ioaddr->scr_addr = priv->base + SCRSSTS_REG;
+ ioaddr->altstatus_addr = ioaddr->ctl_addr;
+
+ ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2);
+ ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2);
+ ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
+ ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
+ ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
+ ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
+ ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
+ ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
+ ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
+ ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
+}
+
+static void sata_rcar_init_controller(struct ata_host *host)
+{
+ struct sata_rcar_priv *priv = host->private_data;
+ u32 val;
+
+ /* reset and setup phy */
+ sata_rcar_phy_initialize(priv);
+ sata_rcar_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 0);
+ sata_rcar_phy_write(priv, SATAPCTLR1_REG, 0x00200188, 1);
+ sata_rcar_phy_write(priv, SATAPCTLR3_REG, 0x0000A061, 0);
+ sata_rcar_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 0);
+ sata_rcar_phy_write(priv, SATAPCTLR2_REG, 0x20000000, 1);
+ sata_rcar_phy_write(priv, SATAPCTLR4_REG, 0x28E80000, 0);
+
+ /* SATA-IP reset state */
+ val = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ val |= ATAPI_CONTROL1_RESET;
+ iowrite32(val, priv->base + ATAPI_CONTROL1_REG);
+
+ /* ISM mode, PRD mode, DTEND flag at bit 0 */
+ val = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ val |= ATAPI_CONTROL1_ISM;
+ val |= ATAPI_CONTROL1_DESE;
+ val |= ATAPI_CONTROL1_DTA32M;
+ iowrite32(val, priv->base + ATAPI_CONTROL1_REG);
+
+ /* Release the SATA-IP from the reset state */
+ val = ioread32(priv->base + ATAPI_CONTROL1_REG);
+ val &= ~ATAPI_CONTROL1_RESET;
+ iowrite32(val, priv->base + ATAPI_CONTROL1_REG);
+
+ /* ack and mask */
+ iowrite32(0, priv->base + SATAINTSTAT_REG);
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+ /* enable interrupts */
+ iowrite32(ATAPI_INT_ENABLE_SATAINT, priv->base + ATAPI_INT_ENABLE_REG);
+}
+
+static int sata_rcar_probe(struct platform_device *pdev)
+{
+ struct ata_host *host;
+ struct sata_rcar_priv *priv;
+ struct resource *mem;
+ int irq;
+ int ret = 0;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct sata_rcar_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "failed to get access to sata clock\n");
+ return PTR_ERR(priv->clk);
+ }
+ clk_enable(priv->clk);
+
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host) {
+ dev_err(&pdev->dev, "ata_host_alloc failed\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ host->private_data = priv;
+
+ priv->base = devm_request_and_ioremap(&pdev->dev, mem);
+ if (!priv->base) {
+ ret = -EADDRNOTAVAIL;
+ goto cleanup;
+ }
+
+ /* setup port */
+ sata_rcar_setup_port(host);
+
+ /* initialize host controller */
+ sata_rcar_init_controller(host);
+
+ ret = ata_host_activate(host, irq, sata_rcar_interrupt, 0,
+ &sata_rcar_sht);
+ if (!ret)
+ return 0;
+
+cleanup:
+ clk_disable(priv->clk);
+
+ return ret;
+}
+
+static int sata_rcar_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct sata_rcar_priv *priv = host->private_data;
+
+ ata_host_detach(host);
+
+ /* disable interrupts */
+ iowrite32(0, priv->base + ATAPI_INT_ENABLE_REG);
+ /* ack and mask */
+ iowrite32(0, priv->base + SATAINTSTAT_REG);
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+
+ clk_disable(priv->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sata_rcar_suspend(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct sata_rcar_priv *priv = host->private_data;
+ int ret;
+
+ ret = ata_host_suspend(host, PMSG_SUSPEND);
+ if (!ret) {
+ /* disable interrupts */
+ iowrite32(0, priv->base + ATAPI_INT_ENABLE_REG);
+ /* mask */
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+
+ clk_disable(priv->clk);
+ }
+
+ return ret;
+}
+
+static int sata_rcar_resume(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct sata_rcar_priv *priv = host->private_data;
+
+ clk_enable(priv->clk);
+
+ /* ack and mask */
+ iowrite32(0, priv->base + SATAINTSTAT_REG);
+ iowrite32(0x7ff, priv->base + SATAINTMASK_REG);
+ /* enable interrupts */
+ iowrite32(ATAPI_INT_ENABLE_SATAINT, priv->base + ATAPI_INT_ENABLE_REG);
+
+ ata_host_resume(host);
+
+ return 0;
+}
+
+static const struct dev_pm_ops sata_rcar_pm_ops = {
+ .suspend = sata_rcar_suspend,
+ .resume = sata_rcar_resume,
+};
+#endif
+
+static struct of_device_id sata_rcar_match[] = {
+ { .compatible = "renesas,rcar-sata", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sata_rcar_match);
+
+static struct platform_driver sata_rcar_driver = {
+ .probe = sata_rcar_probe,
+ .remove = sata_rcar_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sata_rcar_match,
+#ifdef CONFIG_PM
+ .pm = &sata_rcar_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(sata_rcar_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vladimir Barinov");
+MODULE_DESCRIPTION("Renesas R-Car SATA controller low level driver");
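In the new sata_rcar driver above, the classic SFF taskfile registers are shadowed as 32-bit registers spaced four bytes apart, which is why sata_rcar_setup_port() shifts every ATA_REG_* index left by two and the sff_* accessors are overridden to use ioread32()/iowrite32(). The offset calculation amounts to roughly the following illustrative helper, which is not part of the driver:

#include <linux/ata.h>

static void __iomem *foo_tf_reg(void __iomem *cmd_base, unsigned int ata_reg)
{
        /* e.g. ATA_REG_LBAL (3) ends up at byte offset 0x0C */
        return cmd_base + (ata_reg << 2);
}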
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index 6a0955e6d4fc..53ecac5a2161 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -636,82 +636,82 @@ struct rx_buf_desc {
#define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
#define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
-typedef volatile u_int freg_t;
+typedef volatile u_int ffreg_t;
typedef u_int rreg_t;
typedef struct _ffredn_t {
- freg_t idlehead_high; /* Idle cell header (high) */
- freg_t idlehead_low; /* Idle cell header (low) */
- freg_t maxrate; /* Maximum rate */
- freg_t stparms; /* Traffic Management Parameters */
- freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
- freg_t rm_type; /* */
- u_int filler5[0x17 - 0x06];
- freg_t cmd_reg; /* Command register */
- u_int filler18[0x20 - 0x18];
- freg_t cbr_base; /* CBR Pointer Base */
- freg_t vbr_base; /* VBR Pointer Base */
- freg_t abr_base; /* ABR Pointer Base */
- freg_t ubr_base; /* UBR Pointer Base */
- u_int filler24;
- freg_t vbrwq_base; /* VBR Wait Queue Base */
- freg_t abrwq_base; /* ABR Wait Queue Base */
- freg_t ubrwq_base; /* UBR Wait Queue Base */
- freg_t vct_base; /* Main VC Table Base */
- freg_t vcte_base; /* Extended Main VC Table Base */
- u_int filler2a[0x2C - 0x2A];
- freg_t cbr_tab_beg; /* CBR Table Begin */
- freg_t cbr_tab_end; /* CBR Table End */
- freg_t cbr_pointer; /* CBR Pointer */
- u_int filler2f[0x30 - 0x2F];
- freg_t prq_st_adr; /* Packet Ready Queue Start Address */
- freg_t prq_ed_adr; /* Packet Ready Queue End Address */
- freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
- freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
- freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
- freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
- freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
- freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
- u_int filler38[0x40 - 0x38];
- freg_t queue_base; /* Base address for PRQ and TCQ */
- freg_t desc_base; /* Base address of descriptor table */
- u_int filler42[0x45 - 0x42];
- freg_t mode_reg_0; /* Mode register 0 */
- freg_t mode_reg_1; /* Mode register 1 */
- freg_t intr_status_reg;/* Interrupt Status register */
- freg_t mask_reg; /* Mask Register */
- freg_t cell_ctr_high1; /* Total cell transfer count (high) */
- freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
- freg_t state_reg; /* Status register */
- u_int filler4c[0x58 - 0x4c];
- freg_t curr_desc_num; /* Contains the current descriptor num */
- freg_t next_desc; /* Next descriptor */
- freg_t next_vc; /* Next VC */
- u_int filler5b[0x5d - 0x5b];
- freg_t present_slot_cnt;/* Present slot count */
- u_int filler5e[0x6a - 0x5e];
- freg_t new_desc_num; /* New descriptor number */
- freg_t new_vc; /* New VC */
- freg_t sched_tbl_ptr; /* Schedule table pointer */
- freg_t vbrwq_wptr; /* VBR wait queue write pointer */
- freg_t vbrwq_rptr; /* VBR wait queue read pointer */
- freg_t abrwq_wptr; /* ABR wait queue write pointer */
- freg_t abrwq_rptr; /* ABR wait queue read pointer */
- freg_t ubrwq_wptr; /* UBR wait queue write pointer */
- freg_t ubrwq_rptr; /* UBR wait queue read pointer */
- freg_t cbr_vc; /* CBR VC */
- freg_t vbr_sb_vc; /* VBR SB VC */
- freg_t abr_sb_vc; /* ABR SB VC */
- freg_t ubr_sb_vc; /* UBR SB VC */
- freg_t vbr_next_link; /* VBR next link */
- freg_t abr_next_link; /* ABR next link */
- freg_t ubr_next_link; /* UBR next link */
- u_int filler7a[0x7c-0x7a];
- freg_t out_rate_head; /* Out of rate head */
- u_int filler7d[0xca-0x7d]; /* pad out to full address space */
- freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
- freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
- u_int fillercc[0x100-0xcc]; /* pad out to full address space */
+ ffreg_t idlehead_high; /* Idle cell header (high) */
+ ffreg_t idlehead_low; /* Idle cell header (low) */
+ ffreg_t maxrate; /* Maximum rate */
+ ffreg_t stparms; /* Traffic Management Parameters */
+ ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
+ ffreg_t rm_type; /* */
+ u_int filler5[0x17 - 0x06];
+ ffreg_t cmd_reg; /* Command register */
+ u_int filler18[0x20 - 0x18];
+ ffreg_t cbr_base; /* CBR Pointer Base */
+ ffreg_t vbr_base; /* VBR Pointer Base */
+ ffreg_t abr_base; /* ABR Pointer Base */
+ ffreg_t ubr_base; /* UBR Pointer Base */
+ u_int filler24;
+ ffreg_t vbrwq_base; /* VBR Wait Queue Base */
+ ffreg_t abrwq_base; /* ABR Wait Queue Base */
+ ffreg_t ubrwq_base; /* UBR Wait Queue Base */
+ ffreg_t vct_base; /* Main VC Table Base */
+ ffreg_t vcte_base; /* Extended Main VC Table Base */
+ u_int filler2a[0x2C - 0x2A];
+ ffreg_t cbr_tab_beg; /* CBR Table Begin */
+ ffreg_t cbr_tab_end; /* CBR Table End */
+ ffreg_t cbr_pointer; /* CBR Pointer */
+ u_int filler2f[0x30 - 0x2F];
+ ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
+ ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
+ ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
+ ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
+ ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
+ ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
+ ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
+ ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
+ u_int filler38[0x40 - 0x38];
+ ffreg_t queue_base; /* Base address for PRQ and TCQ */
+ ffreg_t desc_base; /* Base address of descriptor table */
+ u_int filler42[0x45 - 0x42];
+ ffreg_t mode_reg_0; /* Mode register 0 */
+ ffreg_t mode_reg_1; /* Mode register 1 */
+ ffreg_t intr_status_reg;/* Interrupt Status register */
+ ffreg_t mask_reg; /* Mask Register */
+ ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
+ ffreg_t state_reg; /* Status register */
+ u_int filler4c[0x58 - 0x4c];
+ ffreg_t curr_desc_num; /* Contains the current descriptor num */
+ ffreg_t next_desc; /* Next descriptor */
+ ffreg_t next_vc; /* Next VC */
+ u_int filler5b[0x5d - 0x5b];
+ ffreg_t present_slot_cnt;/* Present slot count */
+ u_int filler5e[0x6a - 0x5e];
+ ffreg_t new_desc_num; /* New descriptor number */
+ ffreg_t new_vc; /* New VC */
+ ffreg_t sched_tbl_ptr; /* Schedule table pointer */
+ ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
+ ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
+ ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
+ ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
+ ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
+ ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
+ ffreg_t cbr_vc; /* CBR VC */
+ ffreg_t vbr_sb_vc; /* VBR SB VC */
+ ffreg_t abr_sb_vc; /* ABR SB VC */
+ ffreg_t ubr_sb_vc; /* UBR SB VC */
+ ffreg_t vbr_next_link; /* VBR next link */
+ ffreg_t abr_next_link; /* ABR next link */
+ ffreg_t ubr_next_link; /* UBR next link */
+ u_int filler7a[0x7c-0x7a];
+ ffreg_t out_rate_head; /* Out of rate head */
+ u_int filler7d[0xca-0x7d]; /* pad out to full address space */
+ ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
+ u_int fillercc[0x100-0xcc]; /* pad out to full address space */
} ffredn_t;
typedef struct _rfredn_t {
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index c8b453939da2..07abd9d76f7f 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -145,6 +145,17 @@ config EXTRA_FIRMWARE_DIR
this option you can point it elsewhere, such as /lib/firmware/ or
some other directory containing the firmware files.
+config FW_LOADER_USER_HELPER
+ bool "Fallback user-helper invocation for firmware loading"
+ depends on FW_LOADER
+ default y
+ help
+ This option enables/disables the invocation of a user-mode helper
+ (e.g. udev) for loading firmware files as a fallback after direct
+ in-kernel file loading fails. The user-mode helper is no longer
+ required unless you have a special firmware file that resides in
+ a non-standard path.
+
config DEBUG_DRIVER
bool "Driver Core verbose debug messages"
depends on DEBUG_KERNEL
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 5aa2d703d19f..4e22ce3ed73d 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -21,6 +21,7 @@ endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
obj-$(CONFIG_REGMAP) += regmap/
obj-$(CONFIG_SOC_BUS) += soc.o
+obj-$(CONFIG_PINCTRL) += pinctrl.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 24eb07868344..519865b53f76 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -290,7 +290,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start,
struct device *dev;
int error = 0;
- if (!bus)
+ if (!bus || !bus->p)
return -EINVAL;
klist_iter_init_node(&bus->p->klist_devices, &i,
@@ -324,7 +324,7 @@ struct device *bus_find_device(struct bus_type *bus,
struct klist_iter i;
struct device *dev;
- if (!bus)
+ if (!bus || !bus->p)
return NULL;
klist_iter_init_node(&bus->p->klist_devices, &i,
@@ -700,12 +700,12 @@ int bus_add_driver(struct device_driver *drv)
if (error)
goto out_unregister;
+ klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
if (drv->bus->p->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
goto out_unregister;
}
- klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
module_add_driver(drv->owner, drv);
error = driver_create_file(drv, &driver_attr_uevent);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 03243d4002fd..3ce845471327 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -420,8 +420,8 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
* code. There's no locking restriction.
*/
struct device *class_find_device(struct class *class, struct device *start,
- void *data,
- int (*match)(struct device *, void *))
+ const void *data,
+ int (*match)(struct device *, const void *))
{
struct class_dev_iter iter;
struct device *dev;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index a235085e343c..56536f4b0f6b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1617,9 +1617,9 @@ struct device *device_create(struct class *class, struct device *parent,
}
EXPORT_SYMBOL_GPL(device_create);
-static int __match_devt(struct device *dev, void *data)
+static int __match_devt(struct device *dev, const void *data)
{
- dev_t *devt = data;
+ const dev_t *devt = data;
return dev->devt == *devt;
}
@@ -1685,8 +1685,6 @@ EXPORT_SYMBOL_GPL(device_destroy);
*/
int device_rename(struct device *dev, const char *new_name)
{
- char *old_class_name = NULL;
- char *new_class_name = NULL;
char *old_device_name = NULL;
int error;
@@ -1717,8 +1715,6 @@ int device_rename(struct device *dev, const char *new_name)
out:
put_device(dev);
- kfree(new_class_name);
- kfree(old_class_name);
kfree(old_device_name);
return error;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 63452943abd1..fb10728f6372 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -224,7 +224,7 @@ static void cpu_device_release(struct device *dev)
* by the cpu device.
*
* Never copy this way of doing things, or you too will be made fun of
- * on the linux-kerenl list, you have been warned.
+ * on the linux-kernel list, you have been warned.
*/
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index e3bbed8a617c..bb5645ea0282 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -24,6 +24,7 @@
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/devinfo.h>
#include "base.h"
#include "power/power.h"
@@ -172,6 +173,8 @@ static int deferred_probe_initcall(void)
driver_deferred_probe_enable = true;
driver_deferred_probe_trigger();
+ /* Sort as many dependencies as possible before exiting initcalls */
+ flush_workqueue(deferred_wq);
return 0;
}
late_initcall(deferred_probe_initcall);
@@ -269,6 +272,12 @@ static int really_probe(struct device *dev, struct device_driver *drv)
WARN_ON(!list_empty(&dev->devres_head));
dev->driver = drv;
+
+ /* If using pinctrl, bind pins now before probing */
+ ret = pinctrl_bind_pins(dev);
+ if (ret)
+ goto probe_failed;
+
if (driver_sysfs_add(dev)) {
printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
__func__, dev_name(dev));
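The deferred-probe flush added above matters for drivers whose probe fails with -EPROBE_DEFER until another driver has registered a resource they need; draining the workqueue before initcalls finish gives such devices another chance early. A toy example of a probe that defers, using a hypothetical readiness flag that is not from this patch:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/types.h>

static bool foo_dependency_ready;       /* would be set once the provider binds */

static int foo_probe(struct platform_device *pdev)
{
        if (!foo_dependency_ready)
                return -EPROBE_DEFER;   /* driver core re-queues this probe later */

        /* ... normal setup ... */
        return 0;
}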
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index a3f79c495a41..ff5b745c4705 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -134,15 +134,14 @@ EXPORT_SYMBOL_GPL(dma_buf_export);
*/
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
- int error, fd;
+ int fd;
if (!dmabuf || !dmabuf->file)
return -EINVAL;
- error = get_unused_fd_flags(flags);
- if (error < 0)
- return error;
- fd = error;
+ fd = get_unused_fd_flags(flags);
+ if (fd < 0)
+ return fd;
fd_install(fd, dmabuf->file);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d81460309182..4a223fedcd73 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -88,11 +88,6 @@ enum {
FW_STATUS_ABORT,
};
-enum fw_buf_fmt {
- VMALLOC_BUF, /* used in direct loading */
- PAGE_BUF, /* used in loading via userspace */
-};
-
static int loading_timeout = 60; /* In seconds */
static inline long firmware_loading_timeout(void)
@@ -128,12 +123,14 @@ struct firmware_buf {
struct completion completion;
struct firmware_cache *fwc;
unsigned long status;
- enum fw_buf_fmt fmt;
void *data;
size_t size;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ bool is_paged_buf;
struct page **pages;
int nr_pages;
int page_array_size;
+#endif
char fw_id[];
};
@@ -142,14 +139,6 @@ struct fw_cache_entry {
char name[];
};
-struct firmware_priv {
- struct delayed_work timeout_work;
- bool nowait;
- struct device dev;
- struct firmware_buf *buf;
- struct firmware *fw;
-};
-
struct fw_name_devm {
unsigned long magic;
char name[];
@@ -182,7 +171,6 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
strcpy(buf->fw_id, fw_name);
buf->fwc = fwc;
init_completion(&buf->completion);
- buf->fmt = VMALLOC_BUF;
pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
@@ -240,7 +228,6 @@ static void __fw_free_buf(struct kref *ref)
{
struct firmware_buf *buf = to_fwbuf(ref);
struct firmware_cache *fwc = buf->fwc;
- int i;
pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
__func__, buf->fw_id, buf, buf->data,
@@ -249,13 +236,15 @@ static void __fw_free_buf(struct kref *ref)
list_del(&buf->list);
spin_unlock(&fwc->lock);
-
- if (buf->fmt == PAGE_BUF) {
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ if (buf->is_paged_buf) {
+ int i;
vunmap(buf->data);
for (i = 0; i < buf->nr_pages; i++)
__free_page(buf->pages[i]);
kfree(buf->pages);
} else
+#endif
vfree(buf->data);
kfree(buf);
}
@@ -305,7 +294,7 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
char *buf;
size = fw_file_size(file);
- if (size < 0)
+ if (size <= 0)
return false;
buf = vmalloc(size);
if (!buf)
@@ -319,7 +308,8 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
return true;
}
-static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
+static bool fw_get_filesystem_firmware(struct device *device,
+ struct firmware_buf *buf)
{
int i;
bool success = false;
@@ -343,9 +333,114 @@ static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
break;
}
__putname(path);
+
+ if (success) {
+ dev_dbg(device, "firmware: direct-loading firmware %s\n",
+ buf->fw_id);
+ mutex_lock(&fw_lock);
+ set_bit(FW_STATUS_DONE, &buf->status);
+ complete_all(&buf->completion);
+ mutex_unlock(&fw_lock);
+ }
+
return success;
}
+/* firmware holds the ownership of pages */
+static void firmware_free_data(const struct firmware *fw)
+{
+ /* Loaded directly? */
+ if (!fw->priv) {
+ vfree(fw->data);
+ return;
+ }
+ fw_free_buf(fw->priv);
+}
+
+/* store the pages buffer info from buf into the firmware struct */
+static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
+{
+ fw->priv = buf;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ fw->pages = buf->pages;
+#endif
+ fw->size = buf->size;
+ fw->data = buf->data;
+
+ pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
+ __func__, buf->fw_id, buf, buf->data,
+ (unsigned int)buf->size);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void fw_name_devm_release(struct device *dev, void *res)
+{
+ struct fw_name_devm *fwn = res;
+
+ if (fwn->magic == (unsigned long)&fw_cache)
+ pr_debug("%s: fw_name-%s devm-%p released\n",
+ __func__, fwn->name, res);
+}
+
+static int fw_devm_match(struct device *dev, void *res,
+ void *match_data)
+{
+ struct fw_name_devm *fwn = res;
+
+ return (fwn->magic == (unsigned long)&fw_cache) &&
+ !strcmp(fwn->name, match_data);
+}
+
+static struct fw_name_devm *fw_find_devm_name(struct device *dev,
+ const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ fwn = devres_find(dev, fw_name_devm_release,
+ fw_devm_match, (void *)name);
+ return fwn;
+}
+
+/* add firmware name into devres list */
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ fwn = fw_find_devm_name(dev, name);
+ if (fwn)
+ return 1;
+
+ fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
+ strlen(name) + 1, GFP_KERNEL);
+ if (!fwn)
+ return -ENOMEM;
+
+ fwn->magic = (unsigned long)&fw_cache;
+ strcpy(fwn->name, name);
+ devres_add(dev, fwn);
+
+ return 0;
+}
+#else
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+ return 0;
+}
+#endif
+
+
+/*
+ * user-mode helper code
+ */
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+struct firmware_priv {
+ struct delayed_work timeout_work;
+ bool nowait;
+ struct device dev;
+ struct firmware_buf *buf;
+ struct firmware *fw;
+};
+
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
return container_of(dev, struct firmware_priv, dev);
@@ -359,6 +454,9 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
complete_all(&buf->completion);
}
+#define is_fw_load_aborted(buf) \
+ test_bit(FW_STATUS_ABORT, &(buf)->status)
+
static ssize_t firmware_timeout_show(struct class *class,
struct class_attribute *attr,
char *buf)
@@ -435,17 +533,6 @@ static ssize_t firmware_loading_show(struct device *dev,
return sprintf(buf, "%d\n", loading);
}
-/* firmware holds the ownership of pages */
-static void firmware_free_data(const struct firmware *fw)
-{
- /* Loaded directly? */
- if (!fw->priv) {
- vfree(fw->data);
- return;
- }
- fw_free_buf(fw->priv);
-}
-
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
@@ -454,7 +541,7 @@ static void firmware_free_data(const struct firmware *fw)
/* one pages buffer should be mapped/unmapped only once */
static int fw_map_pages_buf(struct firmware_buf *buf)
{
- if (buf->fmt != PAGE_BUF)
+ if (!buf->is_paged_buf)
return 0;
if (buf->data)
@@ -727,171 +814,16 @@ exit:
return fw_priv;
}
-/* store the pages buffer info firmware from buf */
-static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
-{
- fw->priv = buf;
- fw->pages = buf->pages;
- fw->size = buf->size;
- fw->data = buf->data;
-
- pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
- __func__, buf->fw_id, buf, buf->data,
- (unsigned int)buf->size);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static void fw_name_devm_release(struct device *dev, void *res)
-{
- struct fw_name_devm *fwn = res;
-
- if (fwn->magic == (unsigned long)&fw_cache)
- pr_debug("%s: fw_name-%s devm-%p released\n",
- __func__, fwn->name, res);
-}
-
-static int fw_devm_match(struct device *dev, void *res,
- void *match_data)
-{
- struct fw_name_devm *fwn = res;
-
- return (fwn->magic == (unsigned long)&fw_cache) &&
- !strcmp(fwn->name, match_data);
-}
-
-static struct fw_name_devm *fw_find_devm_name(struct device *dev,
- const char *name)
-{
- struct fw_name_devm *fwn;
-
- fwn = devres_find(dev, fw_name_devm_release,
- fw_devm_match, (void *)name);
- return fwn;
-}
-
-/* add firmware name into devres list */
-static int fw_add_devm_name(struct device *dev, const char *name)
-{
- struct fw_name_devm *fwn;
-
- fwn = fw_find_devm_name(dev, name);
- if (fwn)
- return 1;
-
- fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
- strlen(name) + 1, GFP_KERNEL);
- if (!fwn)
- return -ENOMEM;
-
- fwn->magic = (unsigned long)&fw_cache;
- strcpy(fwn->name, name);
- devres_add(dev, fwn);
-
- return 0;
-}
-#else
-static int fw_add_devm_name(struct device *dev, const char *name)
-{
- return 0;
-}
-#endif
-
-static void _request_firmware_cleanup(const struct firmware **firmware_p)
-{
- release_firmware(*firmware_p);
- *firmware_p = NULL;
-}
-
-static struct firmware_priv *
-_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
- struct device *device, bool uevent, bool nowait)
-{
- struct firmware *firmware;
- struct firmware_priv *fw_priv = NULL;
- struct firmware_buf *buf;
- int ret;
-
- if (!firmware_p)
- return ERR_PTR(-EINVAL);
-
- *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
- if (!firmware) {
- dev_err(device, "%s: kmalloc(struct firmware) failed\n",
- __func__);
- return ERR_PTR(-ENOMEM);
- }
-
- if (fw_get_builtin_firmware(firmware, name)) {
- dev_dbg(device, "firmware: using built-in firmware %s\n", name);
- return NULL;
- }
-
- ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
- if (!ret)
- fw_priv = fw_create_instance(firmware, name, device,
- uevent, nowait);
-
- if (IS_ERR(fw_priv) || ret < 0) {
- kfree(firmware);
- *firmware_p = NULL;
- return ERR_PTR(-ENOMEM);
- } else if (fw_priv) {
- fw_priv->buf = buf;
-
- /*
- * bind with 'buf' now to avoid warning in failure path
- * of requesting firmware.
- */
- firmware->priv = buf;
- return fw_priv;
- }
-
- /* share the cached buf, which is inprogessing or completed */
- check_status:
- mutex_lock(&fw_lock);
- if (test_bit(FW_STATUS_ABORT, &buf->status)) {
- fw_priv = ERR_PTR(-ENOENT);
- firmware->priv = buf;
- _request_firmware_cleanup(firmware_p);
- goto exit;
- } else if (test_bit(FW_STATUS_DONE, &buf->status)) {
- fw_priv = NULL;
- fw_set_page_data(buf, firmware);
- goto exit;
- }
- mutex_unlock(&fw_lock);
- wait_for_completion(&buf->completion);
- goto check_status;
-
-exit:
- mutex_unlock(&fw_lock);
- return fw_priv;
-}
-
+/* load a firmware via user helper */
static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
long timeout)
{
int retval = 0;
struct device *f_dev = &fw_priv->dev;
struct firmware_buf *buf = fw_priv->buf;
- struct firmware_cache *fwc = &fw_cache;
- int direct_load = 0;
-
- /* try direct loading from fs first */
- if (fw_get_filesystem_firmware(buf)) {
- dev_dbg(f_dev->parent, "firmware: direct-loading"
- " firmware %s\n", buf->fw_id);
-
- mutex_lock(&fw_lock);
- set_bit(FW_STATUS_DONE, &buf->status);
- mutex_unlock(&fw_lock);
- complete_all(&buf->completion);
- direct_load = 1;
- goto handle_fw;
- }
/* fall back on userspace loading */
- buf->fmt = PAGE_BUF;
+ buf->is_paged_buf = true;
dev_set_uevent_suppress(f_dev, true);
@@ -929,47 +861,196 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
cancel_delayed_work_sync(&fw_priv->timeout_work);
-handle_fw:
+ fw_priv->buf = NULL;
+
+ device_remove_file(f_dev, &dev_attr_loading);
+err_del_bin_attr:
+ device_remove_bin_file(f_dev, &firmware_attr_data);
+err_del_dev:
+ device_del(f_dev);
+err_put_dev:
+ put_device(f_dev);
+ return retval;
+}
+
+static int fw_load_from_user_helper(struct firmware *firmware,
+ const char *name, struct device *device,
+ bool uevent, bool nowait, long timeout)
+{
+ struct firmware_priv *fw_priv;
+
+ fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+ if (IS_ERR(fw_priv))
+ return PTR_ERR(fw_priv);
+
+ fw_priv->buf = firmware->priv;
+ return _request_firmware_load(fw_priv, uevent, timeout);
+}
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+static inline int
+fw_load_from_user_helper(struct firmware *firmware, const char *name,
+ struct device *device, bool uevent, bool nowait,
+ long timeout)
+{
+ return -ENOENT;
+}
+
+/* No abort during direct loading */
+#define is_fw_load_aborted(buf) false
+
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+
+/* wait until the shared firmware_buf becomes ready (or error) */
+static int sync_cached_firmware_buf(struct firmware_buf *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&fw_lock);
+ while (!test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (is_fw_load_aborted(buf)) {
+ ret = -ENOENT;
+ break;
+ }
+ mutex_unlock(&fw_lock);
+ wait_for_completion(&buf->completion);
+ mutex_lock(&fw_lock);
+ }
+ mutex_unlock(&fw_lock);
+ return ret;
+}
+
+/* prepare firmware and firmware_buf structs;
+ * return 0 if the firmware is already assigned, 1 if it needs to be loaded,
+ * or a negative error code
+ */
+static int
+_request_firmware_prepare(struct firmware **firmware_p, const char *name,
+ struct device *device)
+{
+ struct firmware *firmware;
+ struct firmware_buf *buf;
+ int ret;
+
+ *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
+ if (!firmware) {
+ dev_err(device, "%s: kmalloc(struct firmware) failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (fw_get_builtin_firmware(firmware, name)) {
+ dev_dbg(device, "firmware: using built-in firmware %s\n", name);
+ return 0; /* assigned */
+ }
+
+ ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
+
+ /*
+ * bind with 'buf' now to avoid warning in failure path
+ * of requesting firmware.
+ */
+ firmware->priv = buf;
+
+ if (ret > 0) {
+ ret = sync_cached_firmware_buf(buf);
+ if (!ret) {
+ fw_set_page_data(buf, firmware);
+ return 0; /* assigned */
+ }
+ }
+
+ if (ret < 0)
+ return ret;
+ return 1; /* need to load */
+}
+
+static int assign_firmware_buf(struct firmware *fw, struct device *device)
+{
+ struct firmware_buf *buf = fw->priv;
+
mutex_lock(&fw_lock);
- if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
- retval = -ENOENT;
+ if (!buf->size || is_fw_load_aborted(buf)) {
+ mutex_unlock(&fw_lock);
+ return -ENOENT;
+ }
/*
* add firmware name into devres list so that we can auto cache
* and uncache firmware for device.
*
- * f_dev->parent may has been deleted already, but the problem
+ * device may have been deleted already, but the problem
* should be fixed in devres or driver core.
*/
- if (!retval && f_dev->parent)
- fw_add_devm_name(f_dev->parent, buf->fw_id);
+ if (device)
+ fw_add_devm_name(device, buf->fw_id);
/*
* After caching firmware image is started, let it piggyback
* on request firmware.
*/
- if (!retval && fwc->state == FW_LOADER_START_CACHE) {
+ if (buf->fwc->state == FW_LOADER_START_CACHE) {
if (fw_cache_piggyback_on_request(buf->fw_id))
kref_get(&buf->ref);
}
/* pass the pages buffer to driver at the last minute */
- fw_set_page_data(buf, fw_priv->fw);
-
- fw_priv->buf = NULL;
+ fw_set_page_data(buf, fw);
mutex_unlock(&fw_lock);
+ return 0;
+}
- if (direct_load)
- goto err_put_dev;
+/* called from request_firmware() and request_firmware_work_func() */
+static int
+_request_firmware(const struct firmware **firmware_p, const char *name,
+ struct device *device, bool uevent, bool nowait)
+{
+ struct firmware *fw;
+ long timeout;
+ int ret;
- device_remove_file(f_dev, &dev_attr_loading);
-err_del_bin_attr:
- device_remove_bin_file(f_dev, &firmware_attr_data);
-err_del_dev:
- device_del(f_dev);
-err_put_dev:
- put_device(f_dev);
- return retval;
+ if (!firmware_p)
+ return -EINVAL;
+
+ ret = _request_firmware_prepare(&fw, name, device);
+ if (ret <= 0) /* error or already assigned */
+ goto out;
+
+ ret = 0;
+ timeout = firmware_loading_timeout();
+ if (nowait) {
+ timeout = usermodehelper_read_lock_wait(timeout);
+ if (!timeout) {
+ dev_dbg(device, "firmware: %s loading timed out\n",
+ name);
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ ret = usermodehelper_read_trylock();
+ if (WARN_ON(ret)) {
+ dev_err(device, "firmware: %s will not be loaded\n",
+ name);
+ goto out;
+ }
+ }
+
+ if (!fw_get_filesystem_firmware(device, fw->priv))
+ ret = fw_load_from_user_helper(fw, name, device,
+ uevent, nowait, timeout);
+ if (!ret)
+ ret = assign_firmware_buf(fw, device);
+
+ usermodehelper_read_unlock();
+
+ out:
+ if (ret < 0) {
+ release_firmware(fw);
+ fw = NULL;
+ }
+
+ *firmware_p = fw;
+ return ret;
}
/**
@@ -996,26 +1077,7 @@ int
request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
- struct firmware_priv *fw_priv;
- int ret;
-
- fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
- false);
- if (IS_ERR_OR_NULL(fw_priv))
- return PTR_RET(fw_priv);
-
- ret = usermodehelper_read_trylock();
- if (WARN_ON(ret)) {
- dev_err(device, "firmware: %s will not be loaded\n", name);
- } else {
- ret = _request_firmware_load(fw_priv, true,
- firmware_loading_timeout());
- usermodehelper_read_unlock();
- }
- if (ret)
- _request_firmware_cleanup(firmware_p);
-
- return ret;
+ return _request_firmware(firmware_p, name, device, true, false);
}
/**
@@ -1046,33 +1108,13 @@ static void request_firmware_work_func(struct work_struct *work)
{
struct firmware_work *fw_work;
const struct firmware *fw;
- struct firmware_priv *fw_priv;
- long timeout;
- int ret;
fw_work = container_of(work, struct firmware_work, work);
- fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
- fw_work->uevent, true);
- if (IS_ERR_OR_NULL(fw_priv)) {
- ret = PTR_RET(fw_priv);
- goto out;
- }
-
- timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
- if (timeout) {
- ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
- usermodehelper_read_unlock();
- } else {
- dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
- fw_work->name);
- ret = -EAGAIN;
- }
- if (ret)
- _request_firmware_cleanup(&fw);
- out:
+ _request_firmware(&fw, fw_work->name, fw_work->device,
+ fw_work->uevent, true);
fw_work->cont(fw, fw_work->context);
- put_device(fw_work->device);
+ put_device(fw_work->device); /* taken in request_firmware_nowait() */
module_put(fw_work->module);
kfree(fw_work);
@@ -1474,7 +1516,11 @@ static void __init fw_cache_init(void)
static int __init firmware_class_init(void)
{
fw_cache_init();
+#ifdef CONFIG_FW_LOADER_USER_HELPER
return class_register(&firmware_class);
+#else
+ return 0;
+#endif
}
static void __exit firmware_class_exit(void)
@@ -1483,7 +1529,9 @@ static void __exit firmware_class_exit(void)
unregister_syscore_ops(&fw_syscore_ops);
unregister_pm_notifier(&fw_cache.pm_notify);
#endif
+#ifdef CONFIG_FW_LOADER_USER_HELPER
class_unregister(&firmware_class);
+#endif
}
fs_initcall(firmware_class_init);
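
For callers, the reworked loader keeps the usual request_firmware()/release_firmware() contract: built-in blobs, direct filesystem loading and (when CONFIG_FW_LOADER_USER_HELPER is enabled) the user-mode helper are all tried behind the same call. A minimal consumer sketch, assuming a hypothetical "example-fw.bin" image:

#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	/* Tries built-in firmware, then direct loading from the
	 * filesystem, then falls back to the user-mode helper. */
	ret = request_firmware(&fw, "example-fw.bin", dev);
	if (ret)
		return ret;

	/* ... push fw->data (fw->size bytes) to the hardware ... */

	release_firmware(fw);
	return 0;
}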
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 987604d56c83..a51007b79032 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -494,8 +494,8 @@ store_hard_offline_page(struct device *dev,
return ret ? ret : count;
}
-static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
+static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
static __init int memory_fail_init(void)
{
@@ -693,6 +693,12 @@ int offline_memory_block(struct memory_block *mem)
return ret;
}
+/* return true if the memory block is offlined, otherwise, return false */
+bool is_memblock_offlined(struct memory_block *mem)
+{
+ return mem->state == MEM_OFFLINE;
+}
+
/*
* Initialize the sysfs support for memory devices...
*/
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
new file mode 100644
index 000000000000..67a274e86727
--- /dev/null
+++ b/drivers/base/pinctrl.c
@@ -0,0 +1,69 @@
+/*
+ * Driver core interface to the pinctrl subsystem.
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/device.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * pinctrl_bind_pins() - called by the device core before probe
+ * @dev: the device that is just about to probe
+ */
+int pinctrl_bind_pins(struct device *dev)
+{
+ int ret;
+
+ dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL);
+ if (!dev->pins)
+ return -ENOMEM;
+
+ dev->pins->p = devm_pinctrl_get(dev);
+ if (IS_ERR(dev->pins->p)) {
+ dev_dbg(dev, "no pinctrl handle\n");
+ ret = PTR_ERR(dev->pins->p);
+ goto cleanup_alloc;
+ }
+
+ dev->pins->default_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(dev->pins->default_state)) {
+ dev_dbg(dev, "no default pinctrl state\n");
+ ret = 0;
+ goto cleanup_get;
+ }
+
+ ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state);
+ if (ret) {
+ dev_dbg(dev, "failed to activate default pinctrl state\n");
+ goto cleanup_get;
+ }
+
+ return 0;
+
+ /*
+	 * If no pinctrl handle or default state was found for this device,
+	 * explicitly free the pin container in the device; there is
+	 * no point in keeping it around.
+ */
+cleanup_get:
+ devm_pinctrl_put(dev->pins->p);
+cleanup_alloc:
+ devm_kfree(dev, dev->pins);
+ dev->pins = NULL;
+
+ /* Only return deferrals */
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
+}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index acc3a8ded29d..9a6b05a35603 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -433,8 +433,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
*/
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
- if (!work_pending(&genpd->power_off_work))
- queue_work(pm_wq, &genpd->power_off_work);
+ queue_work(pm_wq, &genpd->power_off_work);
}
/**
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 50b2831e027d..32ee0fc7ea54 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -162,7 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp)
return v;
}
-EXPORT_SYMBOL(opp_get_voltage);
+EXPORT_SYMBOL_GPL(opp_get_voltage);
/**
* opp_get_freq() - Gets the frequency corresponding to an available opp
@@ -192,7 +192,7 @@ unsigned long opp_get_freq(struct opp *opp)
return f;
}
-EXPORT_SYMBOL(opp_get_freq);
+EXPORT_SYMBOL_GPL(opp_get_freq);
/**
* opp_get_opp_count() - Get number of opps available in the opp list
@@ -225,7 +225,7 @@ int opp_get_opp_count(struct device *dev)
return count;
}
-EXPORT_SYMBOL(opp_get_opp_count);
+EXPORT_SYMBOL_GPL(opp_get_opp_count);
/**
* opp_find_freq_exact() - search for an exact frequency
@@ -276,7 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
return opp;
}
-EXPORT_SYMBOL(opp_find_freq_exact);
+EXPORT_SYMBOL_GPL(opp_find_freq_exact);
/**
* opp_find_freq_ceil() - Search for an rounded ceil freq
@@ -323,7 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL(opp_find_freq_ceil);
+EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
/**
* opp_find_freq_floor() - Search for a rounded floor freq
@@ -374,7 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL(opp_find_freq_floor);
+EXPORT_SYMBOL_GPL(opp_find_freq_floor);
/**
* opp_add() - Add an OPP table from a table definitions
@@ -568,7 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, true);
}
-EXPORT_SYMBOL(opp_enable);
+EXPORT_SYMBOL_GPL(opp_enable);
/**
* opp_disable() - Disable a specific OPP
@@ -590,7 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, false);
}
-EXPORT_SYMBOL(opp_disable);
+EXPORT_SYMBOL_GPL(opp_disable);
#ifdef CONFIG_CPU_FREQ
/**
@@ -661,6 +661,7 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
+EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
/**
* opp_free_cpufreq_table() - free the cpufreq table
@@ -678,6 +679,7 @@ void opp_free_cpufreq_table(struct device *dev,
kfree(*table);
*table = NULL;
}
+EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
/**
@@ -738,4 +740,5 @@ int of_init_opp_table(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(of_init_opp_table);
#endif
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index d21349544ce5..3d4d1f8aac5c 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -91,6 +91,7 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
return ret;
}
+EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
/**
* __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3148b10dc2e5..1244930e3d7a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -124,6 +124,76 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
+static int dev_memalloc_noio(struct device *dev, void *data)
+{
+ return dev->power.memalloc_noio;
+}
+
+/*
+ * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
+ * @dev: Device to handle.
+ * @enable: True for setting the flag and False for clearing the flag.
+ *
+ * Set the flag for all devices in the path from the device to the
+ * root device in the device tree if @enable is true, otherwise clear
+ * the flag for devices in the path whose siblings don't set the flag.
+ *
+ * The function should only be called by a block device or network
+ * device driver, to solve the deadlock problem during runtime
+ * resume/suspend:
+ *
+ * If memory allocation with GFP_KERNEL is called inside the runtime
+ * resume/suspend callback of any one of its ancestors (or the
+ * block device itself), a deadlock may be triggered inside the
+ * memory allocation, since it might not complete until the block
+ * device becomes active and the involved page I/O finishes. The
+ * situation was first pointed out by Alan Stern. Network devices
+ * are involved in iSCSI kinds of situations.
+ *
+ * The dev_hotplug_mutex lock is held in the function to handle the
+ * hotplug race, because pm_runtime_set_memalloc_noio() may be called
+ * from an async probe().
+ *
+ * The function should be called between device_add() and device_del()
+ * on the affected (block/network) device.
+ */
+void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
+{
+ static DEFINE_MUTEX(dev_hotplug_mutex);
+
+ mutex_lock(&dev_hotplug_mutex);
+ for (;;) {
+ bool enabled;
+
+ /* hold power lock since bitfield is not SMP-safe. */
+ spin_lock_irq(&dev->power.lock);
+ enabled = dev->power.memalloc_noio;
+ dev->power.memalloc_noio = enable;
+ spin_unlock_irq(&dev->power.lock);
+
+ /*
+		 * No need to enable ancestors any more if the device
+		 * has already been enabled.
+ */
+ if (enabled && enable)
+ break;
+
+ dev = dev->parent;
+
+ /*
+		 * Clear the flag of the parent device only if none of
+		 * its children have the flag set, because an ancestor's
+		 * flag may have been set by any one of its descendants.
+ */
+ if (!dev || (!enable &&
+ device_for_each_child(dev, NULL,
+ dev_memalloc_noio)))
+ break;
+ }
+ mutex_unlock(&dev_hotplug_mutex);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
+
/**
* rpm_check_suspend_allowed - Test whether a device may be suspended.
* @dev: Device to test.
@@ -278,7 +348,24 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
if (!cb)
return -ENOSYS;
- retval = __rpm_callback(cb, dev);
+ if (dev->power.memalloc_noio) {
+ unsigned int noio_flag;
+
+ /*
+		 * A deadlock might be caused if memory allocation with
+		 * GFP_KERNEL happens inside the runtime_suspend or
+		 * runtime_resume callback of a block device's
+		 * ancestor, or of the block device itself. A network
+		 * device might be regarded as part of an iSCSI block
+		 * device, so the network device and its ancestors
+		 * should be marked as memalloc_noio too.
+ */
+ noio_flag = memalloc_noio_save();
+ retval = __rpm_callback(cb, dev);
+ memalloc_noio_restore(noio_flag);
+ } else {
+ retval = __rpm_callback(cb, dev);
+ }
dev->power.runtime_error = retval;
return retval != -EACCES ? retval : -EIO;
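
The memalloc_noio machinery above is opt-in: a block or network driver flags its device (and thereby its ancestors) after device_add() and clears the flag before device_del(). A minimal sketch, with the add/del pairing purely illustrative:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Illustrative only: keep runtime PM callbacks of this device and its
 * ancestors from recursing into page I/O via GFP_KERNEL allocations. */
static void example_blk_added(struct device *dev)
{
	/* device_add() has already succeeded at this point */
	pm_runtime_set_memalloc_noio(dev, true);
}

static void example_blk_removing(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, false);
	/* device_del() follows */
}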
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index e6ee5e80e546..79715e7fa43e 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -382,6 +382,12 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
unsigned int cec;
+ /*
+	 * An active wakeup source should bring the system
+	 * out of the PM_SUSPEND_FREEZE state.
+ */
+ freeze_wake();
+
ws->active = true;
ws->active_count++;
ws->last_time = ktime_get();
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 5e75d1b683e2..cf129980abd0 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 401d1919635a..5a22bd33ce3d 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -16,6 +16,7 @@
#include <linux/regmap.h>
#include <linux/fs.h>
#include <linux/list.h>
+#include <linux/wait.h>
struct regmap;
struct regcache_ops;
@@ -25,6 +26,7 @@ struct regmap_debugfs_off_cache {
off_t min;
off_t max;
unsigned int base_reg;
+ unsigned int max_reg;
};
struct regmap_format {
@@ -39,6 +41,13 @@ struct regmap_format {
unsigned int (*parse_val)(void *buf);
};
+struct regmap_async {
+ struct list_head list;
+ struct work_struct cleanup;
+ struct regmap *map;
+ void *work_buf;
+};
+
struct regmap {
struct mutex mutex;
spinlock_t spinlock;
@@ -53,6 +62,11 @@ struct regmap {
void *bus_context;
const char *name;
+ spinlock_t async_lock;
+ wait_queue_head_t async_waitq;
+ struct list_head async_list;
+ int async_ret;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
const char *debugfs_name;
@@ -74,6 +88,11 @@ struct regmap {
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+ int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+
+ bool defer_caching;
+
u8 read_flag_mask;
u8 write_flag_mask;
@@ -175,7 +194,10 @@ bool regcache_set_val(void *base, unsigned int idx,
unsigned int val, unsigned int word_size);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+void regmap_async_complete_cb(struct regmap_async *async, int ret);
+
extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_lzo_ops;
+extern struct regcache_ops regcache_flat_ops;
#endif
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
new file mode 100644
index 000000000000..d9762e41959b
--- /dev/null
+++ b/drivers/base/regmap/regcache-flat.c
@@ -0,0 +1,72 @@
+/*
+ * Register cache access API - flat caching support
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+
+#include "internal.h"
+
+static int regcache_flat_init(struct regmap *map)
+{
+ int i;
+ unsigned int *cache;
+
+ map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1),
+ GFP_KERNEL);
+ if (!map->cache)
+ return -ENOMEM;
+
+ cache = map->cache;
+
+ for (i = 0; i < map->num_reg_defaults; i++)
+ cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def;
+
+ return 0;
+}
+
+static int regcache_flat_exit(struct regmap *map)
+{
+ kfree(map->cache);
+ map->cache = NULL;
+
+ return 0;
+}
+
+static int regcache_flat_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ unsigned int *cache = map->cache;
+
+ *value = cache[reg];
+
+ return 0;
+}
+
+static int regcache_flat_write(struct regmap *map, unsigned int reg,
+ unsigned int value)
+{
+ unsigned int *cache = map->cache;
+
+ cache[reg] = value;
+
+ return 0;
+}
+
+struct regcache_ops regcache_flat_ops = {
+ .type = REGCACHE_FLAT,
+ .name = "flat",
+ .init = regcache_flat_init,
+ .exit = regcache_flat_exit,
+ .read = regcache_flat_read,
+ .write = regcache_flat_write,
+};
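
Selecting the new flat cache only takes a cache_type assignment in the regmap_config; a minimal sketch for a small, dense register map (all values invented):

#include <linux/regmap.h>

/* Hypothetical 8-bit device with 16 contiguous registers, where a
 * plain array cache is cheaper than an rbtree or LZO cache. */
static const struct regmap_config example_flat_config = {
	.reg_bits     = 8,
	.val_bits     = 8,
	.max_register = 0x0f,
	.cache_type   = REGCACHE_FLAT,
};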
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 835883bda977..e69ff3e4742c 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -22,6 +22,7 @@
static const struct regcache_ops *cache_types[] = {
&regcache_rbtree_ops,
&regcache_lzo_ops,
+ &regcache_flat_ops,
};
static int regcache_hw_init(struct regmap *map)
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 07aad786f817..78d5f20c5f5b 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -56,6 +56,19 @@ static const struct file_operations regmap_name_fops = {
.llseek = default_llseek,
};
+static void regmap_debugfs_free_dump_cache(struct regmap *map)
+{
+ struct regmap_debugfs_off_cache *c;
+
+ while (!list_empty(&map->debugfs_off_cache)) {
+ c = list_first_entry(&map->debugfs_off_cache,
+ struct regmap_debugfs_off_cache,
+ list);
+ list_del(&c->list);
+ kfree(c);
+ }
+}
+
/*
* Work out where the start offset maps into register numbers, bearing
* in mind that we suppress hidden registers.
@@ -68,6 +81,8 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
struct regmap_debugfs_off_cache *c = NULL;
loff_t p = 0;
unsigned int i, ret;
+ unsigned int fpos_offset;
+ unsigned int reg_offset;
/*
* If we don't have a cache build one so we don't have to do a
@@ -80,6 +95,9 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
regmap_precious(map, i)) {
if (c) {
c->max = p - 1;
+ fpos_offset = c->max - c->min;
+ reg_offset = fpos_offset / map->debugfs_tot_len;
+ c->max_reg = c->base_reg + reg_offset;
list_add_tail(&c->list,
&map->debugfs_off_cache);
c = NULL;
@@ -91,8 +109,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
/* No cache entry? Start a new one */
if (!c) {
c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- break;
+ if (!c) {
+ regmap_debugfs_free_dump_cache(map);
+ return base;
+ }
c->min = p;
c->base_reg = i;
}
@@ -101,19 +121,53 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
}
}
- /* Find the relevant block */
+ /* Close the last entry off if we didn't scan beyond it */
+ if (c) {
+ c->max = p - 1;
+ fpos_offset = c->max - c->min;
+ reg_offset = fpos_offset / map->debugfs_tot_len;
+ c->max_reg = c->base_reg + reg_offset;
+ list_add_tail(&c->list,
+ &map->debugfs_off_cache);
+ }
+
+ /*
+ * This should never happen; we return above if we fail to
+ * allocate and we should never be in this code if there are
+ * no registers at all.
+ */
+ WARN_ON(list_empty(&map->debugfs_off_cache));
+ ret = base;
+
+ /* Find the relevant block:offset */
list_for_each_entry(c, &map->debugfs_off_cache, list) {
- if (*pos >= c->min && *pos <= c->max) {
- *pos = c->min;
- return c->base_reg;
+ if (from >= c->min && from <= c->max) {
+ fpos_offset = from - c->min;
+ reg_offset = fpos_offset / map->debugfs_tot_len;
+ *pos = c->min + (reg_offset * map->debugfs_tot_len);
+ return c->base_reg + reg_offset;
}
- ret = c->max;
+ *pos = c->max;
+ ret = c->max_reg;
}
return ret;
}
+static inline void regmap_calc_tot_len(struct regmap *map,
+ void *buf, size_t count)
+{
+ /* Calculate the length of a fixed format */
+ if (!map->debugfs_tot_len) {
+ map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
+ buf, count);
+ map->debugfs_val_len = 2 * map->format.val_bytes;
+ map->debugfs_tot_len = map->debugfs_reg_len +
+ map->debugfs_val_len + 3; /* : \n */
+ }
+}
+
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
unsigned int to, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -132,14 +186,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
if (!buf)
return -ENOMEM;
- /* Calculate the length of a fixed format */
- if (!map->debugfs_tot_len) {
- map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
- buf, count);
- map->debugfs_val_len = 2 * map->format.val_bytes;
- map->debugfs_tot_len = map->debugfs_reg_len +
- map->debugfs_val_len + 3; /* : \n */
- }
+ regmap_calc_tot_len(map, buf, count);
/* Work out which register we're starting at */
start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
@@ -154,7 +201,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
- if (buf_pos + 1 + map->debugfs_tot_len >= count)
+ if (buf_pos + map->debugfs_tot_len > count)
break;
/* Format the register */
@@ -387,16 +434,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
void regmap_debugfs_exit(struct regmap *map)
{
- struct regmap_debugfs_off_cache *c;
-
debugfs_remove_recursive(map->debugfs);
- while (!list_empty(&map->debugfs_off_cache)) {
- c = list_first_entry(&map->debugfs_off_cache,
- struct regmap_debugfs_off_cache,
- list);
- list_del(&c->list);
- kfree(c);
- }
+ regmap_debugfs_free_dump_cache(map);
kfree(map->debugfs_name);
}
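
A quick worked example of the block:offset mapping introduced above, assuming a hypothetical dump line of 8 bytes per register (3-character register, 2-character value, plus ": " and the newline):

/* Invented numbers: the cache block starts at file offset 16 (c->min)
 * and at register 2 (c->base_reg); each register costs 8 bytes. */
unsigned int fpos_offset = 40 - 16;		/* from - c->min              */
unsigned int reg_offset  = fpos_offset / 8;	/* / map->debugfs_tot_len = 3 */
unsigned int start_reg   = 2 + reg_offset;	/* c->base_reg + offset   = 5 */
/* and *pos is snapped back to c->min + reg_offset * 8 = 40 */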
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 5972ad958544..4706c63d0bc6 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -34,6 +34,7 @@ struct regmap_irq_chip_data {
int irq;
int wake_count;
+ void *status_reg_buf;
unsigned int *status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
@@ -87,6 +88,23 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (ret != 0)
dev_err(d->map->dev, "Failed to sync masks in %x\n",
reg);
+
+ reg = d->chip->wake_base +
+ (i * map->reg_stride * d->irq_reg_stride);
+ if (d->wake_buf) {
+ if (d->chip->wake_invert)
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i],
+ ~d->wake_buf[i]);
+ else
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i],
+ d->wake_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev,
+ "Failed to sync wakes in %x: %d\n",
+ reg, ret);
+ }
}
if (d->chip->runtime_pm)
@@ -129,16 +147,15 @@ static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
- if (!d->chip->wake_base)
- return -EINVAL;
-
if (on) {
- d->wake_buf[irq_data->reg_offset / map->reg_stride]
- &= ~irq_data->mask;
+ if (d->wake_buf)
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ &= ~irq_data->mask;
d->wake_count++;
} else {
- d->wake_buf[irq_data->reg_offset / map->reg_stride]
- |= irq_data->mask;
+ if (d->wake_buf)
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ |= irq_data->mask;
d->wake_count--;
}
@@ -172,25 +189,69 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
}
/*
- * Ignore masked IRQs and ack if we need to; we ack early so
- * there is no race between handling and acknowleding the
- * interrupt. We assume that typically few of the interrupts
- * will fire simultaneously so don't worry about overhead from
- * doing a write per register.
+ * Read in the statuses, using a single bulk read if possible
+ * in order to reduce the I/O overheads.
*/
- for (i = 0; i < data->chip->num_regs; i++) {
- ret = regmap_read(map, chip->status_base + (i * map->reg_stride
- * data->irq_reg_stride),
- &data->status_buf[i]);
+ if (!map->use_single_rw && map->reg_stride == 1 &&
+ data->irq_reg_stride == 1) {
+ u8 *buf8 = data->status_reg_buf;
+ u16 *buf16 = data->status_reg_buf;
+ u32 *buf32 = data->status_reg_buf;
+ BUG_ON(!data->status_reg_buf);
+
+ ret = regmap_bulk_read(map, chip->status_base,
+ data->status_reg_buf,
+ chip->num_regs);
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
- ret);
- if (chip->runtime_pm)
- pm_runtime_put(map->dev);
+ ret);
return IRQ_NONE;
}
+ for (i = 0; i < data->chip->num_regs; i++) {
+ switch (map->format.val_bytes) {
+ case 1:
+ data->status_buf[i] = buf8[i];
+ break;
+ case 2:
+ data->status_buf[i] = buf16[i];
+ break;
+ case 4:
+ data->status_buf[i] = buf32[i];
+ break;
+ default:
+ BUG();
+ return IRQ_NONE;
+ }
+ }
+
+ } else {
+ for (i = 0; i < data->chip->num_regs; i++) {
+ ret = regmap_read(map, chip->status_base +
+ (i * map->reg_stride
+ * data->irq_reg_stride),
+ &data->status_buf[i]);
+
+ if (ret != 0) {
+ dev_err(map->dev,
+ "Failed to read IRQ status: %d\n",
+ ret);
+ if (chip->runtime_pm)
+ pm_runtime_put(map->dev);
+ return IRQ_NONE;
+ }
+ }
+ }
+
+ /*
+ * Ignore masked IRQs and ack if we need to; we ack early so
+	 * there is no race between handling and acknowledging the
+ * interrupt. We assume that typically few of the interrupts
+ * will fire simultaneously so don't worry about overhead from
+ * doing a write per register.
+ */
+ for (i = 0; i < data->chip->num_regs; i++) {
data->status_buf[i] &= ~data->mask_buf[i];
if (data->status_buf[i] && chip->ack_base) {
@@ -316,11 +377,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
d->irq_chip = regmap_irq_chip;
d->irq_chip.name = chip->name;
- if (!chip->wake_base) {
- d->irq_chip.irq_set_wake = NULL;
- d->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND |
- IRQCHIP_SKIP_SET_WAKE;
- }
d->irq = irq;
d->map = map;
d->chip = chip;
@@ -331,6 +387,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
else
d->irq_reg_stride = 1;
+ if (!map->use_single_rw && map->reg_stride == 1 &&
+ d->irq_reg_stride == 1) {
+ d->status_reg_buf = kmalloc(map->format.val_bytes *
+ chip->num_regs, GFP_KERNEL);
+ if (!d->status_reg_buf)
+ goto err_alloc;
+ }
+
mutex_init(&d->lock);
for (i = 0; i < chip->num_irqs; i++)
@@ -361,8 +425,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
d->wake_buf[i] = d->mask_buf_def[i];
reg = chip->wake_base +
(i * map->reg_stride * d->irq_reg_stride);
- ret = regmap_update_bits(map, reg, d->wake_buf[i],
- d->wake_buf[i]);
+
+ if (chip->wake_invert)
+ ret = regmap_update_bits(map, reg,
+ d->mask_buf_def[i],
+ 0);
+ else
+ ret = regmap_update_bits(map, reg,
+ d->mask_buf_def[i],
+ d->wake_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
@@ -401,6 +472,7 @@ err_alloc:
kfree(d->mask_buf_def);
kfree(d->mask_buf);
kfree(d->status_buf);
+ kfree(d->status_reg_buf);
kfree(d);
return ret;
}
@@ -422,6 +494,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->status_reg_buf);
kfree(d->status_buf);
kfree(d);
}
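
The wake handling above only kicks in when a chip actually declares wake registers; a sketch of the chip description, with every address invented, showing the fields the code consults:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical controller: one status/mask/ack/wake register each,
 * two interrupt sources. */
static const struct regmap_irq example_irqs[] = {
	{ .reg_offset = 0, .mask = 0x01 },
	{ .reg_offset = 0, .mask = 0x02 },
};

static const struct regmap_irq_chip example_irq_chip = {
	.name        = "example",
	.status_base = 0x10,
	.mask_base   = 0x11,
	.ack_base    = 0x12,
	.wake_base   = 0x13,	/* leave at 0 if the chip cannot wake */
	.wake_invert = true,	/* wake bits use inverted polarity */
	.num_regs    = 1,
	.irqs        = example_irqs,
	.num_irqs    = ARRAY_SIZE(example_irqs),
};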
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index f05fc74dd84a..98745dd77e8c 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -16,6 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -26,6 +27,7 @@
struct regmap_mmio_context {
void __iomem *regs;
unsigned val_bytes;
+ struct clk *clk;
};
static int regmap_mmio_gather_write(void *context,
@@ -34,9 +36,16 @@ static int regmap_mmio_gather_write(void *context,
{
struct regmap_mmio_context *ctx = context;
u32 offset;
+ int ret;
BUG_ON(reg_size != 4);
+ if (ctx->clk) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
offset = *(u32 *)reg;
while (val_size) {
@@ -64,6 +73,9 @@ static int regmap_mmio_gather_write(void *context,
offset += ctx->val_bytes;
}
+ if (ctx->clk)
+ clk_disable(ctx->clk);
+
return 0;
}
@@ -80,9 +92,16 @@ static int regmap_mmio_read(void *context,
{
struct regmap_mmio_context *ctx = context;
u32 offset;
+ int ret;
BUG_ON(reg_size != 4);
+ if (ctx->clk) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
offset = *(u32 *)reg;
while (val_size) {
@@ -110,11 +129,20 @@ static int regmap_mmio_read(void *context,
offset += ctx->val_bytes;
}
+ if (ctx->clk)
+ clk_disable(ctx->clk);
+
return 0;
}
static void regmap_mmio_free_context(void *context)
{
+ struct regmap_mmio_context *ctx = context;
+
+ if (ctx->clk) {
+ clk_unprepare(ctx->clk);
+ clk_put(ctx->clk);
+ }
kfree(context);
}
@@ -128,11 +156,14 @@ static struct regmap_bus regmap_mmio = {
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
-static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
+static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
int min_stride;
+ int ret;
if (config->reg_bits != 32)
return ERR_PTR(-EINVAL);
@@ -179,37 +210,59 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
+ if (clk_id == NULL)
+ return ctx;
+
+ ctx->clk = clk_get(dev, clk_id);
+ if (IS_ERR(ctx->clk)) {
+ ret = PTR_ERR(ctx->clk);
+ goto err_free;
+ }
+
+ ret = clk_prepare(ctx->clk);
+ if (ret < 0) {
+ clk_put(ctx->clk);
+ goto err_free;
+ }
+
return ctx;
+
+err_free:
+ kfree(ctx);
+
+ return ERR_PTR(ret);
}
/**
- * regmap_init_mmio(): Initialise register map
+ * regmap_init_mmio_clk(): Initialise register map with register clock
*
* @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
* @regs: Pointer to memory-mapped IO region
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
-struct regmap *regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
+struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
- ctx = regmap_mmio_gen_context(regs, config);
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
return regmap_init(dev, &regmap_mmio, ctx, config);
}
-EXPORT_SYMBOL_GPL(regmap_init_mmio);
+EXPORT_SYMBOL_GPL(regmap_init_mmio_clk);
/**
- * devm_regmap_init_mmio(): Initialise managed register map
+ * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
*
* @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
* @regs: Pointer to memory-mapped IO region
* @config: Configuration for register map
*
@@ -217,18 +270,18 @@ EXPORT_SYMBOL_GPL(regmap_init_mmio);
* to a struct regmap. The regmap will be automatically freed by the
* device management code.
*/
-struct regmap *devm_regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
+struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
- ctx = regmap_mmio_gen_context(regs, config);
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
return devm_regmap_init(dev, &regmap_mmio, ctx, config);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_mmio);
+EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk);
MODULE_LICENSE("GPL v2");
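
A minimal caller sketch for the clock-aware MMIO variant; the "apb_pclk" consumer ID and the register layout are assumptions, and base is taken to be already ioremap()ed:

#include <linux/regmap.h>

static const struct regmap_config example_mmio_config = {
	.reg_bits   = 32,	/* regmap-mmio requires 32-bit register addresses */
	.val_bits   = 32,
	.reg_stride = 4,
};

static struct regmap *example_mmio_map(struct device *dev, void __iomem *base)
{
	/* The clock is clk_get()/clk_prepare()d once here and enabled
	 * around each register access; pass NULL to skip clock handling. */
	return devm_regmap_init_mmio_clk(dev, "apb_pclk", base,
					 &example_mmio_config);
}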
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index ffa46a92ad33..4c506bd940f3 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -15,6 +15,21 @@
#include <linux/init.h>
#include <linux/module.h>
+#include "internal.h"
+
+struct regmap_async_spi {
+ struct regmap_async core;
+ struct spi_message m;
+ struct spi_transfer t[2];
+};
+
+static void regmap_spi_complete(void *data)
+{
+ struct regmap_async_spi *async = data;
+
+ regmap_async_complete_cb(&async->core, async->m.status);
+}
+
static int regmap_spi_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
@@ -40,6 +55,43 @@ static int regmap_spi_gather_write(void *context,
return spi_sync(spi, &m);
}
+static int regmap_spi_async_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len,
+ struct regmap_async *a)
+{
+ struct regmap_async_spi *async = container_of(a,
+ struct regmap_async_spi,
+ core);
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ async->t[0].tx_buf = reg;
+ async->t[0].len = reg_len;
+ async->t[1].tx_buf = val;
+ async->t[1].len = val_len;
+
+ spi_message_init(&async->m);
+ spi_message_add_tail(&async->t[0], &async->m);
+ spi_message_add_tail(&async->t[1], &async->m);
+
+ async->m.complete = regmap_spi_complete;
+ async->m.context = async;
+
+ return spi_async(spi, &async->m);
+}
+
+static struct regmap_async *regmap_spi_async_alloc(void)
+{
+ struct regmap_async_spi *async_spi;
+
+ async_spi = kzalloc(sizeof(*async_spi), GFP_KERNEL);
+ if (!async_spi)
+ return NULL;
+
+ return &async_spi->core;
+}
+
static int regmap_spi_read(void *context,
const void *reg, size_t reg_size,
void *val, size_t val_size)
@@ -53,6 +105,8 @@ static int regmap_spi_read(void *context,
static struct regmap_bus regmap_spi = {
.write = regmap_spi_write,
.gather_write = regmap_spi_gather_write,
+ .async_write = regmap_spi_async_write,
+ .async_alloc = regmap_spi_async_alloc,
.read = regmap_spi_read,
.read_flag_mask = 0x80,
};
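
With async_write/async_alloc wired into the SPI bus, the core (below) can schedule raw writes asynchronously; a sketch of a firmware-style bulk download using the new API, where the register address and buffer are placeholders:

#include <linux/regmap.h>

/* The blob must stay valid until regmap_async_complete() returns. */
static int example_async_download(struct regmap *map,
				  const void *blob, size_t len)
{
	int ret;

	ret = regmap_raw_write_async(map, 0x100 /* assumed data port */,
				     blob, len);
	if (ret)
		return ret;

	/* Wait for all outstanding async writes and collect any error. */
	return regmap_async_complete(map);
}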
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 42d5cb0f503f..3d2367501fd0 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -16,6 +16,7 @@
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/rbtree.h>
+#include <linux/sched.h>
#define CREATE_TRACE_POINTS
#include <trace/events/regmap.h>
@@ -34,6 +35,22 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change);
+static int _regmap_bus_read(void *context, unsigned int reg,
+ unsigned int *val);
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+ unsigned int val);
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+ unsigned int val);
+
+static void async_cleanup(struct work_struct *work)
+{
+ struct regmap_async *async = container_of(work, struct regmap_async,
+ cleanup);
+
+ kfree(async->work_buf);
+ kfree(async);
+}
+
bool regmap_reg_in_ranges(unsigned int reg,
const struct regmap_range *ranges,
unsigned int nranges)
@@ -372,7 +389,7 @@ struct regmap *regmap_init(struct device *dev,
enum regmap_endian reg_endian, val_endian;
int i, j;
- if (!bus || !config)
+ if (!config)
goto err;
map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -386,7 +403,8 @@ struct regmap *regmap_init(struct device *dev,
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
} else {
- if (bus->fast_io) {
+ if ((bus && bus->fast_io) ||
+ config->fast_io) {
spin_lock_init(&map->spinlock);
map->lock = regmap_lock_spinlock;
map->unlock = regmap_unlock_spinlock;
@@ -423,13 +441,27 @@ struct regmap *regmap_init(struct device *dev,
map->cache_type = config->cache_type;
map->name = config->name;
+ spin_lock_init(&map->async_lock);
+ INIT_LIST_HEAD(&map->async_list);
+ init_waitqueue_head(&map->async_waitq);
+
if (config->read_flag_mask || config->write_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
map->write_flag_mask = config->write_flag_mask;
- } else {
+ } else if (bus) {
map->read_flag_mask = bus->read_flag_mask;
}
+ if (!bus) {
+ map->reg_read = config->reg_read;
+ map->reg_write = config->reg_write;
+
+ map->defer_caching = false;
+ goto skip_format_initialization;
+ } else {
+ map->reg_read = _regmap_bus_read;
+ }
+
reg_endian = config->reg_format_endian;
if (reg_endian == REGMAP_ENDIAN_DEFAULT)
reg_endian = bus->reg_format_endian_default;
@@ -500,6 +532,12 @@ struct regmap *regmap_init(struct device *dev,
}
break;
+ case 24:
+ if (reg_endian != REGMAP_ENDIAN_BIG)
+ goto err_map;
+ map->format.format_reg = regmap_format_24;
+ break;
+
case 32:
switch (reg_endian) {
case REGMAP_ENDIAN_BIG:
@@ -575,6 +613,16 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
+ if (map->format.format_write) {
+ map->defer_caching = false;
+ map->reg_write = _regmap_bus_formatted_write;
+ } else if (map->format.format_val) {
+ map->defer_caching = true;
+ map->reg_write = _regmap_bus_raw_write;
+ }
+
+skip_format_initialization:
+
map->range_tree = RB_ROOT;
for (i = 0; i < config->num_ranges; i++) {
const struct regmap_range_cfg *range_cfg = &config->ranges[i];
@@ -776,7 +824,7 @@ void regmap_exit(struct regmap *map)
regcache_exit(map);
regmap_debugfs_exit(map);
regmap_range_exit(map);
- if (map->bus->free_context)
+ if (map->bus && map->bus->free_context)
map->bus->free_context(map->bus_context);
kfree(map->work_buf);
kfree(map);
@@ -870,15 +918,20 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
}
static int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len)
+ const void *val, size_t val_len, bool async)
{
struct regmap_range_node *range;
+ unsigned long flags;
u8 *u8 = map->work_buf;
+ void *work_val = map->work_buf + map->format.reg_bytes +
+ map->format.pad_bytes;
void *buf;
int ret = -ENOTSUPP;
size_t len;
int i;
+ BUG_ON(!map->bus);
+
/* Check for unwritable registers before we start */
if (map->writeable_reg)
for (i = 0; i < val_len / map->format.val_bytes; i++)
@@ -918,7 +971,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
dev_dbg(map->dev, "Writing window %d/%zu\n",
win_residue, val_len / map->format.val_bytes);
ret = _regmap_raw_write(map, reg, val, win_residue *
- map->format.val_bytes);
+ map->format.val_bytes, async);
if (ret != 0)
return ret;
@@ -941,6 +994,50 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
u8[0] |= map->write_flag_mask;
+ if (async && map->bus->async_write) {
+ struct regmap_async *async = map->bus->async_alloc();
+ if (!async)
+ return -ENOMEM;
+
+ async->work_buf = kzalloc(map->format.buf_size,
+ GFP_KERNEL | GFP_DMA);
+ if (!async->work_buf) {
+ kfree(async);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&async->cleanup, async_cleanup);
+ async->map = map;
+
+ /* If the caller supplied the value we can use it safely. */
+ memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
+ map->format.reg_bytes + map->format.val_bytes);
+ if (val == work_val)
+ val = async->work_buf + map->format.pad_bytes +
+ map->format.reg_bytes;
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ list_add_tail(&async->list, &map->async_list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ ret = map->bus->async_write(map->bus_context, async->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ val, val_len, async);
+
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to schedule write: %d\n",
+ ret);
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ list_del(&async->list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ kfree(async->work_buf);
+ kfree(async);
+ }
+ }
+
trace_regmap_hw_write_start(map->dev, reg,
val_len / map->format.val_bytes);
@@ -948,8 +1045,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
* send the work_buf directly, otherwise try to do a gather
* write.
*/
- if (val == (map->work_buf + map->format.pad_bytes +
- map->format.reg_bytes))
+ if (val == work_val)
ret = map->bus->write(map->bus_context, map->work_buf,
map->format.reg_bytes +
map->format.pad_bytes +
@@ -981,14 +1077,62 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
return ret;
}
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+ struct regmap_range_node *range;
+ struct regmap *map = context;
+
+ BUG_ON(!map->bus || !map->format.format_write);
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range, 1);
+ if (ret != 0)
+ return ret;
+ }
+
+ map->format.format_write(map, reg, val);
+
+ trace_regmap_hw_write_start(map->dev, reg, 1);
+
+ ret = map->bus->write(map->bus_context, map->work_buf,
+ map->format.buf_size);
+
+ trace_regmap_hw_write_done(map->dev, reg, 1);
+
+ return ret;
+}
+
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct regmap *map = context;
+
+ BUG_ON(!map->bus || !map->format.format_val);
+
+ map->format.format_val(map->work_buf + map->format.reg_bytes
+ + map->format.pad_bytes, val, 0);
+ return _regmap_raw_write(map, reg,
+ map->work_buf +
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ map->format.val_bytes, false);
+}
+
+static inline void *_regmap_map_get_context(struct regmap *map)
+{
+ return (map->bus) ? map : map->bus_context;
+}
+
int _regmap_write(struct regmap *map, unsigned int reg,
unsigned int val)
{
- struct regmap_range_node *range;
int ret;
- BUG_ON(!map->format.format_write && !map->format.format_val);
+ void *context = _regmap_map_get_context(map);
- if (!map->cache_bypass && map->format.format_write) {
+ if (!map->cache_bypass && !map->defer_caching) {
ret = regcache_write(map, reg, val);
if (ret != 0)
return ret;
@@ -1005,33 +1149,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
trace_regmap_reg_write(map->dev, reg, val);
- if (map->format.format_write) {
- range = _regmap_range_lookup(map, reg);
- if (range) {
- ret = _regmap_select_page(map, &reg, range, 1);
- if (ret != 0)
- return ret;
- }
-
- map->format.format_write(map, reg, val);
-
- trace_regmap_hw_write_start(map->dev, reg, 1);
-
- ret = map->bus->write(map->bus_context, map->work_buf,
- map->format.buf_size);
-
- trace_regmap_hw_write_done(map->dev, reg, 1);
-
- return ret;
- } else {
- map->format.format_val(map->work_buf + map->format.reg_bytes
- + map->format.pad_bytes, val, 0);
- return _regmap_raw_write(map, reg,
- map->work_buf +
- map->format.reg_bytes +
- map->format.pad_bytes,
- map->format.val_bytes);
- }
+ return map->reg_write(context, reg, val);
}
/**
@@ -1082,6 +1200,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
{
int ret;
+ if (!map->bus)
+ return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (reg % map->reg_stride)
@@ -1089,7 +1209,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
map->lock(map->lock_arg);
- ret = _regmap_raw_write(map, reg, val, val_len);
+ ret = _regmap_raw_write(map, reg, val, val_len, false);
map->unlock(map->lock_arg);
@@ -1106,7 +1226,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_write);
* @val_count: Number of registers to write
*
* This function is intended to be used for writing a large block of
- * data to be device either in single transfer or multiple transfer.
+ * data to the device either in a single transfer or multiple transfers.
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
@@ -1118,6 +1238,8 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_bytes = map->format.val_bytes;
void *wval;
+ if (!map->bus)
+ return -EINVAL;
if (!map->format.parse_val)
return -EINVAL;
if (reg % map->reg_stride)
@@ -1145,14 +1267,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (map->use_single_rw) {
for (i = 0; i < val_count; i++) {
ret = regmap_raw_write(map,
- reg + (i * map->reg_stride),
- val + (i * val_bytes),
- val_bytes);
+ reg + (i * map->reg_stride),
+ val + (i * val_bytes),
+ val_bytes);
if (ret != 0)
return ret;
}
} else {
- ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+ ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
+ false);
}
if (val_bytes != 1)
@@ -1164,6 +1287,48 @@ out:
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
+/**
+ * regmap_raw_write_async(): Write raw values to one or more registers
+ * asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Initial register to write to
+ * @val: Block of data to be written, laid out for direct transmission to the
+ * device. Must be valid until regmap_async_complete() is called.
+ * @val_len: Length of data pointed to by val.
+ *
+ * This function is intended to be used for things like firmware
+ * download where a large block of data needs to be transferred to the
+ * device. No formatting will be done on the data provided.
+ *
+ * If supported by the underlying bus the write will be scheduled
+ * asynchronously, helping maximise I/O speed on higher speed buses
+ * like SPI. regmap_async_complete() can be called to ensure that all
+ * asynchronous writes have been completed.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ int ret;
+
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (reg % map->reg_stride)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ ret = _regmap_raw_write(map, reg, val, val_len, true);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_write_async);
+
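A minimal sketch of the use case described in the kernel-doc above: schedule one large raw write asynchronously, then block until the bus has drained it. The firmware handling and the base register address are illustrative assumptions; struct firmware comes from <linux/firmware.h>.

/* Hypothetical firmware download.  fw->data must remain valid until
 * regmap_async_complete() has returned; register 0x00 is made up. */
static int example_load_firmware(struct regmap *map, const struct firmware *fw)
{
	int ret;

	ret = regmap_raw_write_async(map, 0x00, fw->data, fw->size);
	if (ret != 0)
		return ret;

	/* Wait for all scheduled asynchronous writes to finish */
	return regmap_async_complete(map);
}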
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int val_len)
{
@@ -1171,6 +1336,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
u8 *u8 = map->work_buf;
int ret;
+ BUG_ON(!map->bus);
+
range = _regmap_range_lookup(map, reg);
if (range) {
ret = _regmap_select_page(map, &reg, range,
@@ -1202,10 +1369,29 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return ret;
}
+static int _regmap_bus_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+ struct regmap *map = context;
+
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ if (ret == 0)
+ *val = map->format.parse_val(map->work_buf);
+
+ return ret;
+}
+
static int _regmap_read(struct regmap *map, unsigned int reg,
unsigned int *val)
{
int ret;
+ void *context = _regmap_map_get_context(map);
+
+ BUG_ON(!map->reg_read);
if (!map->cache_bypass) {
ret = regcache_read(map, reg, val);
@@ -1213,26 +1399,21 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
return 0;
}
- if (!map->format.parse_val)
- return -EINVAL;
-
if (map->cache_only)
return -EBUSY;
- ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ ret = map->reg_read(context, reg, val);
if (ret == 0) {
- *val = map->format.parse_val(map->work_buf);
-
#ifdef LOG_DEVICE
if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
dev_info(map->dev, "%x => %x\n", reg, *val);
#endif
trace_regmap_reg_read(map->dev, reg, *val);
- }
- if (ret == 0 && !map->cache_bypass)
- regcache_write(map, reg, *val);
+ if (!map->cache_bypass)
+ regcache_write(map, reg, *val);
+ }
return ret;
}
@@ -1283,6 +1464,8 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int v;
int ret, i;
+ if (!map->bus)
+ return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (reg % map->reg_stride)
@@ -1334,6 +1517,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_bytes = map->format.val_bytes;
bool vol = regmap_volatile_range(map, reg, val_count);
+ if (!map->bus)
+ return -EINVAL;
if (!map->format.parse_val)
return -EINVAL;
if (reg % map->reg_stride)
@@ -1450,6 +1635,68 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);
+void regmap_async_complete_cb(struct regmap_async *async, int ret)
+{
+ struct regmap *map = async->map;
+ bool wake;
+
+ spin_lock(&map->async_lock);
+
+ list_del(&async->list);
+ wake = list_empty(&map->async_list);
+
+ if (ret != 0)
+ map->async_ret = ret;
+
+ spin_unlock(&map->async_lock);
+
+ schedule_work(&async->cleanup);
+
+ if (wake)
+ wake_up(&map->async_waitq);
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
+
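This is the hook a bus driver uses to report completion back to the core: from its transfer-done handler it calls regmap_async_complete_cb() with the regmap_async it was handed and the transfer status. A sketch, assuming the bus carries the regmap_async pointer as its completion cookie; the real SPI wiring lives in regmap-spi.c and is not part of this hunk.

/* Hypothetical bus-side completion handler. */
static void example_bus_xfer_done(void *cookie, int err)
{
	struct regmap_async *async = cookie;

	/* Drops the transfer from async_list, records any error and
	 * wakes waiters blocked in regmap_async_complete(). */
	regmap_async_complete_cb(async, err);
}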
+static int regmap_async_is_done(struct regmap *map)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ ret = list_empty(&map->async_list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ return ret;
+}
+
+/**
+ * regmap_async_complete: Ensure all asynchronous I/O has completed.
+ *
+ * @map: Map to operate on.
+ *
+ * Blocks until any pending asynchronous I/O has completed. Returns
+ * an error code for any failed I/O operations.
+ */
+int regmap_async_complete(struct regmap *map)
+{
+ unsigned long flags;
+ int ret;
+
+ /* Nothing to do with no async support */
+ if (!map->bus->async_write)
+ return 0;
+
+ wait_event(map->async_waitq, regmap_async_is_done(map));
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ ret = map->async_ret;
+ map->async_ret = 0;
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete);
+
/**
* regmap_register_patch: Register and apply register updates to be applied
* on device initialisation
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 19e3fbfd5757..79595a001204 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -31,6 +31,8 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
+struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+ u8 unit);
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
@@ -45,6 +47,7 @@ int bcma_sprom_get(struct bcma_bus *bus);
/* driver_chipcommon.c */
#ifdef CONFIG_BCMA_DRIVER_MIPS
void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_pflash_dev;
#endif /* CONFIG_BCMA_DRIVER_MIPS */
/* driver_chipcommon_pmu.c */
@@ -94,11 +97,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
#ifdef CONFIG_BCMA_DRIVER_GPIO
/* driver_gpio.c */
int bcma_gpio_init(struct bcma_drv_cc *cc);
+int bcma_gpio_unregister(struct bcma_drv_cc *cc);
#else
static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
{
return -ENOTSUPP;
}
+static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+ return 0;
+}
#endif /* CONFIG_BCMA_DRIVER_GPIO */
#endif
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index e461ad25fda4..28fa50ad87be 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -329,7 +329,7 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
return;
}
- irq = bcma_core_mips_irq(cc->core);
+ irq = bcma_core_irq(cc->core);
/* Determine the registers of the UARTs */
cc->nr_serial_ports = (cc->capabilities & BCMA_CC_CAP_NRUART);
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index dbda91e4dff5..d4f699aef8c4 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -5,11 +5,11 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "bcma_private.h"
+
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
-#include "bcma_private.h"
-
struct platform_device bcma_nflash_dev = {
.name = "bcma_nflash",
.num_resources = 0,
@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
struct bcma_bus *bus = cc->core->bus;
if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
- cc->core->id.rev != 0x38) {
+ cc->core->id.rev != 38) {
bcma_err(bus, "NAND flash on unsupported board!\n");
return -ENOTSUPP;
}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index c62c788b3289..932b101dee36 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -264,7 +264,7 @@ static u32 bcma_pmu_pll_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
}
/* query bus clock frequency for PMU-enabled chipcommon */
-static u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc)
+u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
@@ -293,6 +293,7 @@ static u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc)
}
return BCMA_CC_PMU_HT_CLOCK;
}
+EXPORT_SYMBOL_GPL(bcma_pmu_get_bus_clock);
/* query cpu clock frequency for PMU-enabled chipcommon */
u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc)
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 1e694db4532d..e6ed4fe5dced 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -5,11 +5,11 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include "bcma_private.h"
+
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
-#include "bcma_private.h"
-
static struct resource bcma_sflash_resource = {
.name = "bcma_sflash",
.start = BCMA_SOC_FLASH2,
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 9a6f585da2d9..45f0996a3752 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -73,6 +73,16 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
}
+static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
+ return bcma_core_irq(cc->core);
+ else
+ return -EINVAL;
+}
+
int bcma_gpio_init(struct bcma_drv_cc *cc)
{
struct gpio_chip *chip = &cc->gpio;
@@ -85,6 +95,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
+ chip->to_irq = bcma_gpio_to_irq;
chip->ngpio = 16;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
@@ -96,3 +107,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
return gpiochip_add(chip);
}
+
+int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+ return gpiochip_remove(&cc->gpio);
+}
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 792daad28cbc..9a7f0e3ab5a3 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -14,11 +14,33 @@
#include <linux/bcma/bcma.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/time.h>
+static const char *part_probes[] = { "bcm47xxpart", NULL };
+
+static struct physmap_flash_data bcma_pflash_data = {
+ .part_probe_types = part_probes,
+};
+
+static struct resource bcma_pflash_resource = {
+ .name = "bcma_pflash",
+ .flags = IORESOURCE_MEM,
+};
+
+struct platform_device bcma_pflash_dev = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &bcma_pflash_data,
+ },
+ .resource = &bcma_pflash_resource,
+ .num_resources = 1,
+};
+
/* The 47162a0 hangs when reading MIPS DMP registers */
static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
{
@@ -74,28 +96,41 @@ static u32 bcma_core_mips_irqflag(struct bcma_device *dev)
return dev->core_index;
flag = bcma_aread32(dev, BCMA_MIPS_OOBSELOUTA30);
- return flag & 0x1F;
+ if (flag)
+ return flag & 0x1F;
+ else
+ return 0x3f;
}
/* Get the MIPS IRQ assignment for a specified device.
* If unassigned, 0 is returned.
+ * If disabled, 5 is returned.
+ * If not supported, 6 is returned.
*/
-unsigned int bcma_core_mips_irq(struct bcma_device *dev)
+static unsigned int bcma_core_mips_irq(struct bcma_device *dev)
{
struct bcma_device *mdev = dev->bus->drv_mips.core;
u32 irqflag;
unsigned int irq;
irqflag = bcma_core_mips_irqflag(dev);
+ if (irqflag == 0x3f)
+ return 6;
- for (irq = 1; irq <= 4; irq++)
+ for (irq = 0; irq <= 4; irq++)
if (bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq)) &
(1 << irqflag))
return irq;
- return 0;
+ return 5;
+}
+
+unsigned int bcma_core_irq(struct bcma_device *dev)
+{
+ unsigned int mips_irq = bcma_core_mips_irq(dev);
+ return mips_irq <= 4 ? mips_irq + 2 : 0;
}
-EXPORT_SYMBOL(bcma_core_mips_irq);
+EXPORT_SYMBOL(bcma_core_irq);
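With bcma_core_irq() now the exported accessor, a core driver on a MIPS SoC host fetches its already-remapped interrupt number and requests it as usual; a return of 0 means no line is assigned. The handler and names below are illustrative assumptions.

static irqreturn_t example_core_isr(int irq, void *dev_id)
{
	/* hypothetical handler body */
	return IRQ_HANDLED;
}

static int example_core_request_irq(struct bcma_device *core)
{
	unsigned int irq = bcma_core_irq(core);

	if (!irq)
		return -ENXIO;

	return request_irq(irq, example_core_isr, IRQF_SHARED,
			   "example-core", core);
}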
static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
{
@@ -114,7 +149,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0),
bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
~(1 << irqflag));
- else
+ else if (oldirq != 5)
bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
/* assign the new one */
@@ -123,9 +158,9 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) |
(1 << irqflag));
} else {
- u32 oldirqflag = bcma_read32(mdev,
- BCMA_MIPS_MIPS74K_INTMASK(irq));
- if (oldirqflag) {
+ u32 irqinitmask = bcma_read32(mdev,
+ BCMA_MIPS_MIPS74K_INTMASK(irq));
+ if (irqinitmask) {
struct bcma_device *core;
/* backplane irq line is in use, find out who uses
@@ -133,7 +168,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
*/
list_for_each_entry(core, &bus->cores, list) {
if ((1 << bcma_core_mips_irqflag(core)) ==
- oldirqflag) {
+ irqinitmask) {
bcma_core_mips_set_irq(core, 0);
break;
}
@@ -143,15 +178,31 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
1 << irqflag);
}
- bcma_info(bus, "set_irq: core 0x%04x, irq %d => %d\n",
- dev->id.id, oldirq + 2, irq + 2);
+ bcma_debug(bus, "set_irq: core 0x%04x, irq %d => %d\n",
+ dev->id.id, oldirq <= 4 ? oldirq + 2 : 0, irq + 2);
+}
+
+static void bcma_core_mips_set_irq_name(struct bcma_bus *bus, unsigned int irq,
+ u16 coreid, u8 unit)
+{
+ struct bcma_device *core;
+
+ core = bcma_find_core_unit(bus, coreid, unit);
+ if (!core) {
+ bcma_warn(bus,
+ "Can not find core (id: 0x%x, unit %i) for IRQ configuration.\n",
+ coreid, unit);
+ return;
+ }
+
+ bcma_core_mips_set_irq(core, irq);
}
static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
{
int i;
static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
- printk(KERN_INFO KBUILD_MODNAME ": core 0x%04x, irq :", dev->id.id);
+ printk(KERN_DEBUG KBUILD_MODNAME ": core 0x%04x, irq :", dev->id.id);
for (i = 0; i <= 6; i++)
printk(" %s%s", irq_name[i], i == irq ? "*" : " ");
printk("\n");
@@ -182,6 +233,7 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
{
struct bcma_bus *bus = mcore->core->bus;
struct bcma_drv_cc *cc = &bus->drv_cc;
+ struct bcma_pflash *pflash = &cc->pflash;
switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
@@ -191,15 +243,20 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
break;
case BCMA_CC_FLASHT_PARA:
bcma_debug(bus, "Found parallel flash\n");
- cc->pflash.present = true;
- cc->pflash.window = BCMA_SOC_FLASH2;
- cc->pflash.window_size = BCMA_SOC_FLASH2_SZ;
+ pflash->present = true;
+ pflash->window = BCMA_SOC_FLASH2;
+ pflash->window_size = BCMA_SOC_FLASH2_SZ;
if ((bcma_read32(cc->core, BCMA_CC_FLASH_CFG) &
BCMA_CC_FLASH_CFG_DS) == 0)
- cc->pflash.buswidth = 1;
+ pflash->buswidth = 1;
else
- cc->pflash.buswidth = 2;
+ pflash->buswidth = 2;
+
+ bcma_pflash_data.width = pflash->buswidth;
+ bcma_pflash_resource.start = pflash->window;
+ bcma_pflash_resource.end = pflash->window + pflash->window_size;
+
break;
default:
bcma_err(bus, "Flash type not supported\n");
@@ -227,6 +284,32 @@ void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
mcore->early_setup_done = true;
}
+static void bcma_fix_i2s_irqflag(struct bcma_bus *bus)
+{
+ struct bcma_device *cpu, *pcie, *i2s;
+
+ /* Fixup the interrupts in 4716/4748 for i2s core (2010 Broadcom SDK)
+ * (IRQ flags > 7 are ignored when setting the interrupt masks)
+ */
+ if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4716 &&
+ bus->chipinfo.id != BCMA_CHIP_ID_BCM4748)
+ return;
+
+ cpu = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
+ pcie = bcma_find_core(bus, BCMA_CORE_PCIE);
+ i2s = bcma_find_core(bus, BCMA_CORE_I2S);
+ if (cpu && pcie && i2s &&
+ bcma_aread32(cpu, BCMA_MIPS_OOBSELINA74) == 0x08060504 &&
+ bcma_aread32(pcie, BCMA_MIPS_OOBSELINA74) == 0x08060504 &&
+ bcma_aread32(i2s, BCMA_MIPS_OOBSELOUTA30) == 0x88) {
+ bcma_awrite32(cpu, BCMA_MIPS_OOBSELINA74, 0x07060504);
+ bcma_awrite32(pcie, BCMA_MIPS_OOBSELINA74, 0x07060504);
+ bcma_awrite32(i2s, BCMA_MIPS_OOBSELOUTA30, 0x87);
+ bcma_debug(bus,
+ "Moved i2s interrupt to oob line 7 instead of 8\n");
+ }
+}
+
void bcma_core_mips_init(struct bcma_drv_mips *mcore)
{
struct bcma_bus *bus;
@@ -236,43 +319,55 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
if (mcore->setup_done)
return;
- bcma_info(bus, "Initializing MIPS core...\n");
+ bcma_debug(bus, "Initializing MIPS core...\n");
bcma_core_mips_early_init(mcore);
- mcore->assigned_irqs = 1;
-
- /* Assign IRQs to all cores on the bus */
- list_for_each_entry(core, &bus->cores, list) {
- int mips_irq;
- if (core->irq)
- continue;
-
- mips_irq = bcma_core_mips_irq(core);
- if (mips_irq > 4)
- core->irq = 0;
- else
- core->irq = mips_irq + 2;
- if (core->irq > 5)
- continue;
- switch (core->id.id) {
- case BCMA_CORE_PCI:
- case BCMA_CORE_PCIE:
- case BCMA_CORE_ETHERNET:
- case BCMA_CORE_ETHERNET_GBIT:
- case BCMA_CORE_MAC_GBIT:
- case BCMA_CORE_80211:
- case BCMA_CORE_USB20_HOST:
- /* These devices get their own IRQ line if available,
- * the rest goes on IRQ0
- */
- if (mcore->assigned_irqs <= 4)
- bcma_core_mips_set_irq(core,
- mcore->assigned_irqs++);
- break;
+ bcma_fix_i2s_irqflag(bus);
+
+ switch (bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM4716:
+ case BCMA_CHIP_ID_BCM4748:
+ bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
+ bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
+ bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_USB20_HOST, 0);
+ bcma_core_mips_set_irq_name(bus, 4, BCMA_CORE_PCIE, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_I2S, 0);
+ break;
+ case BCMA_CHIP_ID_BCM5356:
+ case BCMA_CHIP_ID_BCM47162:
+ case BCMA_CHIP_ID_BCM53572:
+ bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
+ bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
+ break;
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
+ bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
+ bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
+ bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_USB20_HOST, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_I2S, 0);
+ break;
+ case BCMA_CHIP_ID_BCM4706:
+ bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_PCIE, 0);
+ bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_4706_MAC_GBIT,
+ 0);
+ bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_PCIE, 1);
+ bcma_core_mips_set_irq_name(bus, 4, BCMA_CORE_USB20_HOST, 0);
+ bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_4706_CHIPCOMMON,
+ 0);
+ break;
+ default:
+ list_for_each_entry(core, &bus->cores, list) {
+ core->irq = bcma_core_irq(core);
}
+ bcma_err(bus,
+ "Unknown device (0x%x) found, can not configure IRQs\n",
+ bus->chipinfo.id);
}
- bcma_info(bus, "IRQ reconfiguration done\n");
+ bcma_debug(bus, "IRQ reconfiguration done\n");
bcma_core_mips_dump_irq(bus);
mcore->setup_done = true;
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index af0c9fabee54..d3bde6cec927 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -94,19 +94,19 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
if (dev == 0) {
/* we support only two functions on device 0 */
if (func > 1)
- return -EINVAL;
+ goto out;
/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
if (off >= PCI_CONFIG_SPACE_SIZE) {
addr = (func << 12);
- addr |= (off & 0x0FFF);
+ addr |= (off & 0x0FFC);
val = bcma_pcie_read_config(pc, addr);
} else {
addr = BCMA_CORE_PCI_PCICFG0;
addr |= (func << 8);
- addr |= (off & 0xfc);
+ addr |= (off & 0xFC);
val = pcicore_read32(pc, addr);
}
} else {
@@ -119,11 +119,9 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
goto out;
if (mips_busprobe32(val, mmio)) {
- val = 0xffffffff;
+ val = 0xFFFFFFFF;
goto unmap;
}
-
- val = readl(mmio);
}
val >>= (8 * (off & 3));
@@ -151,7 +149,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
const void *buf, int len)
{
int err = -EINVAL;
- u32 addr = 0, val = 0;
+ u32 addr, val;
void __iomem *mmio = 0;
u16 chipid = pc->core->bus->chipinfo.id;
@@ -159,16 +157,22 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(len != 1 && len != 2 && len != 4))
goto out;
if (dev == 0) {
+ /* we support only two functions on device 0 */
+ if (func > 1)
+ goto out;
+
/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
- if (off < PCI_CONFIG_SPACE_SIZE) {
- addr = pc->core->addr + BCMA_CORE_PCI_PCICFG0;
+ if (off >= PCI_CONFIG_SPACE_SIZE) {
+ addr = (func << 12);
+ addr |= (off & 0x0FFC);
+ val = bcma_pcie_read_config(pc, addr);
+ } else {
+ addr = BCMA_CORE_PCI_PCICFG0;
addr |= (func << 8);
- addr |= (off & 0xfc);
- mmio = ioremap_nocache(addr, sizeof(val));
- if (!mmio)
- goto out;
+ addr |= (off & 0xFC);
+ val = pcicore_read32(pc, addr);
}
} else {
addr = bcma_get_cfgspace_addr(pc, dev, func, off);
@@ -180,19 +184,17 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
goto out;
if (mips_busprobe32(val, mmio)) {
- val = 0xffffffff;
+ val = 0xFFFFFFFF;
goto unmap;
}
}
switch (len) {
case 1:
- val = readl(mmio);
val &= ~(0xFF << (8 * (off & 3)));
val |= *((const u8 *)buf) << (8 * (off & 3));
break;
case 2:
- val = readl(mmio);
val &= ~(0xFFFF << (8 * (off & 3)));
val |= *((const u16 *)buf) << (8 * (off & 3));
break;
@@ -200,13 +202,14 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
val = *((const u32 *)buf);
break;
}
- if (dev == 0 && !addr) {
+ if (dev == 0) {
/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
- addr = (func << 12);
- addr |= (off & 0x0FFF);
- bcma_pcie_write_config(pc, addr, val);
+ if (off >= PCI_CONFIG_SPACE_SIZE)
+ bcma_pcie_write_config(pc, addr, val);
+ else
+ pcicore_write32(pc, addr, val);
} else {
writel(val, mmio);
@@ -276,7 +279,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
/* check for Header type 0 */
bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
sizeof(u8));
- if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
+ if ((byte_val & 0x7F) != PCI_HEADER_TYPE_NORMAL)
return cap_ptr;
/* check if the capability pointer field exists */
@@ -426,7 +429,7 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
/* Reset RC */
usleep_range(3000, 5000);
pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
- usleep_range(1000, 2000);
+ msleep(50);
pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
BCMA_CORE_PCI_CTL_RST_OE);
@@ -488,6 +491,17 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
bcma_core_pci_enable_crs(pc);
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4716) {
+ u16 val16;
+ bcma_extpci_read_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
+ &val16, sizeof(val16));
+ val16 |= (2 << 5); /* Max payload size of 512 */
+ val16 |= (2 << 12); /* MRRS 512 */
+ bcma_extpci_write_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
+ &val16, sizeof(val16));
+ }
+
/* Enable PCI bridge BAR0 memory & master access */
tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));
@@ -576,7 +590,7 @@ int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
pr_info("PCI: Fixing up device %s\n", pci_name(dev));
/* Fix up interrupt lines */
- dev->irq = bcma_core_mips_irq(pc_host->pdev->core) + 2;
+ dev->irq = bcma_core_irq(pc_host->pdev->core);
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
return 0;
@@ -595,6 +609,6 @@ int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
pci_ops);
- return bcma_core_mips_irq(pc_host->pdev->core) + 2;
+ return bcma_core_irq(pc_host->pdev->core);
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 4a92f647b58b..9a6188add590 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -81,8 +81,8 @@ struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
}
EXPORT_SYMBOL_GPL(bcma_find_core);
-static struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
- u8 unit)
+struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+ u8 unit)
{
struct bcma_device *core;
@@ -149,6 +149,14 @@ static int bcma_register_cores(struct bcma_bus *bus)
dev_id++;
}
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+ if (bus->drv_cc.pflash.present) {
+ err = platform_device_register(&bcma_pflash_dev);
+ if (err)
+ bcma_err(bus, "Error registering parallel flash\n");
+ }
+#endif
+
#ifdef CONFIG_BCMA_SFLASH
if (bus->drv_cc.sflash.present) {
err = platform_device_register(&bcma_sflash_dev);
@@ -268,6 +276,13 @@ int bcma_bus_register(struct bcma_bus *bus)
void bcma_bus_unregister(struct bcma_bus *bus)
{
struct bcma_device *cores[3];
+ int err;
+
+ err = bcma_gpio_unregister(&bus->drv_cc);
+ if (err == -EBUSY)
+ bcma_err(bus, "Some GPIOs are still in use.\n");
+ else if (err)
+ bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f58a4a4b4dfb..2b8303ad63c9 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -168,7 +168,7 @@ static void wake_all_senders(struct drbd_tconn *tconn) {
}
/* must hold resource->req_lock */
-static void start_new_tl_epoch(struct drbd_tconn *tconn)
+void start_new_tl_epoch(struct drbd_tconn *tconn)
{
/* no point closing an epoch, if it is empty, anyways. */
if (tconn->current_tle_writes == 0)
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 016de6b8bb57..c08d22964d06 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -267,6 +267,7 @@ struct bio_and_error {
int error;
};
+extern void start_new_tl_epoch(struct drbd_tconn *tconn);
extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m);
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 53bf6182bac4..0fe220cfb9e9 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -931,6 +931,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
enum drbd_state_rv rv = SS_SUCCESS;
enum sanitize_state_warnings ssw;
struct after_state_chg_work *ascw;
+ bool did_remote, should_do_remote;
os = drbd_read_state(mdev);
@@ -981,11 +982,17 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
(os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
atomic_inc(&mdev->local_cnt);
+ did_remote = drbd_should_do_remote(mdev->state);
mdev->state.i = ns.i;
+ should_do_remote = drbd_should_do_remote(mdev->state);
mdev->tconn->susp = ns.susp;
mdev->tconn->susp_nod = ns.susp_nod;
mdev->tconn->susp_fen = ns.susp_fen;
+ /* put replicated vs not-replicated requests in separate epochs */
+ if (did_remote != should_do_remote)
+ start_new_tl_epoch(mdev->tconn);
+
if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
drbd_print_uuids(mdev, "attached to UUIDs");
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 9694dd99bbbc..3fd100990453 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data)
}
}
- if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ if (cmdto_cnt) {
print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
-
- mtip_restart_port(port);
+ if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ mtip_restart_port(port);
+ wake_up_interruptible(&port->svc_wait);
+ }
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
}
if (port->ic_pause_timer) {
@@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd)
* Delete our gendisk structure. This also removes the device
* from /dev
*/
- del_gendisk(dd->disk);
+ if (dd->disk) {
+ if (dd->disk->queue)
+ del_gendisk(dd->disk);
+ else
+ put_disk(dd->disk);
+ }
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
@@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd)
"Shutting down %s ...\n", dd->disk->disk_name);
/* Delete our gendisk structure, and cleanup the blk queue. */
- del_gendisk(dd->disk);
+ if (dd->disk) {
+ if (dd->disk->queue)
+ del_gendisk(dd->disk);
+ else
+ put_disk(dd->disk);
+ }
+
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index 28cf3082d442..efefb5ac3004 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -205,8 +205,8 @@ config PARIDE_EPAT
support.
config PARIDE_EPATC8
- bool "Support c7/c8 chips (EXPERIMENTAL)"
- depends on PARIDE_EPAT && EXPERIMENTAL
+ bool "Support c7/c8 chips"
+ depends on PARIDE_EPAT
help
This option enables support for the newer Shuttle EP1284 (aka c7 and
c8) chip. You need this if you are using any recent Imation SuperDisk
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 564156a8e572..5814deb6963d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
int op_len, err;
void *req_buf;
- if (!(((u64)1 << ((u64)op - 1)) & port->operations))
+ if (!(((u64)1 << (u64)op) & port->operations))
return -EOPNOTSUPP;
switch (op) {
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 765fa2b3d337..8766a2257091 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -844,6 +844,7 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].swd = swd;
}
+ spin_lock_init(&swd->lock);
swd->queue = blk_init_queue(do_fd_request, &swd->lock);
if (!swd->queue) {
err = -ENOMEM;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 9d8409c02082..8ad21a25bc0d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -889,6 +889,7 @@ static void virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
int index = vblk->index;
+ int refc;
/* Prevent config work handler from accessing the device. */
mutex_lock(&vblk->config_lock);
@@ -903,11 +904,15 @@ static void virtblk_remove(struct virtio_device *vdev)
flush_work(&vblk->config_work);
+ refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
put_disk(vblk->disk);
mempool_destroy(vblk->pool);
vdev->config->del_vqs(vdev);
kfree(vblk);
- ida_simple_remove(&vd_index_ida, index);
+
+ /* Only free device id if we don't have any users */
+ if (refc == 1)
+ ida_simple_remove(&vd_index_ida, index);
}
#ifdef CONFIG_PM
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 74374fb762aa..5ac841ff6cc7 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -161,10 +161,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
static void make_response(struct xen_blkif *blkif, u64 id,
unsigned short op, int st);
-#define foreach_grant(pos, rbtree, node) \
- for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \
+#define foreach_grant_safe(pos, n, rbtree, node) \
+ for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
+ (n) = rb_next(&(pos)->node); \
&(pos)->node != NULL; \
- (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node))
+ (pos) = container_of(n, typeof(*(pos)), node), \
+ (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
static void add_persistent_gnt(struct rb_root *root,
@@ -217,10 +219,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt;
+ struct rb_node *n;
int ret = 0;
int segs_to_unmap = 0;
- foreach_grant(persistent_gnt, root, node) {
+ foreach_grant_safe(persistent_gnt, n, root, node) {
BUG_ON(persistent_gnt->handle ==
BLKBACK_INVALID_HANDLE);
gnttab_set_unmap_op(&unmap[segs_to_unmap],
@@ -230,9 +233,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
persistent_gnt->handle);
pages[segs_to_unmap] = persistent_gnt->page;
- rb_erase(&persistent_gnt->node, root);
- kfree(persistent_gnt);
- num--;
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
!rb_next(&persistent_gnt->node)) {
@@ -241,6 +241,10 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
BUG_ON(ret);
segs_to_unmap = 0;
}
+
+ rb_erase(&persistent_gnt->node, root);
+ kfree(persistent_gnt);
+ num--;
}
BUG_ON(num != 0);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 96e9b00db081..11043c18ac5a 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -792,6 +792,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
{
struct llist_node *all_gnts;
struct grant *persistent_gnt;
+ struct llist_node *n;
/* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&info->io_lock);
@@ -804,7 +805,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
/* Remove all persistent grants */
if (info->persistent_gnts_c) {
all_gnts = llist_del_all(&info->persistent_gnts);
- llist_for_each_entry(persistent_gnt, all_gnts, node) {
+ llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
__free_page(pfn_to_page(persistent_gnt->pfn));
kfree(persistent_gnt);
@@ -835,7 +836,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
struct blkif_response *bret)
{
- int i;
+ int i = 0;
struct bio_vec *bvec;
struct req_iterator iter;
unsigned long flags;
@@ -852,7 +853,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
*/
rq_for_each_segment(bvec, s->request, iter) {
BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
- i = offset >> PAGE_SHIFT;
+ if (bvec->bv_offset < offset)
+ i++;
BUG_ON(i >= s->req.u.rw.nr_segments);
shared_data = kmap_atomic(
pfn_to_page(s->grants_used[i]->pfn));
@@ -861,7 +863,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
bvec->bv_len);
bvec_kunmap_irq(bvec_data, &flags);
kunmap_atomic(shared_data);
- offset += bvec->bv_len;
+ offset = bvec->bv_offset + bvec->bv_len;
}
}
/* Add the persistent grant into the list of free grants */
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index e9f203eadb1f..fdfd61a2d523 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -26,6 +26,7 @@ config BT_HCIBTSDIO
config BT_HCIUART
tristate "HCI UART driver"
+ depends on TTY
help
Bluetooth HCI UART driver.
This driver is required if you want to use Bluetooth devices with
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index b00000e8aef6..a8a41e07a221 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -77,10 +77,15 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x04CA, 0x3005) },
+ { USB_DEVICE(0x04CA, 0x3006) },
+ { USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0489, 0xe057) },
+ { USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x0489, 0xe04e) },
+ { USB_DEVICE(0x0489, 0xe056) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -104,10 +109,15 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
@@ -339,7 +349,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
ret = ath3k_get_state(udev, &fw_state);
if (ret < 0) {
- BT_ERR("Can't get state to change to load configration err");
+ BT_ERR("Can't get state to change to load configuration err");
return -EBUSY;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a1d4ede5b892..7e351e345476 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -135,10 +135,15 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72bedad6bf8c..3bb6fa3930be 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -53,7 +53,7 @@ source "drivers/tty/serial/Kconfig"
config TTY_PRINTK
bool "TTY driver to output user messages via printk"
- depends on EXPERT
+ depends on EXPERT && TTY
default n
---help---
If you say Y here, the support for writing user messages (i.e.
@@ -159,7 +159,7 @@ source "drivers/tty/hvc/Kconfig"
config VIRTIO_CONSOLE
tristate "Virtio console"
- depends on VIRTIO
+ depends on VIRTIO && TTY
select HVC_DRIVER
help
Virtio console for use with lguest and other hypervisors.
@@ -392,6 +392,7 @@ config XILINX_HWICAP
config R3964
tristate "Siemens R3964 line discipline"
+ depends on TTY
---help---
This driver allows synchronous communication with devices using the
Siemens R3964 packet protocol. Unless you are dealing with special
@@ -439,7 +440,7 @@ source "drivers/char/pcmcia/Kconfig"
config MWAVE
tristate "ACP Modem (Mwave) support"
- depends on X86
+ depends on X86 && TTY
select SERIAL_8250
---help---
The ACP modem (Mwave) for Linux is a WinModem. It is composed of a
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index fe6d4be48296..e3f9a99b8522 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -1041,7 +1041,7 @@ static int hpet_acpi_add(struct acpi_device *device)
return hpet_alloc(&data);
}
-static int hpet_acpi_remove(struct acpi_device *device, int type)
+static int hpet_acpi_remove(struct acpi_device *device)
{
/* XXX need to unregister clocksource, dealloc mem, etc */
return -EINVAL;
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 48bbfeca4b5d..ac47631ab34f 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -104,6 +104,7 @@ static int exynos_read(struct hwrng *rng, void *buf,
static int exynos_rng_probe(struct platform_device *pdev)
{
struct exynos_rng *exynos_rng;
+ struct resource *res;
exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
GFP_KERNEL);
@@ -120,10 +121,10 @@ static int exynos_rng_probe(struct platform_device *pdev)
return -ENOENT;
}
- exynos_rng->mem = devm_request_and_ioremap(&pdev->dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
- if (!exynos_rng->mem)
- return -EBUSY;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ exynos_rng->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(exynos_rng->mem))
+ return PTR_ERR(exynos_rng->mem);
platform_set_drvdata(pdev, exynos_rng);
@@ -162,7 +163,7 @@ static int exynos_rng_runtime_resume(struct device *dev)
}
-UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
+static UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
exynos_rng_runtime_resume, NULL);
static struct platform_driver exynos_rng_driver = {
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index d8c54e253761..749dc16ca2cc 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -124,9 +124,9 @@ static int omap_rng_probe(struct platform_device *pdev)
goto err_ioremap;
}
- priv->base = devm_request_and_ioremap(&pdev->dev, priv->mem_res);
- if (!priv->base) {
- ret = -ENOMEM;
+ priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
goto err_ioremap;
}
dev_set_drvdata(&pdev->dev, priv);
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
index de473ef3882b..30991989d65b 100644
--- a/drivers/char/hw_random/tx4939-rng.c
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -7,6 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -115,9 +116,9 @@ static int __init tx4939_rng_probe(struct platform_device *dev)
rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
if (!rngdev)
return -ENOMEM;
- rngdev->base = devm_request_and_ioremap(&dev->dev, r);
- if (!rngdev->base)
- return -EBUSY;
+ rngdev->base = devm_ioremap_resource(&dev->dev, r);
+ if (IS_ERR(rngdev->base))
+ return PTR_ERR(rngdev->base);
rngdev->rng.name = dev_name(&dev->dev);
rngdev->rng.data_present = tx4939_rng_data_present;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index c6fa3bc2baa8..6f6e92a3102d 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -399,7 +399,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
{
unsigned long p = *ppos;
ssize_t low_count, read, sz;
- char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+ char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
int err = 0;
read = 0;
@@ -527,7 +527,7 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
unsigned long p = *ppos;
ssize_t wrote = 0;
ssize_t virtr = 0;
- char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+ char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
int err = 0;
if (p < (unsigned long) high_memory) {
@@ -595,7 +595,7 @@ static ssize_t write_port(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long i = *ppos;
- const char __user * tmp = buf;
+ const char __user *tmp = buf;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
@@ -729,7 +729,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
-static int open_port(struct inode * inode, struct file * filp)
+static int open_port(struct inode *inode, struct file *filp)
{
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
@@ -898,7 +898,7 @@ static int __init chr_dev_init(void)
continue;
/*
- * Create /dev/port?
+ * Create /dev/port?
*/
if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
continue;
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 6614416a8623..2a166d56738a 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -7,7 +7,7 @@ menu "PCMCIA character devices"
config SYNCLINK_CS
tristate "SyncLink PC Card support"
- depends on PCMCIA
+ depends on PCMCIA && TTY
help
Enable support for the SyncLink PC Card serial adapter, running
asynchronous and HDLC communications up to 512Kbps. The port is
@@ -45,7 +45,7 @@ config CARDMAN_4040
config IPWIRELESS
tristate "IPWireless 3G UMTS PCMCIA card support"
- depends on PCMCIA && NETDEVICES
+ depends on PCMCIA && NETDEVICES && TTY
select PPP
help
This is a driver for 3G UMTS PCMCIA card from IPWireless company. In
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index b66eaa04f8cb..5c5cc00ebb07 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -102,8 +102,7 @@ static MGSL_PARAMS default_params = {
ASYNC_PARITY_NONE /* unsigned char parity; */
};
-typedef struct
-{
+typedef struct {
int count;
unsigned char status;
char data[1];
@@ -210,7 +209,7 @@ typedef struct _mgslpc_info {
char testing_irq;
unsigned int init_error; /* startup error (DIAGS) */
- char flag_buf[MAX_ASYNC_BUFFER_SIZE];
+ char *flag_buf;
bool drop_rts_on_tx_done;
struct _input_signal_events input_signal_events;
@@ -326,10 +325,10 @@ typedef struct _mgslpc_info {
#define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg))
#define set_reg_bits(info, reg, mask) \
- write_reg(info, (reg), \
+ write_reg(info, (reg), \
(unsigned char) (read_reg(info, (reg)) | (mask)))
#define clear_reg_bits(info, reg, mask) \
- write_reg(info, (reg), \
+ write_reg(info, (reg), \
(unsigned char) (read_reg(info, (reg)) & ~(mask)))
/*
* interrupt enable/disable routines
@@ -356,10 +355,10 @@ static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short
}
#define port_irq_disable(info, mask) \
- { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
+ { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
#define port_irq_enable(info, mask) \
- { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
+ { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
static void rx_start(MGSLPC_INFO *info);
static void rx_stop(MGSLPC_INFO *info);
@@ -397,7 +396,7 @@ static int adapter_test(MGSLPC_INFO *info);
static int claim_resources(MGSLPC_INFO *info);
static void release_resources(MGSLPC_INFO *info);
-static void mgslpc_add_device(MGSLPC_INFO *info);
+static int mgslpc_add_device(MGSLPC_INFO *info);
static void mgslpc_remove_device(MGSLPC_INFO *info);
static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty);
@@ -514,49 +513,56 @@ static const struct tty_port_operations mgslpc_port_ops = {
static int mgslpc_probe(struct pcmcia_device *link)
{
- MGSLPC_INFO *info;
- int ret;
-
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_attach\n");
-
- info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
- if (!info) {
- printk("Error can't allocate device instance data\n");
- return -ENOMEM;
- }
-
- info->magic = MGSLPC_MAGIC;
- tty_port_init(&info->port);
- info->port.ops = &mgslpc_port_ops;
- INIT_WORK(&info->task, bh_handler);
- info->max_frame_size = 4096;
- info->port.close_delay = 5*HZ/10;
- info->port.closing_wait = 30*HZ;
- init_waitqueue_head(&info->status_event_wait_q);
- init_waitqueue_head(&info->event_wait_q);
- spin_lock_init(&info->lock);
- spin_lock_init(&info->netlock);
- memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
- info->idle_mode = HDLC_TXIDLE_FLAGS;
- info->imra_value = 0xffff;
- info->imrb_value = 0xffff;
- info->pim_value = 0xff;
-
- info->p_dev = link;
- link->priv = info;
-
- /* Initialize the struct pcmcia_device structure */
-
- ret = mgslpc_config(link);
- if (ret) {
- tty_port_destroy(&info->port);
- return ret;
- }
-
- mgslpc_add_device(info);
-
- return 0;
+ MGSLPC_INFO *info;
+ int ret;
+
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("mgslpc_attach\n");
+
+ info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
+ if (!info) {
+ printk("Error can't allocate device instance data\n");
+ return -ENOMEM;
+ }
+
+ info->magic = MGSLPC_MAGIC;
+ tty_port_init(&info->port);
+ info->port.ops = &mgslpc_port_ops;
+ INIT_WORK(&info->task, bh_handler);
+ info->max_frame_size = 4096;
+ info->port.close_delay = 5*HZ/10;
+ info->port.closing_wait = 30*HZ;
+ init_waitqueue_head(&info->status_event_wait_q);
+ init_waitqueue_head(&info->event_wait_q);
+ spin_lock_init(&info->lock);
+ spin_lock_init(&info->netlock);
+ memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+ info->idle_mode = HDLC_TXIDLE_FLAGS;
+ info->imra_value = 0xffff;
+ info->imrb_value = 0xffff;
+ info->pim_value = 0xff;
+
+ info->p_dev = link;
+ link->priv = info;
+
+ /* Initialize the struct pcmcia_device structure */
+
+ ret = mgslpc_config(link);
+ if (ret != 0)
+ goto failed;
+
+ ret = mgslpc_add_device(info);
+ if (ret != 0)
+ goto failed_release;
+
+ return 0;
+
+failed_release:
+ mgslpc_release((u_long)link);
+failed:
+ tty_port_destroy(&info->port);
+ kfree(info);
+ return ret;
}
/* Card has been inserted.
@@ -569,35 +575,35 @@ static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
static int mgslpc_config(struct pcmcia_device *link)
{
- MGSLPC_INFO *info = link->priv;
- int ret;
+ MGSLPC_INFO *info = link->priv;
+ int ret;
- if (debug_level >= DEBUG_LEVEL_INFO)
- printk("mgslpc_config(0x%p)\n", link);
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("mgslpc_config(0x%p)\n", link);
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
- ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
- if (ret != 0)
- goto failed;
+ ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
+ if (ret != 0)
+ goto failed;
- link->config_index = 8;
- link->config_regs = PRESENT_OPTION;
+ link->config_index = 8;
+ link->config_regs = PRESENT_OPTION;
- ret = pcmcia_request_irq(link, mgslpc_isr);
- if (ret)
- goto failed;
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
+ ret = pcmcia_request_irq(link, mgslpc_isr);
+ if (ret)
+ goto failed;
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
- info->io_base = link->resource[0]->start;
- info->irq_level = link->irq;
- return 0;
+ info->io_base = link->resource[0]->start;
+ info->irq_level = link->irq;
+ return 0;
failed:
- mgslpc_release((u_long)link);
- return -ENODEV;
+ mgslpc_release((u_long)link);
+ return -ENODEV;
}
/* Card has been removed.
@@ -703,12 +709,12 @@ static void tx_pause(struct tty_struct *tty)
if (mgslpc_paranoia_check(info, tty->name, "tx_pause"))
return;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("tx_pause(%s)\n",info->device_name);
+ printk("tx_pause(%s)\n", info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->tx_enabled)
- tx_stop(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
static void tx_release(struct tty_struct *tty)
@@ -719,12 +725,12 @@ static void tx_release(struct tty_struct *tty)
if (mgslpc_paranoia_check(info, tty->name, "tx_release"))
return;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("tx_release(%s)\n",info->device_name);
+ printk("tx_release(%s)\n", info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_enabled)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Return next bottom half action to perform.
@@ -735,7 +741,7 @@ static int bh_action(MGSLPC_INFO *info)
unsigned long flags;
int rc = 0;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->pending_bh & BH_RECEIVE) {
info->pending_bh &= ~BH_RECEIVE;
@@ -754,7 +760,7 @@ static int bh_action(MGSLPC_INFO *info)
info->bh_requested = false;
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return rc;
}
@@ -765,11 +771,8 @@ static void bh_handler(struct work_struct *work)
struct tty_struct *tty;
int action;
- if (!info)
- return;
-
if (debug_level >= DEBUG_LEVEL_BH)
- printk( "%s(%d):bh_handler(%s) entry\n",
+ printk("%s(%d):bh_handler(%s) entry\n",
__FILE__,__LINE__,info->device_name);
info->bh_running = true;
@@ -778,8 +781,8 @@ static void bh_handler(struct work_struct *work)
while((action = bh_action(info)) != 0) {
/* Process work item */
- if ( debug_level >= DEBUG_LEVEL_BH )
- printk( "%s(%d):bh_handler() work item action=%d\n",
+ if (debug_level >= DEBUG_LEVEL_BH)
+ printk("%s(%d):bh_handler() work item action=%d\n",
__FILE__,__LINE__,action);
switch (action) {
@@ -802,7 +805,7 @@ static void bh_handler(struct work_struct *work)
tty_kref_put(tty);
if (debug_level >= DEBUG_LEVEL_BH)
- printk( "%s(%d):bh_handler(%s) exit\n",
+ printk("%s(%d):bh_handler(%s) exit\n",
__FILE__,__LINE__,info->device_name);
}
@@ -831,7 +834,7 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size));
if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):rx_ready_hdlc(eom=%d)\n",__FILE__,__LINE__,eom);
+ printk("%s(%d):rx_ready_hdlc(eom=%d)\n", __FILE__, __LINE__, eom);
if (!info->rx_enabled)
return;
@@ -847,7 +850,8 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
if (eom) {
/* end of frame, get FIFO count from RBCL register */
- if (!(fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f)))
+ fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
+ if (fifo_count == 0)
fifo_count = 32;
} else
fifo_count = 32;
@@ -886,20 +890,13 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
issue_command(info, CHA, CMD_RXFIFO);
}
-static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
+static void rx_ready_async(MGSLPC_INFO *info, int tcd)
{
+ struct tty_port *port = &info->port;
unsigned char data, status, flag;
int fifo_count;
int work = 0;
- struct mgsl_icount *icount = &info->icount;
-
- if (!tty) {
- /* tty is not available anymore */
- issue_command(info, CHA, CMD_RXRESET);
- if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):rx_ready_async(tty=NULL)\n",__FILE__,__LINE__);
- return;
- }
+ struct mgsl_icount *icount = &info->icount;
if (tcd) {
/* early termination, get FIFO count from RBCL register */
@@ -913,7 +910,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
} else
fifo_count = 32;
- tty_buffer_request_room(tty, fifo_count);
+ tty_buffer_request_room(port, fifo_count);
/* Flush received async data to receive data buffer. */
while (fifo_count) {
data = read_reg(info, CHA + RXFIFO);
@@ -944,7 +941,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
else if (status & BIT6)
flag = TTY_FRAME;
}
- work += tty_insert_flip_char(tty, data, flag);
+ work += tty_insert_flip_char(port, data, flag);
}
issue_command(info, CHA, CMD_RXFIFO);
@@ -957,7 +954,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty)
}
if (work)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
@@ -1004,7 +1001,7 @@ static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty)
int c;
if (debug_level >= DEBUG_LEVEL_ISR)
- printk("%s(%d):tx_ready(%s)\n", __FILE__,__LINE__,info->device_name);
+ printk("%s(%d):tx_ready(%s)\n", __FILE__, __LINE__, info->device_name);
if (info->params.mode == MGSL_MODE_HDLC) {
if (!info->tx_active)
@@ -1217,7 +1214,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
if (info->params.mode == MGSL_MODE_HDLC)
rx_ready_hdlc(info, isr & IRQ_RXEOM);
else
- rx_ready_async(info, isr & IRQ_RXEOM, tty);
+ rx_ready_async(info, isr & IRQ_RXEOM);
}
/* transmit IRQs */
@@ -1249,7 +1246,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
*/
if (info->pending_bh && !info->bh_running && !info->bh_requested) {
- if ( debug_level >= DEBUG_LEVEL_ISR )
+ if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):%s queueing bh task.\n",
__FILE__,__LINE__,info->device_name);
schedule_work(&info->task);
@@ -1273,7 +1270,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
int retval = 0;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):startup(%s)\n",__FILE__,__LINE__,info->device_name);
+ printk("%s(%d):startup(%s)\n", __FILE__, __LINE__, info->device_name);
if (info->port.flags & ASYNC_INITIALIZED)
return 0;
@@ -1283,7 +1280,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
if (!info->tx_buf) {
printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
- __FILE__,__LINE__,info->device_name);
+ __FILE__, __LINE__, info->device_name);
return -ENOMEM;
}
}
@@ -1298,15 +1295,15 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
retval = claim_resources(info);
/* perform existence check and diagnostics */
- if ( !retval )
+ if (!retval)
retval = adapter_test(info);
- if ( retval ) {
- if (capable(CAP_SYS_ADMIN) && tty)
+ if (retval) {
+ if (capable(CAP_SYS_ADMIN) && tty)
set_bit(TTY_IO_ERROR, &tty->flags);
release_resources(info);
- return retval;
- }
+ return retval;
+ }
/* program hardware for current parameters */
mgslpc_change_params(info, tty);
@@ -1330,7 +1327,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_shutdown(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
/* clear status wait queue because status changes */
/* can't happen after shutting down the hardware */
@@ -1344,7 +1341,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
info->tx_buf = NULL;
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
rx_stop(info);
tx_stop(info);
@@ -1352,12 +1349,12 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
/* TODO:disable interrupts instead of reset to preserve signal states */
reset_device(info);
- if (!tty || tty->termios.c_cflag & HUPCL) {
- info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ if (!tty || tty->termios.c_cflag & HUPCL) {
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
release_resources(info);
@@ -1371,7 +1368,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
{
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
rx_stop(info);
tx_stop(info);
@@ -1396,7 +1393,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
if (info->netcount || (tty && (tty->termios.c_cflag & CREAD)))
rx_start(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Reconfigure adapter based on new parameters
@@ -1411,16 +1408,16 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_change_params(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
cflag = tty->termios.c_cflag;
- /* if B0 rate (hangup) specified then negate DTR and RTS */
- /* otherwise assert DTR and RTS */
- if (cflag & CBAUD)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* if B0 rate (hangup) specified then negate RTS and DTR */
+ /* otherwise assert RTS and DTR */
+ if (cflag & CBAUD)
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
@@ -1463,7 +1460,7 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
info->params.data_rate = tty_get_baud_rate(tty);
}
- if ( info->params.data_rate ) {
+ if (info->params.data_rate) {
info->timeout = (32*HZ*bits_per_char) /
info->params.data_rate;
}
@@ -1498,8 +1495,8 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO) {
- printk( "%s(%d):mgslpc_put_char(%d) on %s\n",
- __FILE__,__LINE__,ch,info->device_name);
+ printk("%s(%d):mgslpc_put_char(%d) on %s\n",
+ __FILE__, __LINE__, ch, info->device_name);
}
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
@@ -1508,7 +1505,7 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
if (!info->tx_buf)
return 0;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) {
if (info->tx_count < TXBUFSIZE - 1) {
@@ -1518,7 +1515,7 @@ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
}
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 1;
}
@@ -1531,8 +1528,8 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
- __FILE__,__LINE__,info->device_name,info->tx_count);
+ printk("%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
+ __FILE__, __LINE__, info->device_name, info->tx_count);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
return;
@@ -1542,13 +1539,13 @@ static void mgslpc_flush_chars(struct tty_struct *tty)
return;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
- __FILE__,__LINE__,info->device_name);
+ printk("%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
+ __FILE__, __LINE__, info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_active)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Send a block of data
@@ -1569,8 +1566,8 @@ static int mgslpc_write(struct tty_struct * tty,
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_write(%s) count=%d\n",
- __FILE__,__LINE__,info->device_name,count);
+ printk("%s(%d):mgslpc_write(%s) count=%d\n",
+ __FILE__, __LINE__, info->device_name, count);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") ||
!info->tx_buf)
@@ -1596,26 +1593,26 @@ static int mgslpc_write(struct tty_struct * tty,
memcpy(info->tx_buf + info->tx_put, buf, c);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1);
info->tx_count += c;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
buf += c;
count -= c;
ret += c;
}
start:
- if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
- spin_lock_irqsave(&info->lock,flags);
+ if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_active)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
- }
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
+ }
cleanup:
if (debug_level >= DEBUG_LEVEL_INFO)
- printk( "%s(%d):mgslpc_write(%s) returning=%d\n",
- __FILE__,__LINE__,info->device_name,ret);
+ printk("%s(%d):mgslpc_write(%s) returning=%d\n",
+ __FILE__, __LINE__, info->device_name, ret);
return ret;
}
@@ -1643,7 +1640,7 @@ static int mgslpc_write_room(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_write_room(%s)=%d\n",
- __FILE__,__LINE__, info->device_name, ret);
+ __FILE__, __LINE__, info->device_name, ret);
return ret;
}
@@ -1656,7 +1653,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer"))
return 0;
@@ -1668,7 +1665,7 @@ static int mgslpc_chars_in_buffer(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
- __FILE__,__LINE__, info->device_name, rc);
+ __FILE__, __LINE__, info->device_name, rc);
return rc;
}
@@ -1682,15 +1679,15 @@ static void mgslpc_flush_buffer(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
return;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->tx_count = info->tx_put = info->tx_get = 0;
del_timer(&info->tx_timer);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
wake_up_interruptible(&tty->write_wait);
tty_wakeup(tty);
@@ -1705,17 +1702,17 @@ static void mgslpc_send_xchar(struct tty_struct *tty, char ch)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
- __FILE__,__LINE__, info->device_name, ch );
+ __FILE__, __LINE__, info->device_name, ch);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
return;
info->x_char = ch;
if (ch) {
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_enabled)
- tx_start(info, tty);
- spin_unlock_irqrestore(&info->lock,flags);
+ tx_start(info, tty);
+ spin_unlock_irqrestore(&info->lock, flags);
}
}
@@ -1728,7 +1725,7 @@ static void mgslpc_throttle(struct tty_struct * tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_throttle(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
return;
@@ -1736,11 +1733,11 @@ static void mgslpc_throttle(struct tty_struct * tty)
if (I_IXOFF(tty))
mgslpc_send_xchar(tty, STOP_CHAR(tty));
- if (tty->termios.c_cflag & CRTSCTS) {
- spin_lock_irqsave(&info->lock,flags);
+ if (tty->termios.c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock, flags);
info->serial_signals &= ~SerialSignal_RTS;
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
}
@@ -1753,7 +1750,7 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_unthrottle(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle"))
return;
@@ -1765,11 +1762,11 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
mgslpc_send_xchar(tty, START_CHAR(tty));
}
- if (tty->termios.c_cflag & CRTSCTS) {
- spin_lock_irqsave(&info->lock,flags);
+ if (tty->termios.c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock, flags);
info->serial_signals |= SerialSignal_RTS;
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
}
@@ -1807,33 +1804,33 @@ static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params)
*
* Arguments:
*
- * info pointer to device instance data
- * new_params user buffer containing new serial params
+ * info pointer to device instance data
+ * new_params user buffer containing new serial params
*
* Returns: 0 if success, otherwise error code
*/
static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty)
{
- unsigned long flags;
+ unsigned long flags;
MGSL_PARAMS tmp_params;
int err;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):set_params %s\n", __FILE__,__LINE__,
- info->device_name );
+ info->device_name);
COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
if (err) {
- if ( debug_level >= DEBUG_LEVEL_INFO )
- printk( "%s(%d):set_params(%s) user buffer copy failed\n",
- __FILE__,__LINE__,info->device_name);
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):set_params(%s) user buffer copy failed\n",
+ __FILE__, __LINE__, info->device_name);
return -EFAULT;
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
- mgslpc_change_params(info, tty);
+ mgslpc_change_params(info, tty);
return 0;
}
@@ -1851,13 +1848,13 @@ static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode)
static int set_txidle(MGSLPC_INFO * info, int idle_mode)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_txidle(%s,%d)\n", info->device_name, idle_mode);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->idle_mode = idle_mode;
tx_set_idle(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
@@ -1874,11 +1871,11 @@ static int get_interface(MGSLPC_INFO * info, int __user *if_mode)
static int set_interface(MGSLPC_INFO * info, int if_mode)
{
- unsigned long flags;
+ unsigned long flags;
unsigned char val;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_interface(%s,%d)\n", info->device_name, if_mode);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->if_mode = if_mode;
val = read_reg(info, PVR) & 0x0f;
@@ -1890,18 +1887,18 @@ static int set_interface(MGSLPC_INFO * info, int if_mode)
}
write_reg(info, PVR, val);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_txenable(%s,%d)\n", info->device_name, enable);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (enable) {
if (!info->tx_enabled)
tx_start(info, tty);
@@ -1909,18 +1906,18 @@ static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
if (info->tx_enabled)
tx_stop(info);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
static int tx_abort(MGSLPC_INFO * info)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("tx_abort(%s)\n", info->device_name);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (info->tx_active && info->tx_count &&
info->params.mode == MGSL_MODE_HDLC) {
/* clear data count so FIFO is not filled on next IRQ.
@@ -1929,18 +1926,18 @@ static int tx_abort(MGSLPC_INFO * info)
info->tx_count = info->tx_put = info->tx_get = 0;
info->tx_aborting = true;
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
static int set_rxenable(MGSLPC_INFO * info, int enable)
{
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("set_rxenable(%s,%d)\n", info->device_name, enable);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (enable) {
if (!info->rx_enabled)
rx_start(info);
@@ -1948,21 +1945,21 @@ static int set_rxenable(MGSLPC_INFO * info, int enable)
if (info->rx_enabled)
rx_stop(info);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
/* wait for specified event to occur
*
- * Arguments: info pointer to device instance data
- * mask pointer to bitmask of events to wait for
- * Return Value: 0 if successful and bit mask updated with
+ * Arguments: info pointer to device instance data
+ * mask pointer to bitmask of events to wait for
+ * Return Value: 0 if successful and bit mask updated with
* of events triggerred,
- * otherwise error code
+ * otherwise error code
*/
static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
{
- unsigned long flags;
+ unsigned long flags;
int s;
int rc=0;
struct mgsl_icount cprev, cnow;
@@ -1978,18 +1975,18 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("wait_events(%s,%d)\n", info->device_name, mask);
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
/* return immediately if state matches requested events */
get_signals(info);
s = info->serial_signals;
events = mask &
( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
- ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+ ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
if (events) {
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
goto exit;
}
@@ -2004,7 +2001,7 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&info->event_wait_q, &wait);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
for(;;) {
@@ -2015,11 +2012,11 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
}
/* get current irq counts */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cnow = info->icount;
newsigs = info->input_signal_events;
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
/* if no change, wait aborted for some reason */
if (newsigs.dsr_up == oldsigs.dsr_up &&
@@ -2058,10 +2055,10 @@ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
set_current_state(TASK_RUNNING);
if (mask & MgslEvent_ExitHuntMode) {
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!waitqueue_active(&info->event_wait_q))
irq_disable(info, CHA, IRQ_EXITHUNT);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
}
exit:
if (rc == 0)
@@ -2071,17 +2068,17 @@ exit:
static int modem_input_wait(MGSLPC_INFO *info,int arg)
{
- unsigned long flags;
+ unsigned long flags;
int rc;
struct mgsl_icount cprev, cnow;
DECLARE_WAITQUEUE(wait, current);
/* save current irq counts */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cprev = info->icount;
add_wait_queue(&info->status_event_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
for(;;) {
schedule();
@@ -2091,10 +2088,10 @@ static int modem_input_wait(MGSLPC_INFO *info,int arg)
}
/* get new irq counts */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cnow = info->icount;
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
/* if no change, wait aborted for some reason */
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
@@ -2125,11 +2122,11 @@ static int tiocmget(struct tty_struct *tty)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
unsigned int result;
- unsigned long flags;
+ unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
@@ -2140,7 +2137,7 @@ static int tiocmget(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s tiocmget() value=%08X\n",
- __FILE__,__LINE__, info->device_name, result );
+ __FILE__, __LINE__, info->device_name, result);
return result;
}
@@ -2150,11 +2147,11 @@ static int tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- unsigned long flags;
+ unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s tiocmset(%x,%x)\n",
- __FILE__,__LINE__,info->device_name, set, clear);
+ __FILE__, __LINE__, info->device_name, set, clear);
if (set & TIOCM_RTS)
info->serial_signals |= SerialSignal_RTS;
@@ -2165,9 +2162,9 @@ static int tiocmset(struct tty_struct *tty,
if (clear & TIOCM_DTR)
info->serial_signals &= ~SerialSignal_DTR;
- spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
@@ -2184,17 +2181,17 @@ static int mgslpc_break(struct tty_struct *tty, int break_state)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_break(%s,%d)\n",
- __FILE__,__LINE__, info->device_name, break_state);
+ __FILE__, __LINE__, info->device_name, break_state);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break"))
return -EINVAL;
- spin_lock_irqsave(&info->lock,flags);
- if (break_state == -1)
+ spin_lock_irqsave(&info->lock, flags);
+ if (break_state == -1)
set_reg_bits(info, CHA+DAFO, BIT6);
else
clear_reg_bits(info, CHA+DAFO, BIT6);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return 0;
}
@@ -2205,9 +2202,9 @@ static int mgslpc_get_icount(struct tty_struct *tty,
struct mgsl_icount cnow; /* kernel counter temps */
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
cnow = info->icount;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
@@ -2228,9 +2225,9 @@ static int mgslpc_get_icount(struct tty_struct *tty,
*
* Arguments:
*
- * tty pointer to tty instance data
- * cmd IOCTL command code
- * arg command argument/context
+ * tty pointer to tty instance data
+ * cmd IOCTL command code
+ * arg command argument/context
*
* Return Value: 0 if success, otherwise error code
*/
@@ -2241,8 +2238,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
void __user *argp = (void __user *)arg;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
- info->device_name, cmd );
+ printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__, __LINE__,
+ info->device_name, cmd);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl"))
return -ENODEV;
@@ -2288,8 +2285,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
*
* Arguments:
*
- * tty pointer to tty structure
- * termios pointer to buffer to hold returned old termios
+ * tty pointer to tty structure
+ * termios pointer to buffer to hold returned old termios
*/
static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
@@ -2297,8 +2294,8 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_set_termios %s\n", __FILE__,__LINE__,
- tty->driver->name );
+ printk("%s(%d):mgslpc_set_termios %s\n", __FILE__, __LINE__,
+ tty->driver->name);
/* just return if nothing has changed */
if ((tty->termios.c_cflag == old_termios->c_cflag)
@@ -2311,23 +2308,23 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
/* Handle transition to B0 status */
if (old_termios->c_cflag & CBAUD &&
!(tty->termios.c_cflag & CBAUD)) {
- info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
- spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
+ spin_lock_irqsave(&info->lock, flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) &&
tty->termios.c_cflag & CBAUD) {
info->serial_signals |= SerialSignal_DTR;
- if (!(tty->termios.c_cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!(tty->termios.c_cflag & CRTSCTS) ||
+ !test_bit(TTY_THROTTLED, &tty->flags)) {
info->serial_signals |= SerialSignal_RTS;
- }
- spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ }
+ spin_lock_irqsave(&info->lock, flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
}
/* Handle turning off CRTSCTS */
@@ -2348,15 +2345,15 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
- __FILE__,__LINE__, info->device_name, port->count);
+ __FILE__, __LINE__, info->device_name, port->count);
WARN_ON(!port->count);
if (tty_port_close_start(port, tty, filp) == 0)
goto cleanup;
- if (port->flags & ASYNC_INITIALIZED)
- mgslpc_wait_until_sent(tty, info->timeout);
+ if (port->flags & ASYNC_INITIALIZED)
+ mgslpc_wait_until_sent(tty, info->timeout);
mgslpc_flush_buffer(tty);
@@ -2367,7 +2364,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
tty_port_tty_set(port, NULL);
cleanup:
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
+ printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
tty->driver->name, port->count);
}
@@ -2378,12 +2375,12 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
unsigned long orig_jiffies, char_time;
- if (!info )
+ if (!info)
return;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent"))
return;
@@ -2399,8 +2396,8 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
* Note: use tight timings here to satisfy the NIST-PCTS.
*/
- if ( info->params.data_rate ) {
- char_time = info->timeout/(32 * 5);
+ if (info->params.data_rate) {
+ char_time = info->timeout/(32 * 5);
if (!char_time)
char_time++;
} else
@@ -2431,7 +2428,7 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
exit:
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
}
/* Called by tty_hangup() when a hangup is signaled.
@@ -2443,7 +2440,7 @@ static void mgslpc_hangup(struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_hangup(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup"))
return;
@@ -2458,9 +2455,9 @@ static int carrier_raised(struct tty_port *port)
MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
if (info->serial_signals & SerialSignal_DCD)
return 1;
@@ -2472,13 +2469,13 @@ static void dtr_rts(struct tty_port *port, int onoff)
MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (onoff)
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
- info->serial_signals &= ~SerialSignal_RTS + SerialSignal_DTR;
+ info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
}
@@ -2486,14 +2483,14 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
{
MGSLPC_INFO *info;
struct tty_port *port;
- int retval, line;
- unsigned long flags;
+ int retval, line;
+ unsigned long flags;
/* verify range of specified line number */
line = tty->index;
if (line >= mgslpc_device_count) {
printk("%s(%d):mgslpc_open with invalid line #%d.\n",
- __FILE__,__LINE__,line);
+ __FILE__, __LINE__, line);
return -ENODEV;
}
@@ -2510,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
- __FILE__,__LINE__,tty->driver->name, port->count);
+ __FILE__, __LINE__, tty->driver->name, port->count);
/* If port is closing, signal caller to try again */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
@@ -2521,7 +2518,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
goto cleanup;
}
- tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
@@ -2545,13 +2542,13 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
if (retval) {
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):block_til_ready(%s) returned %d\n",
- __FILE__,__LINE__, info->device_name, retval);
+ __FILE__, __LINE__, info->device_name, retval);
goto cleanup;
}
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgslpc_open(%s) success\n",
- __FILE__,__LINE__, info->device_name);
+ __FILE__, __LINE__, info->device_name);
retval = 0;
cleanup:
@@ -2571,9 +2568,9 @@ static inline void line_info(struct seq_file *m, MGSLPC_INFO *info)
info->device_name, info->io_base, info->irq_level);
/* output current serial signal states */
- spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
stat_buf[0] = 0;
stat_buf[1] = 0;
@@ -2635,7 +2632,7 @@ static int mgslpc_proc_show(struct seq_file *m, void *v)
seq_printf(m, "synclink driver:%s\n", driver_version);
info = mgslpc_device_list;
- while( info ) {
+ while (info) {
line_info(m, info);
info = info->next_device;
}
@@ -2674,6 +2671,14 @@ static int rx_alloc_buffers(MGSLPC_INFO *info)
if (info->rx_buf == NULL)
return -ENOMEM;
+ /* unused flag buffer to satisfy receive_buf calling interface */
+ info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
+ if (!info->flag_buf) {
+ kfree(info->rx_buf);
+ info->rx_buf = NULL;
+ return -ENOMEM;
+ }
+
rx_reset_buffers(info);
return 0;
}
@@ -2682,12 +2687,14 @@ static void rx_free_buffers(MGSLPC_INFO *info)
{
kfree(info->rx_buf);
info->rx_buf = NULL;
+ kfree(info->flag_buf);
+ info->flag_buf = NULL;
}
static int claim_resources(MGSLPC_INFO *info)
{
- if (rx_alloc_buffers(info) < 0 ) {
- printk( "Can't allocate rx buffer %s\n", info->device_name);
+ if (rx_alloc_buffers(info) < 0) {
+ printk("Can't allocate rx buffer %s\n", info->device_name);
release_resources(info);
return -ENODEV;
}
@@ -2706,8 +2713,12 @@ static void release_resources(MGSLPC_INFO *info)
*
* Arguments: info pointer to device instance data
*/
-static void mgslpc_add_device(MGSLPC_INFO *info)
+static int mgslpc_add_device(MGSLPC_INFO *info)
{
+ MGSLPC_INFO *current_dev = NULL;
+ struct device *tty_dev;
+ int ret;
+
info->next_device = NULL;
info->line = mgslpc_device_count;
sprintf(info->device_name,"ttySLP%d",info->line);
@@ -2722,8 +2733,8 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
if (!mgslpc_device_list)
mgslpc_device_list = info;
else {
- MGSLPC_INFO *current_dev = mgslpc_device_list;
- while( current_dev->next_device )
+ current_dev = mgslpc_device_list;
+ while (current_dev->next_device)
current_dev = current_dev->next_device;
current_dev->next_device = info;
}
@@ -2733,14 +2744,34 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
else if (info->max_frame_size > 65535)
info->max_frame_size = 65535;
- printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n",
+ printk("SyncLink PC Card %s:IO=%04X IRQ=%d\n",
info->device_name, info->io_base, info->irq_level);
#if SYNCLINK_GENERIC_HDLC
- hdlcdev_init(info);
+ ret = hdlcdev_init(info);
+ if (ret != 0)
+ goto failed;
#endif
- tty_port_register_device(&info->port, serial_driver, info->line,
+
+ tty_dev = tty_port_register_device(&info->port, serial_driver, info->line,
&info->p_dev->dev);
+ if (IS_ERR(tty_dev)) {
+ ret = PTR_ERR(tty_dev);
+#if SYNCLINK_GENERIC_HDLC
+ hdlcdev_exit(info);
+#endif
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ if (current_dev)
+ current_dev->next_device = NULL;
+ else
+ mgslpc_device_list = NULL;
+ mgslpc_device_count--;
+ return ret;
}
static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
@@ -3262,7 +3293,7 @@ static void rx_stop(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):rx_stop(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
/* MODE:03 RAC Receiver Active, 0=inactive */
clear_reg_bits(info, CHA + MODE, BIT3);
@@ -3275,7 +3306,7 @@ static void rx_start(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):rx_start(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
rx_reset_buffers(info);
info->rx_enabled = false;
@@ -3291,7 +3322,7 @@ static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):tx_start(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
if (info->tx_count) {
/* If auto RTS enabled and RTS is inactive, then assert */
@@ -3329,7 +3360,7 @@ static void tx_stop(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):tx_stop(%s)\n",
- __FILE__,__LINE__, info->device_name );
+ __FILE__, __LINE__, info->device_name);
del_timer(&info->tx_timer);
@@ -3575,8 +3606,8 @@ static void get_signals(MGSLPC_INFO *info)
{
unsigned char status = 0;
- /* preserve DTR and RTS */
- info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
+ /* preserve RTS and DTR */
+ info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
if (read_reg(info, CHB + VSTR) & BIT7)
info->serial_signals |= SerialSignal_DCD;
@@ -3590,7 +3621,7 @@ static void get_signals(MGSLPC_INFO *info)
info->serial_signals |= SerialSignal_DSR;
}
-/* Set the state of DTR and RTS based on contents of
+/* Set the state of RTS and DTR based on contents of
* serial_signals member of device extension.
*/
static void set_signals(MGSLPC_INFO *info)
@@ -3681,7 +3712,7 @@ static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_BH)
printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n",
- __FILE__,__LINE__,info->device_name,status,framesize);
+ __FILE__, __LINE__, info->device_name, status, framesize);
if (debug_level >= DEBUG_LEVEL_DATA)
trace_block(info, buf->data, framesize, 0);
@@ -3709,13 +3740,13 @@ static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty)
}
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
buf->status = buf->count = 0;
info->rx_frame_count--;
info->rx_get++;
if (info->rx_get >= info->rx_buf_count)
info->rx_get = 0;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return true;
}
@@ -3729,7 +3760,7 @@ static bool register_test(MGSLPC_INFO *info)
bool rc = true;
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
reset_device(info);
for (i = 0; i < count; i++) {
@@ -3742,7 +3773,7 @@ static bool register_test(MGSLPC_INFO *info)
}
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return rc;
}
@@ -3751,7 +3782,7 @@ static bool irq_test(MGSLPC_INFO *info)
unsigned long end_time;
unsigned long flags;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
reset_device(info);
info->testing_irq = true;
@@ -3765,7 +3796,7 @@ static bool irq_test(MGSLPC_INFO *info)
write_reg(info, CHA + TIMR, 0); /* 512 cycles */
issue_command(info, CHA, CMD_START_TIMER);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
end_time=100;
while(end_time-- && !info->irq_occurred) {
@@ -3774,9 +3805,9 @@ static bool irq_test(MGSLPC_INFO *info)
info->testing_irq = false;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
reset_device(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return info->irq_occurred;
}
@@ -3785,21 +3816,21 @@ static int adapter_test(MGSLPC_INFO *info)
{
if (!register_test(info)) {
info->init_error = DiagStatus_AddressFailure;
- printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
- __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
+ printk("%s(%d):Register test failure for device %s Addr=%04X\n",
+ __FILE__, __LINE__, info->device_name, (unsigned short)(info->io_base));
return -ENODEV;
}
if (!irq_test(info)) {
info->init_error = DiagStatus_IrqFailure;
- printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
- __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
+ printk("%s(%d):Interrupt test failure for device %s IRQ=%d\n",
+ __FILE__, __LINE__, info->device_name, (unsigned short)(info->irq_level));
return -ENODEV;
}
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):device %s passed diagnostics\n",
- __FILE__,__LINE__,info->device_name);
+ __FILE__, __LINE__, info->device_name);
return 0;
}
@@ -3808,9 +3839,9 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
int i;
int linecount;
if (xmit)
- printk("%s tx data:\n",info->device_name);
+ printk("%s tx data:\n", info->device_name);
else
- printk("%s rx data:\n",info->device_name);
+ printk("%s rx data:\n", info->device_name);
while(count) {
if (count > 16)
@@ -3819,12 +3850,12 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
linecount = count;
for(i=0;i<linecount;i++)
- printk("%02X ",(unsigned char)data[i]);
+ printk("%02X ", (unsigned char)data[i]);
for(;i<17;i++)
printk(" ");
for(i=0;i<linecount;i++) {
if (data[i]>=040 && data[i]<=0176)
- printk("%c",data[i]);
+ printk("%c", data[i]);
else
printk(".");
}
@@ -3843,18 +3874,18 @@ static void tx_timeout(unsigned long context)
MGSLPC_INFO *info = (MGSLPC_INFO*)context;
unsigned long flags;
- if ( debug_level >= DEBUG_LEVEL_INFO )
- printk( "%s(%d):tx_timeout(%s)\n",
- __FILE__,__LINE__,info->device_name);
- if(info->tx_active &&
- info->params.mode == MGSL_MODE_HDLC) {
+ if (debug_level >= DEBUG_LEVEL_INFO)
+ printk("%s(%d):tx_timeout(%s)\n",
+ __FILE__, __LINE__, info->device_name);
+ if (info->tx_active &&
+ info->params.mode == MGSL_MODE_HDLC) {
info->icount.txtimeout++;
}
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
info->tx_active = false;
info->tx_count = info->tx_put = info->tx_get = 0;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
@@ -3936,7 +3967,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
+ printk(KERN_INFO "%s:hdlc_xmit(%s)\n", __FILE__, dev->name);
/* stop sending until this frame completes */
netif_stop_queue(dev);
@@ -3957,13 +3988,13 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
dev->trans_start = jiffies;
/* start hardware transmitter if necessary */
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
if (!info->tx_active) {
struct tty_struct *tty = tty_port_tty_get(&info->port);
- tx_start(info, tty);
- tty_kref_put(tty);
+ tx_start(info, tty);
+ tty_kref_put(tty);
}
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
return NETDEV_TX_OK;
}
@@ -3984,10 +4015,11 @@ static int hdlcdev_open(struct net_device *dev)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
+ printk("%s:hdlcdev_open(%s)\n", __FILE__, dev->name);
/* generic HDLC layer open processing */
- if ((rc = hdlc_open(dev)))
+ rc = hdlc_open(dev);
+ if (rc != 0)
return rc;
/* arbitrate between network and tty opens */
@@ -4002,15 +4034,16 @@ static int hdlcdev_open(struct net_device *dev)
tty = tty_port_tty_get(&info->port);
/* claim resources and init adapter */
- if ((rc = startup(info, tty)) != 0) {
+ rc = startup(info, tty);
+ if (rc != 0) {
tty_kref_put(tty);
spin_lock_irqsave(&info->netlock, flags);
info->netcount=0;
spin_unlock_irqrestore(&info->netlock, flags);
return rc;
}
- /* assert DTR and RTS, apply hardware settings */
- info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
+ /* assert RTS and DTR, apply hardware settings */
+ info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
mgslpc_program_hw(info, tty);
tty_kref_put(tty);
@@ -4044,7 +4077,7 @@ static int hdlcdev_close(struct net_device *dev)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
+ printk("%s:hdlcdev_close(%s)\n", __FILE__, dev->name);
netif_stop_queue(dev);
@@ -4078,7 +4111,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
unsigned int flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+ printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
/* return error if TTY interface open */
if (info->port.count)
@@ -4179,14 +4212,14 @@ static void hdlcdev_tx_timeout(struct net_device *dev)
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("hdlcdev_tx_timeout(%s)\n",dev->name);
+ printk("hdlcdev_tx_timeout(%s)\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
- spin_lock_irqsave(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
tx_stop(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_unlock_irqrestore(&info->lock, flags);
netif_wake_queue(dev);
}
@@ -4217,7 +4250,7 @@ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size)
struct net_device *dev = info->netdev;
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("hdlcdev_rx(%s)\n",dev->name);
+ printk("hdlcdev_rx(%s)\n", dev->name);
if (skb == NULL) {
printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
@@ -4260,8 +4293,9 @@ static int hdlcdev_init(MGSLPC_INFO *info)
/* allocate and initialize network and HDLC layer objects */
- if (!(dev = alloc_hdlcdev(info))) {
- printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
+ dev = alloc_hdlcdev(info);
+ if (dev == NULL) {
+ printk(KERN_ERR "%s:hdlc device allocation failure\n", __FILE__);
return -ENOMEM;
}
@@ -4280,8 +4314,9 @@ static int hdlcdev_init(MGSLPC_INFO *info)
hdlc->xmit = hdlcdev_xmit;
/* register objects with HDLC layer */
- if ((rc = register_hdlc_device(dev))) {
- printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
+ rc = register_hdlc_device(dev);
+ if (rc) {
+ printk(KERN_WARNING "%s:unable to register hdlc device\n", __FILE__);
free_netdev(dev);
return rc;
}
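
The synclink_cs hunks above consistently switch from adding SerialSignal_* flags to ORing them, and dtr_rts() gains the parentheses that the old "~SerialSignal_RTS + SerialSignal_DTR" expression was missing. The standalone userspace sketch below is not part of the patch (FLAG_A and FLAG_B are made-up values, not the real SerialSignal_* masks); it shows why building a mask with | and inverting the whole mask is the safe idiom, while ~A + B depends on operator precedence and on carries between bit positions:

#include <stdio.h>

/* Hypothetical single-bit flags for illustration only; the real
 * SerialSignal_* values are defined by the synclink headers. */
#define FLAG_A 0x02
#define FLAG_B 0x08

int main(void)
{
	unsigned char signals = FLAG_A | FLAG_B | 0x80;	/* 0x80 must survive the clear */

	/* Old style: '~' binds tighter than '+', so this is (~FLAG_A) + FLAG_B,
	 * which is no longer a clean two-bit mask once the addition carries. */
	unsigned char old_style = signals & (~FLAG_A + FLAG_B);

	/* New style used throughout the patch: OR the flags, invert the whole mask. */
	unsigned char new_style = signals & ~(FLAG_A | FLAG_B);

	printf("old_style=0x%02X new_style=0x%02X\n", old_style, new_style);
	/* With these values old_style also drops the unrelated 0x80 bit;
	 * new_style clears only FLAG_A and FLAG_B. */
	return 0;
}

With single-bit flags that are distinct and not already set, + and | happen to produce the same value, which is why the old spelling mostly survived; the | form stays correct in every case, which is what the patch standardizes on.
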
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 85e81ec1451e..594bda9dcfc8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -445,7 +445,7 @@ static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
.name = "input",
.limit = 1,
- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
.pool = input_pool_data
};
@@ -454,7 +454,7 @@ static struct entropy_store blocking_pool = {
.name = "blocking",
.limit = 1,
.pull = &input_pool,
- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
.pool = blocking_pool_data
};
@@ -462,7 +462,7 @@ static struct entropy_store nonblocking_pool = {
.poolinfo = &poolinfo_table[1],
.name = "nonblocking",
.pull = &input_pool,
- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
+ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
.pool = nonblocking_pool_data
};
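
The random.c change drops the address-of operator because __SPIN_LOCK_UNLOCKED() expects the name of the lock variable, which it only uses to label the lock in lockdep/debug builds, not a pointer to it. A minimal sketch of the same static-initializer pattern, with made-up names (struct pool and my_pool are placeholders, not kernel symbols):

#include <linux/spinlock.h>

struct pool {
	spinlock_t lock;
	int count;
};

/* __SPIN_LOCK_UNLOCKED() takes the lock variable's name, not its address;
 * the name is only stringified to label the lock for lockdep. */
static struct pool my_pool = {
	.lock	= __SPIN_LOCK_UNLOCKED(my_pool.lock),
	.count	= 0,
};
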
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index d780295a1473..6386a98e43c1 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1142,7 +1142,7 @@ static int sonypi_acpi_add(struct acpi_device *device)
return 0;
}
-static int sonypi_acpi_remove(struct acpi_device *device, int type)
+static int sonypi_acpi_remove(struct acpi_device *device)
{
sonypi_acpi_device = NULL;
return 0;
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 915875e431d2..dbfd56446c31 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -75,10 +75,20 @@ config TCG_INFINEON
config TCG_IBMVTPM
tristate "IBM VTPM Interface"
- depends on PPC64
+ depends on PPC_PSERIES
---help---
If you have IBM virtual TPM (VTPM) support say Yes and it
will be accessible from within Linux. To compile this driver
as a module, choose M here; the module will be called tpm_ibmvtpm.
+config TCG_ST33_I2C
+ tristate "STMicroelectronics ST33 I2C TPM"
+ depends on I2C
+ depends on GPIOLIB
+ ---help---
+ If you have a TPM security chip from STMicroelectronics working with
+ an I2C bus say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here; the module will be
+ called tpm_stm_st33_i2c.
+
endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 5b3fc8bc6c13..a3736c97c65a 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
+obj-$(CONFIG_TCG_ST33_I2C) += tpm_i2c_stm_st33.o
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 93211df52aab..0d2e82f95577 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -40,8 +40,9 @@ enum tpm_duration {
};
#define TPM_MAX_ORDINAL 243
-#define TPM_MAX_PROTECTED_ORDINAL 12
-#define TPM_PROTECTED_ORDINAL_MASK 0xFF
+#define TSC_MAX_ORDINAL 12
+#define TPM_PROTECTED_COMMAND 0x00
+#define TPM_CONNECTION_COMMAND 0x40
/*
* Bug workaround - some TPM's don't flush the most
@@ -65,21 +66,6 @@ static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
* values of the SHORT, MEDIUM, and LONG durations are retrieved
* from the chip during initialization with a call to tpm_get_timeouts.
*/
-static const u8 tpm_protected_ordinal_duration[TPM_MAX_PROTECTED_ORDINAL] = {
- TPM_UNDEFINED, /* 0 */
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_UNDEFINED, /* 5 */
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_UNDEFINED,
- TPM_SHORT, /* 10 */
- TPM_SHORT,
-};
-
static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = {
TPM_UNDEFINED, /* 0 */
TPM_UNDEFINED,
@@ -351,14 +337,11 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
{
int duration_idx = TPM_UNDEFINED;
int duration = 0;
+ u8 category = (ordinal >> 24) & 0xFF;
- if (ordinal < TPM_MAX_ORDINAL)
+ if ((category == TPM_PROTECTED_COMMAND && ordinal < TPM_MAX_ORDINAL) ||
+ (category == TPM_CONNECTION_COMMAND && ordinal < TSC_MAX_ORDINAL))
duration_idx = tpm_ordinal_duration[ordinal];
- else if ((ordinal & TPM_PROTECTED_ORDINAL_MASK) <
- TPM_MAX_PROTECTED_ORDINAL)
- duration_idx =
- tpm_protected_ordinal_duration[ordinal &
- TPM_PROTECTED_ORDINAL_MASK];
if (duration_idx != TPM_UNDEFINED)
duration = chip->vendor.duration[duration_idx];
@@ -410,7 +393,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
chip->vendor.req_complete_val)
goto out_recv;
- if ((status == chip->vendor.req_canceled)) {
+ if (chip->vendor.req_canceled(chip, status)) {
dev_err(chip->dev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
@@ -468,7 +451,7 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
return -EFAULT;
err = be32_to_cpu(cmd->header.out.return_code);
- if (err != 0)
+ if (err != 0 && desc)
dev_err(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
return err;
@@ -528,6 +511,25 @@ void tpm_gen_interrupt(struct tpm_chip *chip)
}
EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
+#define TPM_ORD_STARTUP cpu_to_be32(153)
+#define TPM_ST_CLEAR cpu_to_be16(1)
+#define TPM_ST_STATE cpu_to_be16(2)
+#define TPM_ST_DEACTIVATED cpu_to_be16(3)
+static const struct tpm_input_header tpm_startup_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(12),
+ .ordinal = TPM_ORD_STARTUP
+};
+
+static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
+{
+ struct tpm_cmd_t start_cmd;
+ start_cmd.header.in = tpm_startup_header;
+ start_cmd.params.startup_in.startup_type = startup_type;
+ return transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE,
+ "attempting to start the TPM");
+}
+
int tpm_get_timeouts(struct tpm_chip *chip)
{
struct tpm_cmd_t tpm_cmd;
@@ -541,11 +543,28 @@ int tpm_get_timeouts(struct tpm_chip *chip)
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, NULL);
- rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
- "attempting to determine the timeouts");
- if (rc)
+ if (rc == TPM_ERR_INVALID_POSTINIT) {
+ /* The TPM is not started, we are the first to talk to it.
+ Execute a startup command. */
+ dev_info(chip->dev, "Issuing TPM_STARTUP");
+ if (tpm_startup(chip, TPM_ST_CLEAR))
+ return rc;
+
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ NULL);
+ }
+ if (rc) {
+ dev_err(chip->dev,
+ "A TPM error (%zd) occurred attempting to determine the timeouts\n",
+ rc);
goto duration;
+ }
if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
be32_to_cpu(tpm_cmd.header.out.length)
@@ -824,7 +843,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
{
int rc;
unsigned int loops;
- unsigned int delay_msec = 1000;
+ unsigned int delay_msec = 100;
unsigned long duration;
struct tpm_cmd_t cmd;
@@ -845,6 +864,14 @@ int tpm_do_selftest(struct tpm_chip *chip)
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0);
rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE);
+ /* Some buggy TPMs will not respond to tpm_tis_ready() for
+ * around 300ms while the self test is ongoing, keep trying
+ * until the self test duration expires. */
+ if (rc == -ETIME) {
+ dev_info(chip->dev, HW_ERR "TPM command timed out during continue self test");
+ msleep(delay_msec);
+ continue;
+ }
if (rc < TPM_HEADER_SIZE)
return -EFAULT;
@@ -1075,12 +1102,28 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
}
EXPORT_SYMBOL_GPL(tpm_store_cancel);
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel,
+ bool *canceled)
+{
+ u8 status = chip->vendor.status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->vendor.req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue)
+ wait_queue_head_t *queue, bool check_cancel)
{
unsigned long stop;
long rc;
u8 status;
+ bool canceled = false;
/* check current status */
status = chip->vendor.status(chip);
@@ -1095,11 +1138,14 @@ again:
if ((long)timeout <= 0)
return -ETIME;
rc = wait_event_interruptible_timeout(*queue,
- ((chip->vendor.status(chip)
- & mask) == mask),
- timeout);
- if (rc > 0)
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
return 0;
+ }
if (rc == -ERESTARTSYS && freezing(current)) {
clear_thread_flag(TIF_SIGPENDING);
goto again;
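
In the TPM core hunks above, vendor.req_canceled stops being a status constant that the core compares against and becomes a per-driver predicate; tpm_transmit() and the new wait_for_tpm_stat_cond() call it to decide whether a pending command was canceled. A hedged sketch of what a bus driver supplies after this change; my_driver_* and MY_STATUS_READY are placeholders, while the real implementations appear in the tpm_atmel.c and tpm_i2c_infineon.c hunks further down:

#include "tpm.h"

#define MY_STATUS_READY 0x40	/* placeholder "ready" status value */

static bool my_driver_req_canceled(struct tpm_chip *chip, u8 status)
{
	/* Each driver now decides what a canceled request looks like,
	 * instead of the core comparing status to a stored constant. */
	return status == MY_STATUS_READY;
}

static struct tpm_vendor_specific my_driver_vendor = {
	/* .send, .recv, .status, .cancel, masks, etc. as before ... */
	.req_canceled = my_driver_req_canceled,
};
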
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 8ef7649a50aa..81b52015f669 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -47,6 +47,7 @@ enum tpm_addr {
#define TPM_WARN_DOING_SELFTEST 0x802
#define TPM_ERR_DEACTIVATED 0x6
#define TPM_ERR_DISABLED 0x7
+#define TPM_ERR_INVALID_POSTINIT 38
#define TPM_HEADER_SIZE 10
extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
@@ -77,7 +78,7 @@ struct tpm_chip;
struct tpm_vendor_specific {
const u8 req_complete_mask;
const u8 req_complete_val;
- const u8 req_canceled;
+ bool (*req_canceled)(struct tpm_chip *chip, u8 status);
void __iomem *iobase; /* ioremapped address */
unsigned long base; /* TPM base address */
@@ -100,13 +101,19 @@ struct tpm_vendor_specific {
bool timeout_adjusted;
unsigned long duration[3]; /* jiffies */
bool duration_adjusted;
- void *data;
+ void *priv;
wait_queue_head_t read_queue;
wait_queue_head_t int_queue;
+
+ u16 manufacturer_id;
};
+#define TPM_VPRIV(c) (c)->vendor.priv
+
#define TPM_VID_INTEL 0x8086
+#define TPM_VID_WINBOND 0x1050
+#define TPM_VID_STM 0x104A
struct tpm_chip {
struct device *dev; /* Device stuff */
@@ -154,13 +161,13 @@ struct tpm_input_header {
__be16 tag;
__be32 length;
__be32 ordinal;
-}__attribute__((packed));
+} __packed;
struct tpm_output_header {
__be16 tag;
__be32 length;
__be32 return_code;
-}__attribute__((packed));
+} __packed;
struct stclear_flags_t {
__be16 tag;
@@ -169,14 +176,14 @@ struct stclear_flags_t {
u8 physicalPresence;
u8 physicalPresenceLock;
u8 bGlobalLock;
-}__attribute__((packed));
+} __packed;
struct tpm_version_t {
u8 Major;
u8 Minor;
u8 revMajor;
u8 revMinor;
-}__attribute__((packed));
+} __packed;
struct tpm_version_1_2_t {
__be16 tag;
@@ -184,20 +191,20 @@ struct tpm_version_1_2_t {
u8 Minor;
u8 revMajor;
u8 revMinor;
-}__attribute__((packed));
+} __packed;
struct timeout_t {
__be32 a;
__be32 b;
__be32 c;
__be32 d;
-}__attribute__((packed));
+} __packed;
struct duration_t {
__be32 tpm_short;
__be32 tpm_medium;
__be32 tpm_long;
-}__attribute__((packed));
+} __packed;
struct permanent_flags_t {
__be16 tag;
@@ -221,7 +228,7 @@ struct permanent_flags_t {
u8 tpmEstablished;
u8 maintenanceDone;
u8 disableFullDALogicInfo;
-}__attribute__((packed));
+} __packed;
typedef union {
struct permanent_flags_t perm_flags;
@@ -239,12 +246,12 @@ struct tpm_getcap_params_in {
__be32 cap;
__be32 subcap_size;
__be32 subcap;
-}__attribute__((packed));
+} __packed;
struct tpm_getcap_params_out {
__be32 cap_size;
cap_t cap;
-}__attribute__((packed));
+} __packed;
struct tpm_readpubek_params_out {
u8 algorithm[4];
@@ -255,7 +262,7 @@ struct tpm_readpubek_params_out {
__be32 keysize;
u8 modulus[256];
u8 checksum[20];
-}__attribute__((packed));
+} __packed;
typedef union {
struct tpm_input_header in;
@@ -265,16 +272,16 @@ typedef union {
#define TPM_DIGEST_SIZE 20
struct tpm_pcrread_out {
u8 pcr_result[TPM_DIGEST_SIZE];
-}__attribute__((packed));
+} __packed;
struct tpm_pcrread_in {
__be32 pcr_idx;
-}__attribute__((packed));
+} __packed;
struct tpm_pcrextend_in {
__be32 pcr_idx;
u8 hash[TPM_DIGEST_SIZE];
-}__attribute__((packed));
+} __packed;
/* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
* bytes, but 128 is still a relatively large number of random bytes and
@@ -285,11 +292,15 @@ struct tpm_pcrextend_in {
struct tpm_getrandom_out {
__be32 rng_data_len;
u8 rng_data[TPM_MAX_RNG_DATA];
-}__attribute__((packed));
+} __packed;
struct tpm_getrandom_in {
__be32 num_bytes;
-}__attribute__((packed));
+} __packed;
+
+struct tpm_startup_in {
+ __be16 startup_type;
+} __packed;
typedef union {
struct tpm_getcap_params_out getcap_out;
@@ -301,12 +312,13 @@ typedef union {
struct tpm_pcrextend_in pcrextend_in;
struct tpm_getrandom_in getrandom_in;
struct tpm_getrandom_out getrandom_out;
+ struct tpm_startup_in startup_in;
} tpm_cmd_params;
struct tpm_cmd_t {
tpm_cmd_header header;
tpm_cmd_params params;
-}__attribute__((packed));
+} __packed;
ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *);
@@ -326,7 +338,7 @@ extern void tpm_remove_hardware(struct device *);
extern int tpm_pm_suspend(struct device *);
extern int tpm_pm_resume(struct device *);
extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
- wait_queue_head_t *);
+ wait_queue_head_t *, bool);
#ifdef CONFIG_ACPI
extern int tpm_add_ppi(struct kobject *);
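
The tpm.h hunks replace the open-coded attribute with the kernel's __packed shorthand (pulled in via linux/compiler.h). A minimal sketch of the spelling, using a made-up structure that only mirrors the wire headers above:

#include <linux/compiler.h>
#include <linux/types.h>

/* example_wire_header is illustrative, not a real TPM structure. */
struct example_wire_header {
	__be16 tag;
	__be32 length;
	__be32 ordinal;
} __packed;		/* same meaning as __attribute__((packed)) */
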
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
index 56051d0c97a2..64420b3396a2 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_acpi.c
@@ -33,13 +33,13 @@ struct acpi_tcpa {
u16 platform_class;
union {
struct client_hdr {
- u32 log_max_len __attribute__ ((packed));
- u64 log_start_addr __attribute__ ((packed));
+ u32 log_max_len __packed;
+ u64 log_start_addr __packed;
} client;
struct server_hdr {
u16 reserved;
- u64 log_max_len __attribute__ ((packed));
- u64 log_start_addr __attribute__ ((packed));
+ u64 log_max_len __packed;
+ u64 log_start_addr __packed;
} server;
};
};
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 678d57019dc4..99d6820c611d 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -116,6 +116,11 @@ static u8 tpm_atml_status(struct tpm_chip *chip)
return ioread8(chip->vendor.iobase + 1);
}
+static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == ATML_STATUS_READY);
+}
+
static const struct file_operations atmel_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -147,7 +152,7 @@ static const struct tpm_vendor_specific tpm_atmel = {
.status = tpm_atml_status,
.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
.req_complete_val = ATML_STATUS_DATA_AVAIL,
- .req_canceled = ATML_STATUS_READY,
+ .req_canceled = tpm_atml_req_canceled,
.attr_group = &atmel_attr_grp,
.miscdev = { .fops = &atmel_ops, },
};
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index fb447bd0cb61..8fe7ac3d095b 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -505,6 +505,11 @@ out_err:
return rc;
}
+static bool tpm_tis_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
static const struct file_operations tis_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -550,7 +555,7 @@ static struct tpm_vendor_specific tpm_tis_i2c = {
.cancel = tpm_tis_i2c_ready,
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- .req_canceled = TPM_STS_COMMAND_READY,
+ .req_canceled = tpm_tis_i2c_req_canceled,
.attr_group = &tis_attr_grp,
.miscdev.fops = &tis_ops,
};
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
new file mode 100644
index 000000000000..1f5f71e14abe
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -0,0 +1,887 @@
+/*
+ * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009, 2010 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * STMicroelectronics version 1.2.0, Copyright (C) 2010
+ * STMicroelectronics comes with ABSOLUTELY NO WARRANTY.
+ * This is free software, and you are welcome to redistribute it
+ * under certain conditions.
+ *
+ * @Author: Christophe RICARD tpmsupport@st.com
+ *
+ * @File: tpm_stm_st33_i2c.c
+ *
+ * @Synopsis:
+ * 09/15/2010:	First shot of the driver; the tpm_tis LPC driver
+ *		is used as a model.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "tpm.h"
+#include "tpm_i2c_stm_st33.h"
+
+enum stm33zp24_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+ TPM_ACCESS_REQUEST_PENDING = 0x04,
+ TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum stm33zp24_status {
+ TPM_STS_VALID = 0x80,
+ TPM_STS_COMMAND_READY = 0x40,
+ TPM_STS_GO = 0x20,
+ TPM_STS_DATA_AVAIL = 0x10,
+ TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum stm33zp24_int_flags {
+ TPM_GLOBAL_INT_ENABLE = 0x80,
+ TPM_INTF_CMD_READY_INT = 0x080,
+ TPM_INTF_FIFO_AVALAIBLE_INT = 0x040,
+ TPM_INTF_WAKE_UP_READY_INT = 0x020,
+ TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+ TPM_INTF_STS_VALID_INT = 0x002,
+ TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
+enum tis_defaults {
+ TIS_SHORT_TIMEOUT = 750,
+ TIS_LONG_TIMEOUT = 2000,
+};
+
+/*
+ * write8_reg
+ * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, The length of the data
+ * @return: Returns negative errno, or else the number of bytes written.
+ */
+static int write8_reg(struct i2c_client *client, u8 tpm_register,
+ u8 *tpm_data, u16 tpm_size)
+{
+ struct st33zp24_platform_data *pin_infos;
+
+ pin_infos = client->dev.platform_data;
+
+ pin_infos->tpm_i2c_buffer[0][0] = tpm_register;
+ memcpy(&pin_infos->tpm_i2c_buffer[0][1], tpm_data, tpm_size);
+ return i2c_master_send(client, pin_infos->tpm_i2c_buffer[0],
+ tpm_size + 1);
+} /* write8_reg() */
+
+/*
+ * read8_reg
+ * Receive bytes from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: the number of bytes read on success.
+ */
+static int read8_reg(struct i2c_client *client, u8 tpm_register,
+ u8 *tpm_data, int tpm_size)
+{
+ u8 status = 0;
+ u8 data;
+
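+	/*
+	 * Select the target register by writing it with a dummy payload
+	 * byte; the subsequent i2c_master_recv() then returns the
+	 * register contents.
+	 */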
+ data = TPM_DUMMY_BYTE;
+ status = write8_reg(client, tpm_register, &data, 1);
+ if (status == 2)
+ status = i2c_master_recv(client, tpm_data, tpm_size);
+ return status;
+} /* read8_reg() */
+
+/*
+ * I2C_WRITE_DATA
+ * Send byte to the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: client, the chip description
+ * @param: tpm_register, the tpm tis register where the data should be written
+ * @param: tpm_data, the tpm_data to write inside the tpm_register
+ * @param: tpm_size, The length of the data
+ * @return: the number of bytes written on success.
+ */
+#define I2C_WRITE_DATA(client, tpm_register, tpm_data, tpm_size) \
+ (write8_reg(client, tpm_register | \
+ TPM_WRITE_DIRECTION, tpm_data, tpm_size))
+
+/*
+ * I2C_READ_DATA
+ * Recv byte from the TIS register according to the ST33ZP24 I2C protocol.
+ * @param: tpm, the chip description
+ * @param: tpm_register, the tpm tis register where the data should be read
+ * @param: tpm_data, the TPM response
+ * @param: tpm_size, the TPM response size to read.
+ * @return: the number of bytes read on success.
+ */
+#define I2C_READ_DATA(client, tpm_register, tpm_data, tpm_size) \
+ (read8_reg(client, tpm_register, tpm_data, tpm_size))
+
+/*
+ * clear_interruption
+ * clear the TPM interrupt register.
+ * @param: tpm, the chip description
+ */
+static void clear_interruption(struct i2c_client *client)
+{
+ u8 interrupt;
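+	/* Read the pending bits, write them back to acknowledge, then read again. */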
+ I2C_READ_DATA(client, TPM_INT_STATUS, &interrupt, 1);
+ I2C_WRITE_DATA(client, TPM_INT_STATUS, &interrupt, 1);
+ I2C_READ_DATA(client, TPM_INT_STATUS, &interrupt, 1);
+} /* clear_interruption() */
+
+/*
+ * _wait_for_interrupt_serirq_timeout
+ * @param: tpm, the chip description
+ * @param: timeout, the timeout of the interrupt
+ * @return: the status of the interruption.
+ */
+static long _wait_for_interrupt_serirq_timeout(struct tpm_chip *chip,
+ unsigned long timeout)
+{
+ long status;
+ struct i2c_client *client;
+ struct st33zp24_platform_data *pin_infos;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+ pin_infos = client->dev.platform_data;
+
+ status = wait_for_completion_interruptible_timeout(
+ &pin_infos->irq_detection,
+ timeout);
+ if (status > 0)
+ enable_irq(gpio_to_irq(pin_infos->io_serirq));
+ gpio_direction_input(pin_infos->io_serirq);
+
+ return status;
+} /* wait_for_interrupt_serirq_timeout() */
+
+static int wait_for_serirq_timeout(struct tpm_chip *chip, bool condition,
+ unsigned long timeout)
+{
+ int status = 2;
+ struct i2c_client *client;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ status = _wait_for_interrupt_serirq_timeout(chip, timeout);
+ if (!status) {
+ status = -EBUSY;
+	} else {
+ clear_interruption(client);
+ if (condition)
+ status = 1;
+ }
+ return status;
+}
+
+/*
+ * tpm_stm_i2c_cancel, cancel is not implemented.
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
+ */
+static void tpm_stm_i2c_cancel(struct tpm_chip *chip)
+{
+ struct i2c_client *client;
+ u8 data;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ data = TPM_STS_COMMAND_READY;
+ I2C_WRITE_DATA(client, TPM_STS, &data, 1);
+ if (chip->vendor.irq)
+ wait_for_serirq_timeout(chip, 1, chip->vendor.timeout_a);
+} /* tpm_stm_i2c_cancel() */
+
+/*
+ * tpm_stm_i2c_status returns the TPM_STS register
+ * @param: chip, the tpm chip description
+ * @return: the TPM_STS register value.
+ */
+static u8 tpm_stm_i2c_status(struct tpm_chip *chip)
+{
+ struct i2c_client *client;
+ u8 data;
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ I2C_READ_DATA(client, TPM_STS, &data, 1);
+ return data;
+} /* tpm_stm_i2c_status() */
+
+
+/*
+ * check_locality checks if the locality is active
+ * @param: chip, the tpm chip description
+ * @return: the active locality or -EACCES.
+ */
+static int check_locality(struct tpm_chip *chip)
+{
+ struct i2c_client *client;
+ u8 data;
+ u8 status;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ status = I2C_READ_DATA(client, TPM_ACCESS, &data, 1);
+ if (status && (data &
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
+ return chip->vendor.locality;
+
+ return -EACCES;
+
+} /* check_locality() */
+
+/*
+ * request_locality requests the TPM locality
+ * @param: chip, the chip description
+ * @return: the active locality or -EACCES.
+ */
+static int request_locality(struct tpm_chip *chip)
+{
+ unsigned long stop;
+ long rc;
+ struct i2c_client *client;
+ u8 data;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ if (check_locality(chip) == chip->vendor.locality)
+ return chip->vendor.locality;
+
+ data = TPM_ACCESS_REQUEST_USE;
+ rc = I2C_WRITE_DATA(client, TPM_ACCESS, &data, 1);
+ if (rc < 0)
+ goto end;
+
+ if (chip->vendor.irq) {
+ rc = wait_for_serirq_timeout(chip, (check_locality
+ (chip) >= 0),
+ chip->vendor.timeout_a);
+ if (rc > 0)
+ return chip->vendor.locality;
+	} else {
+ stop = jiffies + chip->vendor.timeout_a;
+ do {
+ if (check_locality(chip) >= 0)
+ return chip->vendor.locality;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ }
+ rc = -EACCES;
+end:
+ return rc;
+} /* request_locality() */
+
+/*
+ * release_locality release the active locality
+ * @param: chip, the tpm chip description.
+ */
+static void release_locality(struct tpm_chip *chip)
+{
+ struct i2c_client *client;
+ u8 data;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+ data = TPM_ACCESS_ACTIVE_LOCALITY;
+
+ I2C_WRITE_DATA(client, TPM_ACCESS, &data, 1);
+}
+
+/*
+ * get_burstcount return the burstcount address 0x19 0x1A
+ * @param: chip, the chip description
+ * return: the burstcount.
+ */
+static int get_burstcount(struct tpm_chip *chip)
+{
+ unsigned long stop;
+ int burstcnt, status;
+ u8 tpm_reg, temp;
+
+ struct i2c_client *client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ stop = jiffies + chip->vendor.timeout_d;
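+	/*
+	 * The burst count is spread over TPM_STS + 1 (low byte) and
+	 * TPM_STS + 2 (high byte); poll until it becomes non-zero or
+	 * timeout_d expires.
+	 */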
+ do {
+ tpm_reg = TPM_STS + 1;
+ status = I2C_READ_DATA(client, tpm_reg, &temp, 1);
+ if (status < 0)
+ goto end;
+
+ tpm_reg = tpm_reg + 1;
+ burstcnt = temp;
+ status = I2C_READ_DATA(client, tpm_reg, &temp, 1);
+ if (status < 0)
+ goto end;
+
+ burstcnt |= temp << 8;
+ if (burstcnt)
+ return burstcnt;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+
+end:
+ return -EBUSY;
+} /* get_burstcount() */
+
+/*
+ * wait_for_stat wait for a TPM_STS value
+ * @param: chip, the tpm chip description
+ * @param: mask, the value mask to wait
+ * @param: timeout, the timeout
+ * @param: queue, the wait queue.
+ * @return: the tpm status, 0 if success, -ETIME if timeout is reached.
+ */
+static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+ wait_queue_head_t *queue)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+
+ if (chip->vendor.irq) {
+ rc = wait_for_serirq_timeout(chip, ((tpm_stm_i2c_status
+ (chip) & mask) ==
+ mask), timeout);
+ if (rc > 0)
+ return 0;
+	} else {
+ stop = jiffies + timeout;
+ do {
+ msleep(TPM_TIMEOUT);
+ status = tpm_stm_i2c_status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+} /* wait_for_stat() */
+
+/*
+ * recv_data receive data
+ * @param: chip, the tpm chip description
+ * @param: buf, the buffer where the data are received
+ * @param: count, the number of data to receive
+ * @return: the number of bytes read from TPM FIFO.
+ */
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ int size = 0, burstcnt, len;
+ struct i2c_client *client;
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+
+ while (size < count &&
+ wait_for_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->vendor.timeout_c,
+ &chip->vendor.read_queue)
+ == 0) {
+ burstcnt = get_burstcount(chip);
+ len = min_t(int, burstcnt, count - size);
+ I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len);
+ size += len;
+ }
+ return size;
+}
+
+/*
+ * tpm_ioserirq_handler the serirq irq handler
+ * @param: irq, the interrupt number.
+ * @param: dev_id, the tpm_chip description.
+ * @return: the status of the handler (IRQ_HANDLED).
+ */
+static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct i2c_client *client;
+ struct st33zp24_platform_data *pin_infos;
+
+ disable_irq_nosync(irq);
+
+ client = (struct i2c_client *) TPM_VPRIV(chip);
+ pin_infos = client->dev.platform_data;
+
+ complete(&pin_infos->irq_detection);
+ return IRQ_HANDLED;
+} /* tpm_ioserirq_handler() */
+
+
+/*
+ * tpm_stm_i2c_send sends TPM commands through the I2C bus.
+ *
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
+ * @param: buf, the buffer to send.
+ * @param: len, the number of bytes to send.
+ * @return: the number of bytes sent in case of success,
+ *	    otherwise a negative value describing the issue.
+ */
+static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
+ size_t len)
+{
+	u32 status, burstcnt = 0, i, size;
+ int ret;
+ u8 data;
+ struct i2c_client *client;
+
+ if (chip == NULL)
+ return -EBUSY;
+ if (len < TPM_HEADER_SIZE)
+ return -EBUSY;
+
+ client = (struct i2c_client *)TPM_VPRIV(chip);
+
+ client->flags = 0;
+
+ ret = request_locality(chip);
+ if (ret < 0)
+ return ret;
+
+ status = tpm_stm_i2c_status(chip);
+ if ((status & TPM_STS_COMMAND_READY) == 0) {
+ tpm_stm_i2c_cancel(chip);
+ if (wait_for_stat
+ (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
+ &chip->vendor.int_queue) < 0) {
+ ret = -ETIME;
+ goto out_err;
+ }
+ }
+
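+	/*
+	 * Write all but the last byte in burst-count sized chunks, then
+	 * send the final byte on its own so TPM_STS_DATA_EXPECT can be
+	 * checked before and after it.
+	 */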
+ for (i = 0 ; i < len - 1 ;) {
+ burstcnt = get_burstcount(chip);
+ size = min_t(int, len - i - 1, burstcnt);
+		ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + i, size);
+ if (ret < 0)
+ goto out_err;
+
+ i += size;
+ }
+
+ status = tpm_stm_i2c_status(chip);
+ if ((status & TPM_STS_DATA_EXPECT) == 0) {
+ ret = -EIO;
+ goto out_err;
+ }
+
+ ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + len - 1, 1);
+ if (ret < 0)
+ goto out_err;
+
+ status = tpm_stm_i2c_status(chip);
+ if ((status & TPM_STS_DATA_EXPECT) != 0) {
+ ret = -EIO;
+ goto out_err;
+ }
+
+ data = TPM_STS_GO;
+ I2C_WRITE_DATA(client, TPM_STS, &data, 1);
+
+ return len;
+out_err:
+ tpm_stm_i2c_cancel(chip);
+ release_locality(chip);
+ return ret;
+}
+
+/*
+ * tpm_stm_i2c_recv receives the TPM response through the I2C bus.
+ * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h.
+ * @param: buf, the buffer to store the data.
+ * @param: count, the size of the buffer in bytes.
+ * @return: the number of bytes received in case of success,
+ *	    otherwise a negative value describing the issue.
+ */
+static int tpm_stm_i2c_recv(struct tpm_chip *chip, unsigned char *buf,
+ size_t count)
+{
+ int size = 0;
+ int expected;
+
+ if (chip == NULL)
+ return -EBUSY;
+
+ if (count < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ size = recv_data(chip, buf, TPM_HEADER_SIZE);
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(chip->dev, "Unable to read header\n");
+ goto out;
+ }
+
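+	/* The total response length is the big-endian u32 at offset 2 of the header. */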
+ expected = be32_to_cpu(*(__be32 *) (buf + 2));
+ if (expected > count) {
+ size = -EIO;
+ goto out;
+ }
+
+ size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (size < expected) {
+ dev_err(chip->dev, "Unable to read remainder of result\n");
+ size = -ETIME;
+ goto out;
+ }
+
+out:
+ chip->vendor.cancel(chip);
+ release_locality(chip);
+ return size;
+}
+
+static bool tpm_st33_i2c_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == TPM_STS_COMMAND_READY);
+}
+
+static const struct file_operations tpm_st33_i2c_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = tpm_read,
+ .write = tpm_write,
+ .open = tpm_open,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+
+static struct attribute *stm_tpm_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr, NULL,
+};
+
+static struct attribute_group stm_tpm_attr_grp = {
+ .attrs = stm_tpm_attrs
+};
+
+static struct tpm_vendor_specific st_i2c_tpm = {
+ .send = tpm_stm_i2c_send,
+ .recv = tpm_stm_i2c_recv,
+ .cancel = tpm_stm_i2c_cancel,
+ .status = tpm_stm_i2c_status,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = tpm_st33_i2c_req_canceled,
+ .attr_group = &stm_tpm_attr_grp,
+ .miscdev = {.fops = &tpm_st33_i2c_fops,},
+};
+
+static int interrupts;
+module_param(interrupts, int, 0444);
+MODULE_PARM_DESC(interrupts, "Enable interrupts");
+
+static int power_mgt = 1;
+module_param(power_mgt, int, 0444);
+MODULE_PARM_DESC(power_mgt, "Power Management");
+
+/*
+ * tpm_st33_i2c_probe initializes the TPM device.
+ * @param: client, the i2c_client description (TPM I2C description).
+ * @param: id, the i2c_device_id struct.
+ * @return: 0 in case of success.
+ *	 a negative value in other cases.
+ */
+static int
+tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ int err;
+ u8 intmask;
+ struct tpm_chip *chip;
+ struct st33zp24_platform_data *platform_data;
+
+ if (client == NULL) {
+ pr_info("%s: i2c client is NULL. Device not accessible.\n",
+ __func__);
+ err = -ENODEV;
+ goto end;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_info(&client->dev, "client not i2c capable\n");
+ err = -ENODEV;
+ goto end;
+ }
+
+ chip = tpm_register_hardware(&client->dev, &st_i2c_tpm);
+ if (!chip) {
+ dev_info(&client->dev, "fail chip\n");
+ err = -ENODEV;
+ goto end;
+ }
+
+ platform_data = client->dev.platform_data;
+
+ if (!platform_data) {
+ dev_info(&client->dev, "chip not available\n");
+ err = -ENODEV;
+ goto _tpm_clean_answer;
+ }
+
+ platform_data->tpm_i2c_buffer[0] =
+ kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
+ if (platform_data->tpm_i2c_buffer[0] == NULL) {
+ err = -ENOMEM;
+ goto _tpm_clean_answer;
+ }
+ platform_data->tpm_i2c_buffer[1] =
+ kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
+ if (platform_data->tpm_i2c_buffer[1] == NULL) {
+ err = -ENOMEM;
+ goto _tpm_clean_response1;
+ }
+
+ TPM_VPRIV(chip) = client;
+
+ chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
+ chip->vendor.locality = LOCALITY0;
+
+ if (power_mgt) {
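+		/* Drive LPCPD high so the TPM stays powered until suspend. */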
+ err = gpio_request(platform_data->io_lpcpd, "TPM IO_LPCPD");
+ if (err)
+ goto _gpio_init1;
+ gpio_set_value(platform_data->io_lpcpd, 1);
+ }
+
+ if (interrupts) {
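+		/*
+		 * SERIRQ setup: claim locality 0, request the GPIO, clear
+		 * any pending interrupt, install the handler, then enable
+		 * the individual sources and the global interrupt enable.
+		 */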
+ init_completion(&platform_data->irq_detection);
+ if (request_locality(chip) != LOCALITY0) {
+ err = -ENODEV;
+ goto _tpm_clean_response2;
+ }
+ err = gpio_request(platform_data->io_serirq, "TPM IO_SERIRQ");
+ if (err)
+ goto _gpio_init2;
+
+ clear_interruption(client);
+ err = request_irq(gpio_to_irq(platform_data->io_serirq),
+ &tpm_ioserirq_handler,
+ IRQF_TRIGGER_HIGH,
+ "TPM SERIRQ management", chip);
+ if (err < 0) {
+			dev_err(chip->dev, "TPM SERIRQ signals %d not available\n",
+ gpio_to_irq(platform_data->io_serirq));
+ goto _irq_set;
+ }
+
+ err = I2C_READ_DATA(client, TPM_INT_ENABLE, &intmask, 1);
+ if (err < 0)
+ goto _irq_set;
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_FIFO_AVALAIBLE_INT
+ | TPM_INTF_WAKE_UP_READY_INT
+ | TPM_INTF_LOCALITY_CHANGE_INT
+ | TPM_INTF_STS_VALID_INT
+ | TPM_INTF_DATA_AVAIL_INT;
+
+ err = I2C_WRITE_DATA(client, TPM_INT_ENABLE, &intmask, 1);
+ if (err < 0)
+ goto _irq_set;
+
+ intmask = TPM_GLOBAL_INT_ENABLE;
+ err = I2C_WRITE_DATA(client, (TPM_INT_ENABLE + 3), &intmask, 1);
+ if (err < 0)
+ goto _irq_set;
+
+ err = I2C_READ_DATA(client, TPM_INT_STATUS, &intmask, 1);
+ if (err < 0)
+ goto _irq_set;
+
+ chip->vendor.irq = interrupts;
+
+ tpm_gen_interrupt(chip);
+ }
+
+ tpm_get_timeouts(chip);
+
+ i2c_set_clientdata(client, chip);
+
+ dev_info(chip->dev, "TPM I2C Initialized\n");
+ return 0;
+_irq_set:
+ free_irq(gpio_to_irq(platform_data->io_serirq), (void *) chip);
+_gpio_init2:
+ if (interrupts)
+ gpio_free(platform_data->io_serirq);
+_gpio_init1:
+ if (power_mgt)
+ gpio_free(platform_data->io_lpcpd);
+_tpm_clean_response2:
+ kzfree(platform_data->tpm_i2c_buffer[1]);
+ platform_data->tpm_i2c_buffer[1] = NULL;
+_tpm_clean_response1:
+ kzfree(platform_data->tpm_i2c_buffer[0]);
+ platform_data->tpm_i2c_buffer[0] = NULL;
+_tpm_clean_answer:
+ tpm_remove_hardware(chip->dev);
+end:
+ pr_info("TPM I2C initialisation fail\n");
+ return err;
+}
+
+/*
+ * tpm_st33_i2c_remove removes the TPM device.
+ * @param: client, the i2c_client description (TPM I2C description).
+ * @return: 0 in case of success.
+ */
+static int tpm_st33_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = (struct tpm_chip *)i2c_get_clientdata(client);
+ struct st33zp24_platform_data *pin_infos =
+ ((struct i2c_client *) TPM_VPRIV(chip))->dev.platform_data;
+
+ if (pin_infos != NULL) {
+		free_irq(gpio_to_irq(pin_infos->io_serirq), chip);
+
+ gpio_free(pin_infos->io_serirq);
+ gpio_free(pin_infos->io_lpcpd);
+
+ tpm_remove_hardware(chip->dev);
+
+ if (pin_infos->tpm_i2c_buffer[1] != NULL) {
+ kzfree(pin_infos->tpm_i2c_buffer[1]);
+ pin_infos->tpm_i2c_buffer[1] = NULL;
+ }
+ if (pin_infos->tpm_i2c_buffer[0] != NULL) {
+ kzfree(pin_infos->tpm_i2c_buffer[0]);
+ pin_infos->tpm_i2c_buffer[0] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * tpm_st33_i2c_pm_suspend suspends the TPM device.
+ * Workaround: when suspending while no TPM application is running, suspend
+ * may fail because chip->data_buffer is not set (it is only set in tpm_open()
+ * in the Linux TPM core).
+ * @param: dev, the device to suspend.
+ * @return: 0 in case of success.
+ */
+static int tpm_st33_i2c_pm_suspend(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct st33zp24_platform_data *pin_infos = dev->platform_data;
+ int ret = 0;
+
+ if (power_mgt)
+ gpio_set_value(pin_infos->io_lpcpd, 0);
+	else {
+ if (chip->data_buffer == NULL)
+ chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
+ ret = tpm_pm_suspend(dev);
+ }
+ return ret;
+} /* tpm_st33_i2c_suspend() */
+
+/*
+ * tpm_st33_i2c_pm_resume resumes the TPM device.
+ * @param: dev, the device to resume.
+ * @return: 0 in case of success.
+ */
+static int tpm_st33_i2c_pm_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct st33zp24_platform_data *pin_infos = dev->platform_data;
+
+ int ret = 0;
+
+ if (power_mgt) {
+ gpio_set_value(pin_infos->io_lpcpd, 1);
+ ret = wait_for_serirq_timeout(chip,
+ (chip->vendor.status(chip) &
+ TPM_STS_VALID) == TPM_STS_VALID,
+ chip->vendor.timeout_b);
+	} else {
+ if (chip->data_buffer == NULL)
+ chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
+ ret = tpm_pm_resume(dev);
+ if (!ret)
+ tpm_do_selftest(chip);
+ }
+ return ret;
+} /* tpm_st33_i2c_pm_resume() */
+#endif
+
+static const struct i2c_device_id tpm_st33_i2c_id[] = {
+ {TPM_ST33_I2C, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tpm_st33_i2c_id);
+static SIMPLE_DEV_PM_OPS(tpm_st33_i2c_ops, tpm_st33_i2c_pm_suspend, tpm_st33_i2c_pm_resume);
+static struct i2c_driver tpm_st33_i2c_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = TPM_ST33_I2C,
+ .pm = &tpm_st33_i2c_ops,
+ },
+ .probe = tpm_st33_i2c_probe,
+ .remove = tpm_st33_i2c_remove,
+ .id_table = tpm_st33_i2c_id
+};
+
+module_i2c_driver(tpm_st33_i2c_driver);
+
+MODULE_AUTHOR("Christophe Ricard (tpmsupport@st.com)");
+MODULE_DESCRIPTION("STM TPM I2C ST33 Driver");
+MODULE_VERSION("1.2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.h b/drivers/char/tpm/tpm_i2c_stm_st33.h
new file mode 100644
index 000000000000..439a43249aa6
--- /dev/null
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.h
@@ -0,0 +1,61 @@
+/*
+ * STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
+ * Copyright (C) 2009, 2010 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * STMicroelectronics version 1.2.0, Copyright (C) 2010
+ * STMicroelectronics comes with ABSOLUTELY NO WARRANTY.
+ * This is free software, and you are welcome to redistribute it
+ * under certain conditions.
+ *
+ * @Author: Christophe RICARD tpmsupport@st.com
+ *
+ * @File: stm_st33_tpm_i2c.h
+ *
+ * @Date: 09/15/2010
+ */
+#ifndef __STM_ST33_TPM_I2C_MAIN_H__
+#define __STM_ST33_TPM_I2C_MAIN_H__
+
+#define TPM_ACCESS (0x0)
+#define TPM_STS (0x18)
+#define TPM_HASH_END (0x20)
+#define TPM_DATA_FIFO (0x24)
+#define TPM_HASH_DATA (0x24)
+#define TPM_HASH_START (0x28)
+#define TPM_INTF_CAPABILITY (0x14)
+#define TPM_INT_STATUS (0x10)
+#define TPM_INT_ENABLE (0x08)
+
+#define TPM_DUMMY_BYTE 0xAA
+#define TPM_WRITE_DIRECTION 0x80
+#define TPM_HEADER_SIZE 10
+#define TPM_BUFSIZE 2048
+
+#define LOCALITY0 0
+
+#define TPM_ST33_I2C "st33zp24_i2c"
+
+struct st33zp24_platform_data {
+ int io_serirq;
+ int io_lpcpd;
+ struct i2c_client *client;
+ u8 *tpm_i2c_buffer[2]; /* 0 Request 1 Response */
+ struct completion irq_detection;
+ struct mutex lock;
+};
+
+#endif /* __STM_ST33_TPM_I2C_MAIN_H__ */
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 9978609d93b2..56b07c35a13e 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -64,7 +64,7 @@ static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip)
- return (struct ibmvtpm_dev *)chip->vendor.data;
+ return (struct ibmvtpm_dev *)TPM_VPRIV(chip);
return NULL;
}
@@ -83,7 +83,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
u16 len;
int sig;
- ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
+ ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
if (!ibmvtpm->rtce_buf) {
dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
@@ -127,7 +127,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
u64 *word = (u64 *) &crq;
int rc;
- ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
+ ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
if (!ibmvtpm->rtce_buf) {
dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
@@ -398,6 +398,11 @@ static int tpm_ibmvtpm_resume(struct device *dev)
return rc;
}
+static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == 0);
+}
+
static const struct file_operations ibmvtpm_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -441,7 +446,7 @@ static const struct tpm_vendor_specific tpm_ibmvtpm = {
.status = tpm_ibmvtpm_status,
.req_complete_mask = 0,
.req_complete_val = 0,
- .req_canceled = 0,
+ .req_canceled = tpm_ibmvtpm_req_canceled,
.attr_group = &ibmvtpm_attr_grp,
.miscdev = { .fops = &ibmvtpm_ops, },
};
@@ -647,7 +652,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
ibmvtpm->dev = dev;
ibmvtpm->vdev = vio_dev;
- chip->vendor.data = (void *)ibmvtpm;
+ TPM_VPRIV(chip) = (void *)ibmvtpm;
spin_lock_init(&ibmvtpm->rtce_lock);
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 640c9a427b59..770c46f8eb30 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -227,6 +227,11 @@ static u8 tpm_nsc_status(struct tpm_chip *chip)
return inb(chip->vendor.base + NSC_STATUS);
}
+static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return (status == NSC_STATUS_RDY);
+}
+
static const struct file_operations nsc_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -258,7 +263,7 @@ static const struct tpm_vendor_specific tpm_nsc = {
.status = tpm_nsc_status,
.req_complete_mask = NSC_STATUS_OBF,
.req_complete_val = NSC_STATUS_OBF,
- .req_canceled = NSC_STATUS_RDY,
+ .req_canceled = tpm_nsc_req_canceled,
.attr_group = &nsc_attr_grp,
.miscdev = { .fops = &nsc_ops, },
};
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index ea31dafbcac2..8a41b6be23a0 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -84,6 +84,9 @@ static int is_itpm(struct pnp_dev *dev)
struct acpi_device *acpi = pnp_acpi_device(dev);
struct acpi_hardware_id *id;
+ if (!acpi)
+ return 0;
+
list_for_each_entry(id, &acpi->pnp.ids, list) {
if (!strcmp("INTC0102", id->id))
return 1;
@@ -98,6 +101,22 @@ static inline int is_itpm(struct pnp_dev *dev)
}
#endif
+/* Before we attempt to access the TPM we must see that the valid bit is set.
+ * The specification says that this bit is 0 at reset and remains 0 until the
+ * 'TPM has gone through its self test and initialization and has established
+ * correct values in the other bits.' */
+static int wait_startup(struct tpm_chip *chip, int l)
+{
+ unsigned long stop = jiffies + chip->vendor.timeout_a;
+ do {
+ if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
+ TPM_ACCESS_VALID)
+ return 0;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ return -1;
+}
+
static int check_locality(struct tpm_chip *chip, int l)
{
if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
@@ -198,7 +217,7 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
wait_for_tpm_stat(chip,
TPM_STS_DATA_AVAIL | TPM_STS_VALID,
chip->vendor.timeout_c,
- &chip->vendor.read_queue)
+ &chip->vendor.read_queue, true)
== 0) {
burstcnt = get_burstcount(chip);
for (; burstcnt > 0 && size < count; burstcnt--)
@@ -241,7 +260,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ &chip->vendor.int_queue, false);
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(chip->dev, "Error left over data\n");
@@ -277,7 +296,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
tpm_tis_ready(chip);
if (wait_for_tpm_stat
(chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
- &chip->vendor.int_queue) < 0) {
+ &chip->vendor.int_queue, false) < 0) {
rc = -ETIME;
goto out_err;
}
@@ -292,7 +311,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
}
wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ &chip->vendor.int_queue, false);
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
@@ -304,7 +323,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
iowrite8(buf[count],
chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ &chip->vendor.int_queue, false);
status = tpm_tis_status(chip);
if ((status & TPM_STS_DATA_EXPECT) != 0) {
rc = -EIO;
@@ -342,7 +361,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
if (wait_for_tpm_stat
(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
tpm_calc_ordinal_duration(chip, ordinal),
- &chip->vendor.read_queue) < 0) {
+ &chip->vendor.read_queue, false) < 0) {
rc = -ETIME;
goto out_err;
}
@@ -374,7 +393,7 @@ static int probe_itpm(struct tpm_chip *chip)
if (vendor != TPM_VID_INTEL)
return 0;
- itpm = 0;
+ itpm = false;
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0)
@@ -383,7 +402,7 @@ static int probe_itpm(struct tpm_chip *chip)
tpm_tis_ready(chip);
release_locality(chip, chip->vendor.locality, 0);
- itpm = 1;
+ itpm = true;
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0) {
@@ -400,6 +419,19 @@ out:
return rc;
}
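+/*
+ * Different vendors report different status values for a cancelled
+ * command, so the check is keyed off the manufacturer ID read at
+ * init time.
+ */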
+static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ switch (chip->vendor.manufacturer_id) {
+ case TPM_VID_WINBOND:
+ return ((status == TPM_STS_VALID) ||
+ (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
+ case TPM_VID_STM:
+ return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
+ default:
+ return (status == TPM_STS_COMMAND_READY);
+ }
+}
+
static const struct file_operations tis_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -445,7 +477,7 @@ static struct tpm_vendor_specific tpm_tis = {
.cancel = tpm_tis_ready,
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- .req_canceled = TPM_STS_COMMAND_READY,
+ .req_canceled = tpm_tis_req_canceled,
.attr_group = &tis_attr_grp,
.miscdev = {
.fops = &tis_ops,},
@@ -502,7 +534,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
return IRQ_HANDLED;
}
-static bool interrupts = 1;
+static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
@@ -528,12 +560,18 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ if (wait_startup(chip, 0) != 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
if (request_locality(chip, 0) != 0) {
rc = -ENODEV;
goto out_err;
}
vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
+ chip->vendor.manufacturer_id = vendor;
dev_info(dev,
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
@@ -545,7 +583,7 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
rc = -ENODEV;
goto out_err;
}
- itpm = (probe == 0) ? 0 : 1;
+ itpm = !!probe;
}
if (itpm)
@@ -741,10 +779,10 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
if (pnp_irq_valid(pnp_dev, 0))
irq = pnp_irq(pnp_dev, 0);
else
- interrupts = 0;
+ interrupts = false;
if (is_itpm(pnp_dev))
- itpm = 1;
+ itpm = true;
return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 684b0d53764f..ee4dbeafb377 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2062,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev)
/* Disable interrupts for vqs */
vdev->config->reset(vdev);
/* Finish up work that's lined up */
- cancel_work_sync(&portdev->control_work);
+ if (use_multiport(portdev))
+ cancel_work_sync(&portdev->control_work);
list_for_each_entry_safe(port, port2, &portdev->ports, list)
unplug_port(port);
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index ee90e87e7675..300d4775d926 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,8 +1,13 @@
# common clock types
obj-$(CONFIG_HAVE_CLK) += clk-devres.o
obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
-obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \
- clk-mux.o clk-divider.o clk-fixed-factor.o
+obj-$(CONFIG_COMMON_CLK) += clk.o
+obj-$(CONFIG_COMMON_CLK) += clk-divider.o
+obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
+obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
+obj-$(CONFIG_COMMON_CLK) += clk-gate.o
+obj-$(CONFIG_COMMON_CLK) += clk-mux.o
+
# SoCs specific
obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
@@ -20,8 +25,10 @@ endif
obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
-obj-$(CONFIG_ARCH_SUNXI) += clk-sunxi.o
obj-$(CONFIG_ARCH_ZYNQ) += clk-zynq.o
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
+
+obj-$(CONFIG_X86) += x86/
# Chip specific
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
index e69991aab43a..792bc57a9db7 100644
--- a/drivers/clk/clk-bcm2835.c
+++ b/drivers/clk/clk-bcm2835.c
@@ -20,6 +20,13 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/bcm2835.h>
+#include <linux/of.h>
+
+static const __initconst struct of_device_id clk_match[] = {
+ { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
+ { }
+};
/*
* These are fixed clocks. They're probably not all root clocks and it may
@@ -56,4 +63,6 @@ void __init bcm2835_init_clocks(void)
ret = clk_register_clkdev(clk, NULL, "20215000.uart");
if (ret)
pr_err("uart1_pclk alias not registered\n");
+
+ of_clk_init(clk_match);
}
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index a9204c69148d..68b402101170 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
+#include <linux/log2.h>
/*
* DOC: basic adjustable divider clock that cannot gate
@@ -29,8 +30,7 @@
#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
-#define div_mask(d) ((1 << (d->width)) - 1)
-#define is_power_of_two(i) !(i & ~i)
+#define div_mask(d) ((1 << ((d)->width)) - 1)
static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
{
@@ -137,7 +137,7 @@ static bool _is_valid_table_div(const struct clk_div_table *table,
static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
{
if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
- return is_power_of_two(div);
+ return is_power_of_2(div);
if (divider->table)
return _is_valid_table_div(divider->table, div);
return true;
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index a4899855c0f6..1ef271e47594 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -28,8 +28,11 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+ unsigned long long int rate;
- return parent_rate * fix->mult / fix->div;
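+	/*
+	 * Multiply in 64 bits and divide with do_div() so that
+	 * parent_rate * mult cannot overflow on 32-bit platforms.
+	 */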
+ rate = (unsigned long long int)parent_rate * fix->mult;
+ do_div(rate, fix->div);
+ return (unsigned long)rate;
}
static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index af78ed6b67ef..dc58fbd8516f 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -85,7 +85,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
/**
* of_fixed_clk_setup() - Setup function for simple fixed rate clock
*/
-void __init of_fixed_clk_setup(struct device_node *node)
+void of_fixed_clk_setup(struct device_node *node)
{
struct clk *clk;
const char *clk_name = node->name;
@@ -101,4 +101,5 @@ void __init of_fixed_clk_setup(struct device_node *node)
of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
EXPORT_SYMBOL_GPL(of_fixed_clk_setup);
+CLK_OF_DECLARE(fixed_clk, "fixed-clock", of_fixed_clk_setup);
#endif
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 52fecadf004a..2e08cb001936 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -182,8 +182,10 @@ static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
reg |= HB_PLL_EXT_ENA;
reg &= ~HB_PLL_EXT_BYPASS;
} else {
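+		/* Put the PLL into external bypass while DIVQ is updated. */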
+ writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
reg &= ~HB_PLL_DIVQ_MASK;
reg |= divq << HB_PLL_DIVQ_SHIFT;
+ writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
}
writel(reg, hbclk->reg);
@@ -314,33 +316,23 @@ static void __init hb_pll_init(struct device_node *node)
{
hb_clk_init(node, &clk_pll_ops);
}
+CLK_OF_DECLARE(hb_pll, "calxeda,hb-pll-clock", hb_pll_init);
static void __init hb_a9periph_init(struct device_node *node)
{
hb_clk_init(node, &a9periphclk_ops);
}
+CLK_OF_DECLARE(hb_a9periph, "calxeda,hb-a9periph-clock", hb_a9periph_init);
static void __init hb_a9bus_init(struct device_node *node)
{
struct clk *clk = hb_clk_init(node, &a9bclk_ops);
clk_prepare_enable(clk);
}
+CLK_OF_DECLARE(hb_a9bus, "calxeda,hb-a9bus-clock", hb_a9bus_init);
static void __init hb_emmc_init(struct device_node *node)
{
hb_clk_init(node, &periclk_ops);
}
-
-static const __initconst struct of_device_id clk_match[] = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- { .compatible = "calxeda,hb-pll-clock", .data = hb_pll_init, },
- { .compatible = "calxeda,hb-a9periph-clock", .data = hb_a9periph_init, },
- { .compatible = "calxeda,hb-a9bus-clock", .data = hb_a9bus_init, },
- { .compatible = "calxeda,hb-emmc-clock", .data = hb_emmc_init, },
- {}
-};
-
-void __init highbank_clocks_init(void)
-{
- of_clk_init(clk_match);
-}
+CLK_OF_DECLARE(hb_emmc, "calxeda,hb-emmc-clock", hb_emmc_init);
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index d098f72e1d5f..9f57bc37cd60 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -44,33 +44,23 @@ struct max77686_clk {
struct clk_lookup *lookup;
};
-static struct max77686_clk *get_max77686_clk(struct clk_hw *hw)
+static struct max77686_clk *to_max77686_clk(struct clk_hw *hw)
{
return container_of(hw, struct max77686_clk, hw);
}
static int max77686_clk_prepare(struct clk_hw *hw)
{
- struct max77686_clk *max77686;
- int ret;
-
- max77686 = get_max77686_clk(hw);
- if (!max77686)
- return -ENOMEM;
-
- ret = regmap_update_bits(max77686->iodev->regmap,
- MAX77686_REG_32KHZ, max77686->mask, max77686->mask);
+ struct max77686_clk *max77686 = to_max77686_clk(hw);
- return ret;
+ return regmap_update_bits(max77686->iodev->regmap,
+ MAX77686_REG_32KHZ, max77686->mask,
+ max77686->mask);
}
static void max77686_clk_unprepare(struct clk_hw *hw)
{
- struct max77686_clk *max77686;
-
- max77686 = get_max77686_clk(hw);
- if (!max77686)
- return;
+ struct max77686_clk *max77686 = to_max77686_clk(hw);
regmap_update_bits(max77686->iodev->regmap,
MAX77686_REG_32KHZ, max77686->mask, ~max77686->mask);
@@ -78,14 +68,10 @@ static void max77686_clk_unprepare(struct clk_hw *hw)
static int max77686_clk_is_enabled(struct clk_hw *hw)
{
- struct max77686_clk *max77686;
+ struct max77686_clk *max77686 = to_max77686_clk(hw);
int ret;
u32 val;
- max77686 = get_max77686_clk(hw);
- if (!max77686)
- return -ENOMEM;
-
ret = regmap_read(max77686->iodev->regmap,
MAX77686_REG_32KHZ, &val);
@@ -130,9 +116,8 @@ static int max77686_clk_register(struct device *dev,
if (IS_ERR(clk))
return -ENOMEM;
- max77686->lookup = devm_kzalloc(dev, sizeof(struct clk_lookup),
- GFP_KERNEL);
- if (IS_ERR(max77686->lookup))
+ max77686->lookup = kzalloc(sizeof(struct clk_lookup), GFP_KERNEL);
+ if (!max77686->lookup)
return -ENOMEM;
max77686->lookup->con_id = hw->init->name;
@@ -151,13 +136,13 @@ static int max77686_clk_probe(struct platform_device *pdev)
max77686_clks = devm_kzalloc(&pdev->dev, sizeof(struct max77686_clk *)
* MAX77686_CLKS_NUM, GFP_KERNEL);
- if (IS_ERR(max77686_clks))
+ if (!max77686_clks)
return -ENOMEM;
for (i = 0; i < MAX77686_CLKS_NUM; i++) {
max77686_clks[i] = devm_kzalloc(&pdev->dev,
sizeof(struct max77686_clk), GFP_KERNEL);
- if (IS_ERR(max77686_clks[i]))
+ if (!max77686_clks[i])
return -ENOMEM;
}
diff --git a/drivers/clk/clk-prima2.c b/drivers/clk/clk-prima2.c
index a203ecccdc4f..f8e9d0c27be2 100644
--- a/drivers/clk/clk-prima2.c
+++ b/drivers/clk/clk-prima2.c
@@ -1025,20 +1025,67 @@ static struct of_device_id rsc_ids[] = {
{},
};
+enum prima2_clk_index {
+ /* 0 1 2 3 4 5 6 7 8 9 */
+ rtc, osc, pll1, pll2, pll3, mem, sys, security, dsp, gps,
+ mf, io, cpu, uart0, uart1, uart2, tsc, i2c0, i2c1, spi0,
+ spi1, pwmc, efuse, pulse, dmac0, dmac1, nand, audio, usp0, usp1,
+ usp2, vip, gfx, mm, lcd, vpp, mmc01, mmc23, mmc45, usbpll,
+ usb0, usb1, maxclk,
+};
+
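+/*
+ * Indexed by enum prima2_clk_index; rtc and osc have no clk_hw entry
+ * because they are registered as fixed-rate clocks in
+ * sirfsoc_of_clk_init().
+ */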
+static __initdata struct clk_hw* prima2_clk_hw_array[maxclk] = {
+ NULL, /* dummy */
+ NULL,
+ &clk_pll1.hw,
+ &clk_pll2.hw,
+ &clk_pll3.hw,
+ &clk_mem.hw,
+ &clk_sys.hw,
+ &clk_security.hw,
+ &clk_dsp.hw,
+ &clk_gps.hw,
+ &clk_mf.hw,
+ &clk_io.hw,
+ &clk_cpu.hw,
+ &clk_uart0.hw,
+ &clk_uart1.hw,
+ &clk_uart2.hw,
+ &clk_tsc.hw,
+ &clk_i2c0.hw,
+ &clk_i2c1.hw,
+ &clk_spi0.hw,
+ &clk_spi1.hw,
+ &clk_pwmc.hw,
+ &clk_efuse.hw,
+ &clk_pulse.hw,
+ &clk_dmac0.hw,
+ &clk_dmac1.hw,
+ &clk_nand.hw,
+ &clk_audio.hw,
+ &clk_usp0.hw,
+ &clk_usp1.hw,
+ &clk_usp2.hw,
+ &clk_vip.hw,
+ &clk_gfx.hw,
+ &clk_mm.hw,
+ &clk_lcd.hw,
+ &clk_vpp.hw,
+ &clk_mmc01.hw,
+ &clk_mmc23.hw,
+ &clk_mmc45.hw,
+ &usb_pll_clk_hw,
+ &clk_usb0.hw,
+ &clk_usb1.hw,
+};
+
+static struct clk *prima2_clks[maxclk];
+static struct clk_onecell_data clk_data;
+
void __init sirfsoc_of_clk_init(void)
{
- struct clk *clk;
struct device_node *np;
-
- np = of_find_matching_node(NULL, clkc_ids);
- if (!np)
- panic("unable to find compatible clkc node in dtb\n");
-
- sirfsoc_clk_vbase = of_iomap(np, 0);
- if (!sirfsoc_clk_vbase)
- panic("unable to map clkc registers\n");
-
- of_node_put(np);
+ int i;
np = of_find_matching_node(NULL, rsc_ids);
if (!np)
@@ -1050,122 +1097,30 @@ void __init sirfsoc_of_clk_init(void)
of_node_put(np);
+ np = of_find_matching_node(NULL, clkc_ids);
+ if (!np)
+ return;
+
+ sirfsoc_clk_vbase = of_iomap(np, 0);
+ if (!sirfsoc_clk_vbase)
+ panic("unable to map clkc registers\n");
/* These are always available (RTC and 26MHz OSC)*/
- clk = clk_register_fixed_rate(NULL, "rtc", NULL,
+ prima2_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL,
CLK_IS_ROOT, 32768);
- BUG_ON(IS_ERR(clk));
- clk = clk_register_fixed_rate(NULL, "osc", NULL,
+	prima2_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL,
CLK_IS_ROOT, 26000000);
- BUG_ON(IS_ERR(clk));
-
- clk = clk_register(NULL, &clk_pll1.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_pll2.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_pll3.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_mem.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_sys.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_security.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b8030000.security");
- clk = clk_register(NULL, &clk_dsp.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_gps.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "a8010000.gps");
- clk = clk_register(NULL, &clk_mf.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_io.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "io");
- clk = clk_register(NULL, &clk_cpu.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "cpu");
- clk = clk_register(NULL, &clk_uart0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0050000.uart");
- clk = clk_register(NULL, &clk_uart1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0060000.uart");
- clk = clk_register(NULL, &clk_uart2.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0070000.uart");
- clk = clk_register(NULL, &clk_tsc.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0110000.tsc");
- clk = clk_register(NULL, &clk_i2c0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00e0000.i2c");
- clk = clk_register(NULL, &clk_i2c1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00f0000.i2c");
- clk = clk_register(NULL, &clk_spi0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00d0000.spi");
- clk = clk_register(NULL, &clk_spi1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0170000.spi");
- clk = clk_register(NULL, &clk_pwmc.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0130000.pwm");
- clk = clk_register(NULL, &clk_efuse.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0140000.efusesys");
- clk = clk_register(NULL, &clk_pulse.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0150000.pulsec");
- clk = clk_register(NULL, &clk_dmac0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00b0000.dma-controller");
- clk = clk_register(NULL, &clk_dmac1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0160000.dma-controller");
- clk = clk_register(NULL, &clk_nand.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0030000.nand");
- clk = clk_register(NULL, &clk_audio.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0040000.audio");
- clk = clk_register(NULL, &clk_usp0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0080000.usp");
- clk = clk_register(NULL, &clk_usp1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b0090000.usp");
- clk = clk_register(NULL, &clk_usp2.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00a0000.usp");
- clk = clk_register(NULL, &clk_vip.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00c0000.vip");
- clk = clk_register(NULL, &clk_gfx.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "98000000.graphics");
- clk = clk_register(NULL, &clk_mm.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "a0000000.multimedia");
- clk = clk_register(NULL, &clk_lcd.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "90010000.display");
- clk = clk_register(NULL, &clk_vpp.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "90020000.vpp");
- clk = clk_register(NULL, &clk_mmc01.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_mmc23.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_mmc45.hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &usb_pll_clk_hw);
- BUG_ON(IS_ERR(clk));
- clk = clk_register(NULL, &clk_usb0.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00e0000.usb");
- clk = clk_register(NULL, &clk_usb1.hw);
- BUG_ON(IS_ERR(clk));
- clk_register_clkdev(clk, NULL, "b00f0000.usb");
+
+ for (i = pll1; i < maxclk; i++) {
+ prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
+ BUG_ON(!prima2_clks[i]);
+ }
+ clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
+ clk_register_clkdev(prima2_clks[io], NULL, "io");
+ clk_register_clkdev(prima2_clks[mem], NULL, "mem");
+
+ clk_data.clks = prima2_clks;
+ clk_data.clk_num = maxclk;
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
diff --git a/drivers/clk/clk-sunxi.c b/drivers/clk/clk-sunxi.c
deleted file mode 100644
index 0e831b584ba7..000000000000
--- a/drivers/clk/clk-sunxi.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2012 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
-#include <linux/clk/sunxi.h>
-#include <linux/of.h>
-
-static const __initconst struct of_device_id clk_match[] = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- {}
-};
-
-void __init sunxi_init_clocks(void)
-{
- of_clk_init(clk_match);
-}
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index fe25570874d6..b5538bba7a10 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -41,6 +41,7 @@ struct clk_device {
#define PLL_TYPE_VT8500 0
#define PLL_TYPE_WM8650 1
+#define PLL_TYPE_WM8750 2
struct clk_pll {
struct clk_hw hw;
@@ -121,7 +122,16 @@ static long vt8500_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_device *cdev = to_clk_device(hw);
- u32 divisor = *prate / rate;
+ u32 divisor;
+
+ if (rate == 0)
+ return 0;
+
+ divisor = *prate / rate;
+
+ /* If prate / rate would be decimal, incr the divisor */
+ if (rate * divisor < *prate)
+ divisor++;
/*
* If this is a request for SDMMC we have to adjust the divisor
@@ -138,9 +148,18 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_device *cdev = to_clk_device(hw);
- u32 divisor = parent_rate / rate;
+ u32 divisor;
unsigned long flags = 0;
+ if (rate == 0)
+ return 0;
+
+ divisor = parent_rate / rate;
+
+	/* If parent_rate / rate would be decimal, incr the divisor */
+	if (rate * divisor < parent_rate)
+ divisor++;
+
if (divisor == cdev->div_mask + 1)
divisor = 0;
@@ -272,7 +291,7 @@ static __init void vtwm_device_clk_init(struct device_node *node)
rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
clk_register_clkdev(clk, clk_name, NULL);
}
-
+CLK_OF_DECLARE(vt8500_device, "via,vt8500-device-clock", vtwm_device_clk_init);
/* PLL clock related functions */
@@ -298,6 +317,16 @@ static __init void vtwm_device_clk_init(struct device_node *node)
#define WM8650_BITS_TO_VAL(m, d1, d2) \
((d2 << 13) | (d1 << 10) | (m & 0x3FF))
+/* Helper macros for PLL_WM8750 */
+#define WM8750_PLL_MUL(x) (((x >> 16) & 0xFF) + 1)
+#define WM8750_PLL_DIV(x) ((((x >> 8) & 1) + 1) * (1 << (x & 7)))
+
+#define WM8750_BITS_TO_FREQ(r, m, d1, d2) \
+ (r * (m+1) / ((d1+1) * (1 << d2)))
+
+#define WM8750_BITS_TO_VAL(f, m, d1, d2) \
+ ((f << 24) | ((m - 1) << 16) | ((d1 - 1) << 8) | d2)
+
static void vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *multiplier, u32 *prediv)
@@ -361,16 +390,87 @@ static void wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
/* if we got here, it wasn't an exact match */
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
- *multiplier = mul;
- *divisor1 = div1;
- *divisor2 = div2;
+ *multiplier = best_mul;
+ *divisor1 = best_div1;
+ *divisor2 = best_div2;
+}
+
+static u32 wm8750_get_filter(u32 parent_rate, u32 divisor1)
+{
+ /* calculate frequency (MHz) after pre-divisor */
+ u32 freq = (parent_rate / 1000000) / (divisor1 + 1);
+
+ if ((freq < 10) || (freq > 200))
+ pr_warn("%s: PLL recommended input frequency 10..200Mhz (requested %d Mhz)\n",
+ __func__, freq);
+
+ if (freq >= 166)
+ return 7;
+ else if (freq >= 104)
+ return 6;
+ else if (freq >= 65)
+ return 5;
+ else if (freq >= 42)
+ return 4;
+ else if (freq >= 26)
+ return 3;
+ else if (freq >= 16)
+ return 2;
+ else if (freq >= 10)
+ return 1;
+
+ return 0;
+}
+
+static void wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
+ u32 *filter, u32 *multiplier, u32 *divisor1, u32 *divisor2)
+{
+	int mul, div1, div2;
+ u32 best_mul, best_div1, best_div2;
+ unsigned long tclk, rate_err, best_err;
+
+ best_err = (unsigned long)-1;
+
+ /* Find the closest match (lower or equal to requested) */
+ for (div1 = 1; div1 >= 0; div1--)
+ for (div2 = 7; div2 >= 0; div2--)
+ for (mul = 0; mul <= 255; mul++) {
+ tclk = parent_rate * (mul + 1) / ((div1 + 1) * (1 << div2));
+ if (tclk > rate)
+ continue;
+ /* error will always be +ve */
+ rate_err = rate - tclk;
+ if (rate_err == 0) {
+ *filter = wm8750_get_filter(parent_rate, div1);
+ *multiplier = mul;
+ *divisor1 = div1;
+ *divisor2 = div2;
+ return;
+ }
+
+ if (rate_err < best_err) {
+ best_err = rate_err;
+ best_mul = mul;
+ best_div1 = div1;
+ best_div2 = div2;
+ }
+ }
+
+ /* if we got here, it wasn't an exact match */
+ pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
+ rate - best_err);
+
+ *filter = wm8750_get_filter(parent_rate, best_div1);
+ *multiplier = best_mul;
+ *divisor1 = best_div1;
+ *divisor2 = best_div2;
}
static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_pll *pll = to_clk_pll(hw);
- u32 mul, div1, div2;
+ u32 filter, mul, div1, div2;
u32 pll_val;
unsigned long flags = 0;
@@ -385,6 +485,9 @@ static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
wm8650_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
pll_val = WM8650_BITS_TO_VAL(mul, div1, div2);
break;
+ case PLL_TYPE_WM8750:
+ wm8750_find_pll_bits(rate, parent_rate, &filter, &mul, &div1, &div2);
+		pll_val = WM8750_BITS_TO_VAL(filter, mul, div1, div2);
+		break;
default:
pr_err("%s: invalid pll type\n", __func__);
return 0;
@@ -405,7 +508,7 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_pll *pll = to_clk_pll(hw);
- u32 mul, div1, div2;
+ u32 filter, mul, div1, div2;
long round_rate;
switch (pll->type) {
@@ -417,6 +520,9 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
break;
+	case PLL_TYPE_WM8750:
+		wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
+		round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
+		break;
default:
round_rate = 0;
}
@@ -440,6 +546,10 @@ static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
pll_freq = parent_rate * WM8650_PLL_MUL(pll_val);
pll_freq /= WM8650_PLL_DIV(pll_val);
break;
+ case PLL_TYPE_WM8750:
+ pll_freq = parent_rate * WM8750_PLL_MUL(pll_val);
+ pll_freq /= WM8750_PLL_DIV(pll_val);
+ break;
default:
pll_freq = 0;
}
@@ -502,20 +612,19 @@ static void __init vt8500_pll_init(struct device_node *node)
{
vtwm_pll_clk_init(node, PLL_TYPE_VT8500);
}
+CLK_OF_DECLARE(vt8500_pll, "via,vt8500-pll-clock", vt8500_pll_init);
static void __init wm8650_pll_init(struct device_node *node)
{
vtwm_pll_clk_init(node, PLL_TYPE_WM8650);
}
+CLK_OF_DECLARE(wm8650_pll, "wm,wm8650-pll-clock", wm8650_pll_init);
-static const __initconst struct of_device_id clk_match[] = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- { .compatible = "via,vt8500-pll-clock", .data = vt8500_pll_init, },
- { .compatible = "wm,wm8650-pll-clock", .data = wm8650_pll_init, },
- { .compatible = "via,vt8500-device-clock",
- .data = vtwm_device_clk_init, },
- { /* sentinel */ }
-};
+static void __init wm8750_pll_init(struct device_node *node)
+{
+ vtwm_pll_clk_init(node, PLL_TYPE_WM8750);
+}
+CLK_OF_DECLARE(wm8750_pll, "wm,wm8750-pll-clock", wm8750_pll_init);
void __init vtwm_clk_init(void __iomem *base)
{
@@ -524,5 +633,5 @@ void __init vtwm_clk_init(void __iomem *base)
pmc_base = base;
- of_clk_init(clk_match);
+ of_clk_init(NULL);
}
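The WM8750 PLL support added above packs a filter value, a multiplier and two dividers into one register word; the output rate works out to parent * M / ((D1 + 1) * 2^D2), with M and D1 stored minus one. A small standalone sketch of that arithmetic, using copies of the helper macros and a made-up 25 MHz reference with field values chosen only for illustration:

#include <stdio.h>

/* copies of the WM8750 helper macros added above */
#define WM8750_PLL_MUL(x)	(((x >> 16) & 0xFF) + 1)
#define WM8750_PLL_DIV(x)	((((x >> 8) & 1) + 1) * (1 << (x & 7)))
#define WM8750_BITS_TO_VAL(f, m, d1, d2) \
		((f << 24) | ((m - 1) << 16) | ((d1 - 1) << 8) | d2)

int main(void)
{
	/* reference clock and field values chosen purely for illustration */
	unsigned int parent = 25000000;
	unsigned int val = WM8750_BITS_TO_VAL(5, 40, 1, 1);

	/* the same arithmetic vtwm_pll_recalc_rate() applies for this type */
	printf("reg = 0x%08x -> %u Hz\n", val,
	       parent * WM8750_PLL_MUL(val) / WM8750_PLL_DIV(val));
	return 0;
}

Compiled and run, this prints reg = 0x05270001 -> 500000000 Hz, i.e. 25 MHz * 40 / 2.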
diff --git a/drivers/clk/clk-zynq.c b/drivers/clk/clk-zynq.c
index 37a30514fd66..b14a25f39255 100644
--- a/drivers/clk/clk-zynq.c
+++ b/drivers/clk/clk-zynq.c
@@ -81,6 +81,7 @@ static void __init zynq_pll_clk_setup(struct device_node *np)
if (WARN_ON(ret))
return;
}
+CLK_OF_DECLARE(zynq_pll, "xlnx,zynq-pll", zynq_pll_clk_setup);
struct zynq_periph_clk {
struct clk_hw hw;
@@ -187,6 +188,7 @@ static void __init zynq_periph_clk_setup(struct device_node *np)
if (WARN_ON(err))
return;
}
+CLK_OF_DECLARE(zynq_periph, "xlnx,zynq-periph-clock", zynq_periph_clk_setup);
/* CPU Clock domain is modelled as a mux with 4 children subclks, whose
* derivative rates depend on CLK_621_TRUE
@@ -366,18 +368,10 @@ static void __init zynq_cpu_clk_setup(struct device_node *np)
if (WARN_ON(err))
return;
}
-
-static const __initconst struct of_device_id zynq_clk_match[] = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- { .compatible = "xlnx,zynq-pll", .data = zynq_pll_clk_setup, },
- { .compatible = "xlnx,zynq-periph-clock",
- .data = zynq_periph_clk_setup, },
- { .compatible = "xlnx,zynq-cpu-clock", .data = zynq_cpu_clk_setup, },
- {}
-};
+CLK_OF_DECLARE(zynq_cpu, "xlnx,zynq-cpu-clock", zynq_cpu_clk_setup);
void __init xilinx_zynq_clocks_init(void __iomem *slcr)
{
slcr_base = slcr;
- of_clk_init(zynq_clk_match);
+ of_clk_init(NULL);
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 251e45d6024d..fabbfe1a9253 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
+#include <linux/init.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
@@ -35,6 +36,137 @@ static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;
+static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
+{
+ if (!c)
+ return;
+
+ seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
+ level * 3 + 1, "",
+ 30 - level * 3, c->name,
+ c->enable_count, c->prepare_count, c->rate);
+ seq_printf(s, "\n");
+}
+
+static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
+ int level)
+{
+ struct clk *child;
+ struct hlist_node *tmp;
+
+ if (!c)
+ return;
+
+ clk_summary_show_one(s, c, level);
+
+ hlist_for_each_entry(child, tmp, &c->children, child_node)
+ clk_summary_show_subtree(s, child, level + 1);
+}
+
+static int clk_summary_show(struct seq_file *s, void *data)
+{
+ struct clk *c;
+ struct hlist_node *tmp;
+
+ seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
+ seq_printf(s, "---------------------------------------------------------------------\n");
+
+ mutex_lock(&prepare_lock);
+
+ hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
+ clk_summary_show_subtree(s, c, 0);
+
+ hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
+ clk_summary_show_subtree(s, c, 0);
+
+ mutex_unlock(&prepare_lock);
+
+ return 0;
+}
+
+
+static int clk_summary_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_summary_show, inode->i_private);
+}
+
+static const struct file_operations clk_summary_fops = {
+ .open = clk_summary_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
+{
+ if (!c)
+ return;
+
+ seq_printf(s, "\"%s\": { ", c->name);
+ seq_printf(s, "\"enable_count\": %d,", c->enable_count);
+ seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
+ seq_printf(s, "\"rate\": %lu", c->rate);
+}
+
+static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
+{
+ struct clk *child;
+ struct hlist_node *tmp;
+
+ if (!c)
+ return;
+
+ clk_dump_one(s, c, level);
+
+ hlist_for_each_entry(child, tmp, &c->children, child_node) {
+ seq_printf(s, ",");
+ clk_dump_subtree(s, child, level + 1);
+ }
+
+ seq_printf(s, "}");
+}
+
+static int clk_dump(struct seq_file *s, void *data)
+{
+ struct clk *c;
+ struct hlist_node *tmp;
+ bool first_node = true;
+
+ seq_printf(s, "{");
+
+ mutex_lock(&prepare_lock);
+
+ hlist_for_each_entry(c, tmp, &clk_root_list, child_node) {
+ if (!first_node)
+ seq_printf(s, ",");
+ first_node = false;
+ clk_dump_subtree(s, c, 0);
+ }
+
+ hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) {
+ seq_printf(s, ",");
+ clk_dump_subtree(s, c, 0);
+ }
+
+ mutex_unlock(&prepare_lock);
+
+ seq_printf(s, "}");
+ return 0;
+}
+
+
+static int clk_dump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, clk_dump, inode->i_private);
+}
+
+static const struct file_operations clk_dump_fops = {
+ .open = clk_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
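With this patch applied, the two files show up under the clk directory in debugfs. A minimal userspace sketch that dumps clk_summary, assuming debugfs is mounted at the conventional /sys/kernel/debug location:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/clk/clk_summary", "r");

	if (!f) {
		perror("clk_summary");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one row per clock in the tree */
	fclose(f);
	return 0;
}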
/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
@@ -168,12 +300,23 @@ static int __init clk_debug_init(void)
{
struct clk *clk;
struct hlist_node *tmp;
+ struct dentry *d;
rootdir = debugfs_create_dir("clk", NULL);
if (!rootdir)
return -ENOMEM;
+ d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
+ &clk_summary_fops);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
+ &clk_dump_fops);
+ if (!d)
+ return -ENOMEM;
+
orphandir = debugfs_create_dir("orphans", rootdir);
if (!orphandir)
@@ -259,32 +402,33 @@ late_initcall(clk_disable_unused);
/*** helper functions ***/
-inline const char *__clk_get_name(struct clk *clk)
+const char *__clk_get_name(struct clk *clk)
{
return !clk ? NULL : clk->name;
}
+EXPORT_SYMBOL_GPL(__clk_get_name);
-inline struct clk_hw *__clk_get_hw(struct clk *clk)
+struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->hw;
}
-inline u8 __clk_get_num_parents(struct clk *clk)
+u8 __clk_get_num_parents(struct clk *clk)
{
return !clk ? 0 : clk->num_parents;
}
-inline struct clk *__clk_get_parent(struct clk *clk)
+struct clk *__clk_get_parent(struct clk *clk)
{
return !clk ? NULL : clk->parent;
}
-inline unsigned int __clk_get_enable_count(struct clk *clk)
+unsigned int __clk_get_enable_count(struct clk *clk)
{
return !clk ? 0 : clk->enable_count;
}
-inline unsigned int __clk_get_prepare_count(struct clk *clk)
+unsigned int __clk_get_prepare_count(struct clk *clk)
{
return !clk ? 0 : clk->prepare_count;
}
@@ -310,7 +454,7 @@ out:
return ret;
}
-inline unsigned long __clk_get_flags(struct clk *clk)
+unsigned long __clk_get_flags(struct clk *clk)
{
return !clk ? 0 : clk->flags;
}
@@ -950,9 +1094,6 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
/* change the rates */
clk_change_rate(top);
- mutex_unlock(&prepare_lock);
-
- return 0;
out:
mutex_unlock(&prepare_lock);
@@ -1663,6 +1804,11 @@ struct of_clk_provider {
void *data;
};
+extern struct of_device_id __clk_of_table[];
+
+static const struct of_device_id __clk_of_table_sentinel
+ __used __section(__clk_of_table_end);
+
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);
@@ -1791,6 +1937,9 @@ void __init of_clk_init(const struct of_device_id *matches)
{
struct device_node *np;
+ if (!matches)
+ matches = __clk_of_table;
+
for_each_matching_node(np, matches) {
const struct of_device_id *match = of_match_node(matches, np);
of_clk_init_cb_t clk_init_cb = match->data;
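of_clk_init(NULL) now walks a linker-assembled table of CLK_OF_DECLARE() entries terminated by __clk_of_table_sentinel. A simplified standalone model of that sentinel-terminated walk; the compatible string and the init callback are invented for the example, and the table is a static array rather than a linker section:

#include <stdio.h>

struct match {
	const char *compatible;
	void (*init)(void);
};

static void example_pll_init(void)
{
	puts("init acme,example-pll");
}

static const struct match clk_of_table[] = {
	{ "acme,example-pll", example_pll_init },
	{ NULL, NULL }	/* sentinel, like __clk_of_table_sentinel above */
};

int main(void)
{
	const struct match *m;

	/* walk entries until the empty sentinel is reached */
	for (m = clk_of_table; m->compatible; m++)
		m->init();
	return 0;
}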
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index ff004578a119..9dd2551a0a41 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -124,7 +124,7 @@ void __init of_cpu_clk_setup(struct device_node *node)
clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
if (WARN_ON(!clks))
- return;
+ goto clks_out;
for_each_node_by_type(dn, "cpu") {
struct clk_init_data init;
@@ -134,11 +134,11 @@ void __init of_cpu_clk_setup(struct device_node *node)
int cpu, err;
if (WARN_ON(!clk_name))
- return;
+ goto bail_out;
err = of_property_read_u32(dn, "reg", &cpu);
if (WARN_ON(err))
- return;
+ goto bail_out;
sprintf(clk_name, "cpu%d", cpu);
parent_clk = of_clk_get(node, 0);
@@ -167,6 +167,9 @@ void __init of_cpu_clk_setup(struct device_node *node)
return;
bail_out:
kfree(clks);
+	while (ncpus--)
+ kfree(cpuclk[ncpus].clk_name);
+clks_out:
kfree(cpuclk);
}
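The reworked error paths above unwind partial allocations instead of returning early. A standalone sketch of the same goto-unwind pattern, with invented names and a fixed allocation count:

#include <stdlib.h>
#include <string.h>

static char **alloc_names(int n)
{
	char **names = calloc(n, sizeof(*names));
	int i;

	if (!names)
		return NULL;

	for (i = 0; i < n; i++) {
		names[i] = strdup("cpuN");
		if (!names[i])
			goto bail_out;
	}
	return names;

bail_out:
	/* free only the entries that were successfully allocated */
	while (i--)
		free(names[i]);
	free(names);
	return NULL;
}

int main(void)
{
	char **names = alloc_names(4);
	int i;

	if (!names)
		return 1;
	for (i = 0; i < 4; i++)
		free(names[i]);
	free(names);
	return 0;
}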
diff --git a/drivers/clk/mvebu/clk-gating-ctrl.c b/drivers/clk/mvebu/clk-gating-ctrl.c
index 8fa5408b6c7d..ebf141d4374b 100644
--- a/drivers/clk/mvebu/clk-gating-ctrl.c
+++ b/drivers/clk/mvebu/clk-gating-ctrl.c
@@ -193,6 +193,7 @@ static const struct mvebu_soc_descr __initconst kirkwood_gating_descr[] = {
{ "runit", NULL, 7 },
{ "xor0", NULL, 8 },
{ "audio", NULL, 9 },
+ { "powersave", "cpuclk", 11 },
{ "sata0", NULL, 14 },
{ "sata1", NULL, 15 },
{ "xor1", NULL, 16 },
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index 8dd476e2a9c5..b5c06f9766f6 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -99,7 +99,7 @@ static enum imx23_clk clks_init_on[] __initdata = {
int __init mx23_clocks_init(void)
{
struct device_node *np;
- int i;
+ u32 i;
clk_misc_init();
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index db3af0874121..76ce6c6d1113 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -154,7 +154,7 @@ static enum imx28_clk clks_init_on[] __initdata = {
int __init mx28_clocks_init(void)
{
struct device_node *np;
- int i;
+ u32 i;
clk_misc_init();
@@ -238,7 +238,7 @@ int __init mx28_clocks_init(void)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
- clk_register_clkdev(clks[clk32k], NULL, "timrot");
+ clk_register_clkdev(clks[xbus], NULL, "timrot");
clk_register_clkdev(clks[enet_out], NULL, "enet_out");
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
new file mode 100644
index 000000000000..2b41b0f4f731
--- /dev/null
+++ b/drivers/clk/tegra/Makefile
@@ -0,0 +1,11 @@
+obj-y += clk.o
+obj-y += clk-audio-sync.o
+obj-y += clk-divider.o
+obj-y += clk-periph.o
+obj-y += clk-periph-gate.o
+obj-y += clk-pll.o
+obj-y += clk-pll-out.o
+obj-y += clk-super.o
+
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
new file mode 100644
index 000000000000..c0f7843e80e6
--- /dev/null
+++ b/drivers/clk/tegra/clk-audio-sync.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "clk.h"
+
+static unsigned long clk_sync_source_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
+
+ return sync->rate;
+}
+
+static long clk_sync_source_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
+
+ if (rate > sync->max_rate)
+ return -EINVAL;
+ else
+ return rate;
+}
+
+static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
+
+ sync->rate = rate;
+ return 0;
+}
+
+const struct clk_ops tegra_clk_sync_source_ops = {
+ .round_rate = clk_sync_source_round_rate,
+ .set_rate = clk_sync_source_set_rate,
+ .recalc_rate = clk_sync_source_recalc_rate,
+};
+
+struct clk *tegra_clk_register_sync_source(const char *name,
+ unsigned long rate, unsigned long max_rate)
+{
+ struct tegra_clk_sync_source *sync;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ sync = kzalloc(sizeof(*sync), GFP_KERNEL);
+ if (!sync) {
+ pr_err("%s: could not allocate sync source clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sync->rate = rate;
+ sync->max_rate = max_rate;
+
+ init.ops = &tegra_clk_sync_source_ops;
+ init.name = name;
+ init.flags = CLK_IS_ROOT;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ sync->hw.init = &init;
+
+ clk = clk_register(NULL, &sync->hw);
+ if (IS_ERR(clk))
+ kfree(sync);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
new file mode 100644
index 000000000000..4d75b1f37e3a
--- /dev/null
+++ b/drivers/clk/tegra/clk-divider.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define pll_out_override(p) (BIT((p->shift - 6)))
+#define div_mask(d) ((1 << (d->width)) - 1)
+#define get_mul(d) (1 << d->frac_width)
+#define get_max_div(d) div_mask(d)
+
+#define PERIPH_CLK_UART_DIV_ENB BIT(24)
+
+static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
+ unsigned long parent_rate)
+{
+ s64 divider_ux1 = parent_rate;
+ u8 flags = divider->flags;
+ int mul;
+
+ if (!rate)
+ return 0;
+
+ mul = get_mul(divider);
+
+ if (!(flags & TEGRA_DIVIDER_INT))
+ divider_ux1 *= mul;
+
+ if (flags & TEGRA_DIVIDER_ROUND_UP)
+ divider_ux1 += rate - 1;
+
+ do_div(divider_ux1, rate);
+
+ if (flags & TEGRA_DIVIDER_INT)
+ divider_ux1 *= mul;
+
+ divider_ux1 -= mul;
+
+ if (divider_ux1 < 0)
+ return 0;
+
+ if (divider_ux1 > get_max_div(divider))
+ return -EINVAL;
+
+ return divider_ux1;
+}
+
+static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
+ u32 reg;
+ int div, mul;
+ u64 rate = parent_rate;
+
+ reg = readl_relaxed(divider->reg) >> divider->shift;
+ div = reg & div_mask(divider);
+
+ mul = get_mul(divider);
+ div += mul;
+
+ rate *= mul;
+ rate += div - 1;
+ do_div(rate, div);
+
+ return rate;
+}
+
+static long clk_frac_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
+ int div, mul;
+ unsigned long output_rate = *prate;
+
+ if (!rate)
+ return output_rate;
+
+ div = get_div(divider, rate, output_rate);
+ if (div < 0)
+ return *prate;
+
+ mul = get_mul(divider);
+
+ return DIV_ROUND_UP(output_rate * mul, div + mul);
+}
+
+static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
+ int div;
+ unsigned long flags = 0;
+ u32 val;
+
+ div = get_div(divider, rate, parent_rate);
+ if (div < 0)
+ return div;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+
+ val = readl_relaxed(divider->reg);
+ val &= ~(div_mask(divider) << divider->shift);
+ val |= div << divider->shift;
+
+ if (divider->flags & TEGRA_DIVIDER_UART) {
+ if (div)
+ val |= PERIPH_CLK_UART_DIV_ENB;
+ else
+ val &= ~PERIPH_CLK_UART_DIV_ENB;
+ }
+
+ if (divider->flags & TEGRA_DIVIDER_FIXED)
+ val |= pll_out_override(divider);
+
+ writel_relaxed(val, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops tegra_clk_frac_div_ops = {
+ .recalc_rate = clk_frac_div_recalc_rate,
+ .set_rate = clk_frac_div_set_rate,
+ .round_rate = clk_frac_div_round_rate,
+};
+
+struct clk *tegra_clk_register_divider(const char *name,
+ const char *parent_name, void __iomem *reg,
+ unsigned long flags, u8 clk_divider_flags, u8 shift, u8 width,
+ u8 frac_width, spinlock_t *lock)
+{
+ struct tegra_clk_frac_div *divider;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ divider = kzalloc(sizeof(*divider), GFP_KERNEL);
+ if (!divider) {
+ pr_err("%s: could not allocate fractional divider clk\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &tegra_clk_frac_div_ops;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ divider->reg = reg;
+ divider->shift = shift;
+ divider->width = width;
+ divider->frac_width = frac_width;
+ divider->lock = lock;
+ divider->flags = clk_divider_flags;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ divider->hw.init = &init;
+
+ clk = clk_register(NULL, &divider->hw);
+ if (IS_ERR(clk))
+ kfree(divider);
+
+ return clk;
+}
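The fractional divider above stores the divisor scaled by 2^frac_width, minus the scale factor itself. A standalone model of that arithmetic, assuming a one-bit fraction and example rates chosen only for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t parent = 408000000;	/* e.g. a 408 MHz PLL output */
	uint64_t rate   = 102000000;	/* requested rate */
	unsigned int frac_width = 1;
	unsigned int mul = 1u << frac_width;

	/* get_div(): divider field = parent * mul / rate - mul */
	uint64_t div = parent * mul / rate - mul;

	/* clk_frac_div_recalc_rate(): rate = parent * mul / (div + mul) */
	printf("div field = %llu -> %llu Hz\n",
	       (unsigned long long)div,
	       (unsigned long long)(parent * mul / (div + mul)));
	return 0;
}

For these numbers the field works out to 6 and the recalculated rate to 102 MHz; the real driver additionally rounds up and handles the TEGRA_DIVIDER_INT and TEGRA_DIVIDER_ROUND_UP flags.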
diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
new file mode 100644
index 000000000000..6dd533251e7b
--- /dev/null
+++ b/drivers/clk/tegra/clk-periph-gate.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/tegra-soc.h>
+
+#include "clk.h"
+
+static DEFINE_SPINLOCK(periph_ref_lock);
+
+/* Macros to assist peripheral gate clock */
+#define read_enb(gate) \
+ readl_relaxed(gate->clk_base + (gate->regs->enb_reg))
+#define write_enb_set(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->enb_set_reg))
+#define write_enb_clr(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->enb_clr_reg))
+
+#define read_rst(gate) \
+ readl_relaxed(gate->clk_base + (gate->regs->rst_reg))
+#define write_rst_set(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->rst_set_reg))
+#define write_rst_clr(val, gate) \
+ writel_relaxed(val, gate->clk_base + (gate->regs->rst_clr_reg))
+
+#define periph_clk_to_bit(periph) (1 << (gate->clk_num % 32))
+
+/* Peripheral gate clock ops */
+static int clk_periph_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+ int state = 1;
+
+ if (!(read_enb(gate) & periph_clk_to_bit(gate)))
+ state = 0;
+
+ if (!(gate->flags & TEGRA_PERIPH_NO_RESET))
+ if (read_rst(gate) & periph_clk_to_bit(gate))
+ state = 0;
+
+ return state;
+}
+
+static int clk_periph_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&periph_ref_lock, flags);
+
+ gate->enable_refcnt[gate->clk_num]++;
+ if (gate->enable_refcnt[gate->clk_num] > 1) {
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+ return 0;
+ }
+
+ write_enb_set(periph_clk_to_bit(gate), gate);
+ udelay(2);
+
+ if (!(gate->flags & TEGRA_PERIPH_NO_RESET) &&
+ !(gate->flags & TEGRA_PERIPH_MANUAL_RESET)) {
+ if (read_rst(gate) & periph_clk_to_bit(gate)) {
+			udelay(5); /* reset propagation delay */
+ write_rst_clr(periph_clk_to_bit(gate), gate);
+ }
+ }
+
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+
+ return 0;
+}
+
+static void clk_periph_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&periph_ref_lock, flags);
+
+ gate->enable_refcnt[gate->clk_num]--;
+ if (gate->enable_refcnt[gate->clk_num] > 0) {
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+ return;
+ }
+
+	/*
+	 * If the peripheral sits on the APB bus, read from the bus to
+	 * flush the pending write. This avoids any access reaching the
+	 * peripheral after its clock has been gated off.
+	 */
+ if (gate->flags & TEGRA_PERIPH_ON_APB)
+ tegra_read_chipid();
+
+ write_enb_clr(periph_clk_to_bit(gate), gate);
+
+ spin_unlock_irqrestore(&periph_ref_lock, flags);
+}
+
+void tegra_periph_reset(struct tegra_clk_periph_gate *gate, bool assert)
+{
+ if (gate->flags & TEGRA_PERIPH_NO_RESET)
+ return;
+
+ if (assert) {
+		/*
+		 * If the peripheral sits on the APB bus, read from the bus
+		 * to flush the pending write before the reset is asserted.
+		 */
+ if (gate->flags & TEGRA_PERIPH_ON_APB)
+ tegra_read_chipid();
+
+ write_rst_set(periph_clk_to_bit(gate), gate);
+ } else {
+ write_rst_clr(periph_clk_to_bit(gate), gate);
+ }
+}
+
+const struct clk_ops tegra_clk_periph_gate_ops = {
+ .is_enabled = clk_periph_is_enabled,
+ .enable = clk_periph_enable,
+ .disable = clk_periph_disable,
+};
+
+struct clk *tegra_clk_register_periph_gate(const char *name,
+ const char *parent_name, u8 gate_flags, void __iomem *clk_base,
+ unsigned long flags, int clk_num,
+ struct tegra_clk_periph_regs *pregs, int *enable_refcnt)
+{
+ struct tegra_clk_periph_gate *gate;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ pr_err("%s: could not allocate periph gate clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+ init.ops = &tegra_clk_periph_gate_ops;
+
+ gate->magic = TEGRA_CLK_PERIPH_GATE_MAGIC;
+ gate->clk_base = clk_base;
+ gate->clk_num = clk_num;
+ gate->flags = gate_flags;
+ gate->enable_refcnt = enable_refcnt;
+ gate->regs = pregs;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ gate->hw.init = &init;
+
+ clk = clk_register(NULL, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
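The gate above only touches hardware on the 0->1 and 1->0 refcount transitions, so several users can share one enable bit. A minimal standalone model of that behaviour, with prints standing in for the register writes:

#include <stdio.h>

static int refcnt;

static void gate_enable(void)
{
	if (++refcnt == 1)
		puts("write ENB_SET bit");	/* stands in for write_enb_set() */
}

static void gate_disable(void)
{
	if (--refcnt == 0)
		puts("write ENB_CLR bit");	/* stands in for write_enb_clr() */
}

int main(void)
{
	gate_enable();
	gate_enable();	/* second user: no register write */
	gate_disable();
	gate_disable();	/* last user: the gate really closes */
	return 0;
}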
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
new file mode 100644
index 000000000000..788486e6331a
--- /dev/null
+++ b/drivers/clk/tegra/clk-periph.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "clk.h"
+
+static u8 clk_periph_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *mux_ops = periph->mux_ops;
+ struct clk_hw *mux_hw = &periph->mux.hw;
+
+ mux_hw->clk = hw->clk;
+
+ return mux_ops->get_parent(mux_hw);
+}
+
+static int clk_periph_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *mux_ops = periph->mux_ops;
+ struct clk_hw *mux_hw = &periph->mux.hw;
+
+ mux_hw->clk = hw->clk;
+
+ return mux_ops->set_parent(mux_hw, index);
+}
+
+static unsigned long clk_periph_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *div_ops = periph->div_ops;
+ struct clk_hw *div_hw = &periph->divider.hw;
+
+ div_hw->clk = hw->clk;
+
+ return div_ops->recalc_rate(div_hw, parent_rate);
+}
+
+static long clk_periph_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *div_ops = periph->div_ops;
+ struct clk_hw *div_hw = &periph->divider.hw;
+
+ div_hw->clk = hw->clk;
+
+ return div_ops->round_rate(div_hw, rate, prate);
+}
+
+static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *div_ops = periph->div_ops;
+ struct clk_hw *div_hw = &periph->divider.hw;
+
+ div_hw->clk = hw->clk;
+
+ return div_ops->set_rate(div_hw, rate, parent_rate);
+}
+
+static int clk_periph_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *gate_ops = periph->gate_ops;
+ struct clk_hw *gate_hw = &periph->gate.hw;
+
+ gate_hw->clk = hw->clk;
+
+ return gate_ops->is_enabled(gate_hw);
+}
+
+static int clk_periph_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *gate_ops = periph->gate_ops;
+ struct clk_hw *gate_hw = &periph->gate.hw;
+
+ gate_hw->clk = hw->clk;
+
+ return gate_ops->enable(gate_hw);
+}
+
+static void clk_periph_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *gate_ops = periph->gate_ops;
+ struct clk_hw *gate_hw = &periph->gate.hw;
+
+ gate_ops->disable(gate_hw);
+}
+
+void tegra_periph_reset_deassert(struct clk *c)
+{
+ struct clk_hw *hw = __clk_get_hw(c);
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ struct tegra_clk_periph_gate *gate;
+
+ if (periph->magic != TEGRA_CLK_PERIPH_MAGIC) {
+ gate = to_clk_periph_gate(hw);
+ if (gate->magic != TEGRA_CLK_PERIPH_GATE_MAGIC) {
+ WARN_ON(1);
+ return;
+ }
+ } else {
+ gate = &periph->gate;
+ }
+
+ tegra_periph_reset(gate, 0);
+}
+
+void tegra_periph_reset_assert(struct clk *c)
+{
+ struct clk_hw *hw = __clk_get_hw(c);
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ struct tegra_clk_periph_gate *gate;
+
+ if (periph->magic != TEGRA_CLK_PERIPH_MAGIC) {
+ gate = to_clk_periph_gate(hw);
+ if (gate->magic != TEGRA_CLK_PERIPH_GATE_MAGIC) {
+ WARN_ON(1);
+ return;
+ }
+ } else {
+ gate = &periph->gate;
+ }
+
+ tegra_periph_reset(gate, 1);
+}
+
+const struct clk_ops tegra_clk_periph_ops = {
+ .get_parent = clk_periph_get_parent,
+ .set_parent = clk_periph_set_parent,
+ .recalc_rate = clk_periph_recalc_rate,
+ .round_rate = clk_periph_round_rate,
+ .set_rate = clk_periph_set_rate,
+ .is_enabled = clk_periph_is_enabled,
+ .enable = clk_periph_enable,
+ .disable = clk_periph_disable,
+};
+
+const struct clk_ops tegra_clk_periph_nodiv_ops = {
+ .get_parent = clk_periph_get_parent,
+ .set_parent = clk_periph_set_parent,
+ .is_enabled = clk_periph_is_enabled,
+ .enable = clk_periph_enable,
+ .disable = clk_periph_disable,
+};
+
+static struct clk *_tegra_clk_register_periph(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph,
+ void __iomem *clk_base, u32 offset, bool div)
+{
+ struct clk *clk;
+ struct clk_init_data init;
+
+ init.name = name;
+ init.ops = div ? &tegra_clk_periph_ops : &tegra_clk_periph_nodiv_ops;
+ init.flags = div ? 0 : CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ periph->hw.init = &init;
+ periph->magic = TEGRA_CLK_PERIPH_MAGIC;
+ periph->mux.reg = clk_base + offset;
+ periph->divider.reg = div ? (clk_base + offset) : NULL;
+ periph->gate.clk_base = clk_base;
+
+ clk = clk_register(NULL, &periph->hw);
+ if (IS_ERR(clk))
+ return clk;
+
+ periph->mux.hw.clk = clk;
+ periph->divider.hw.clk = div ? clk : NULL;
+ periph->gate.hw.clk = clk;
+
+ return clk;
+}
+
+struct clk *tegra_clk_register_periph(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset)
+{
+ return _tegra_clk_register_periph(name, parent_names, num_parents,
+ periph, clk_base, offset, true);
+}
+
+struct clk *tegra_clk_register_periph_nodiv(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset)
+{
+ return _tegra_clk_register_periph(name, parent_names, num_parents,
+ periph, clk_base, offset, false);
+}
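tegra_clk_periph above is a composite clock: each op is forwarded to the embedded mux, divider or gate through their ops tables after re-pointing the sub-clock's hw->clk. A standalone sketch of that delegation pattern, with simplified stand-in types:

#include <stdio.h>

struct ops {
	void (*enable)(void *hw);
};

static void gate_enable(void *hw)
{
	(void)hw;	/* unused in this sketch */
	puts("gate enabled");
}

static const struct ops gate_ops = { .enable = gate_enable };

struct periph {
	const struct ops *gate_ops;
	void *gate_hw;
};

static void periph_enable(struct periph *p)
{
	p->gate_ops->enable(p->gate_hw);	/* forward to the gate */
}

int main(void)
{
	struct periph p = { .gate_ops = &gate_ops, .gate_hw = NULL };

	periph_enable(&p);
	return 0;
}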
diff --git a/drivers/clk/tegra/clk-pll-out.c b/drivers/clk/tegra/clk-pll-out.c
new file mode 100644
index 000000000000..3598987a451d
--- /dev/null
+++ b/drivers/clk/tegra/clk-pll-out.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define pll_out_enb(p) (BIT(p->enb_bit_idx))
+#define pll_out_rst(p) (BIT(p->rst_bit_idx))
+
+static int clk_pll_out_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_pll_out *pll_out = to_clk_pll_out(hw);
+ u32 val = readl_relaxed(pll_out->reg);
+ int state;
+
+ state = (val & pll_out_enb(pll_out)) ? 1 : 0;
+ if (!(val & (pll_out_rst(pll_out))))
+ state = 0;
+ return state;
+}
+
+static int clk_pll_out_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll_out *pll_out = to_clk_pll_out(hw);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pll_out->lock)
+ spin_lock_irqsave(pll_out->lock, flags);
+
+ val = readl_relaxed(pll_out->reg);
+
+ val |= (pll_out_enb(pll_out) | pll_out_rst(pll_out));
+
+ writel_relaxed(val, pll_out->reg);
+ udelay(2);
+
+ if (pll_out->lock)
+ spin_unlock_irqrestore(pll_out->lock, flags);
+
+ return 0;
+}
+
+static void clk_pll_out_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll_out *pll_out = to_clk_pll_out(hw);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pll_out->lock)
+ spin_lock_irqsave(pll_out->lock, flags);
+
+ val = readl_relaxed(pll_out->reg);
+
+ val &= ~(pll_out_enb(pll_out) | pll_out_rst(pll_out));
+
+ writel_relaxed(val, pll_out->reg);
+ udelay(2);
+
+ if (pll_out->lock)
+ spin_unlock_irqrestore(pll_out->lock, flags);
+}
+
+const struct clk_ops tegra_clk_pll_out_ops = {
+ .is_enabled = clk_pll_out_is_enabled,
+ .enable = clk_pll_out_enable,
+ .disable = clk_pll_out_disable,
+};
+
+struct clk *tegra_clk_register_pll_out(const char *name,
+ const char *parent_name, void __iomem *reg, u8 enb_bit_idx,
+ u8 rst_bit_idx, unsigned long flags, u8 pll_out_flags,
+ spinlock_t *lock)
+{
+ struct tegra_clk_pll_out *pll_out;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll_out = kzalloc(sizeof(*pll_out), GFP_KERNEL);
+ if (!pll_out)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &tegra_clk_pll_out_ops;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ init.flags = flags;
+
+ pll_out->reg = reg;
+ pll_out->enb_bit_idx = enb_bit_idx;
+ pll_out->rst_bit_idx = rst_bit_idx;
+ pll_out->flags = pll_out_flags;
+ pll_out->lock = lock;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ pll_out->hw.init = &init;
+
+ clk = clk_register(NULL, &pll_out->hw);
+ if (IS_ERR(clk))
+ kfree(pll_out);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
new file mode 100644
index 000000000000..165f24734c1b
--- /dev/null
+++ b/drivers/clk/tegra/clk-pll.c
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define PLL_BASE_BYPASS BIT(31)
+#define PLL_BASE_ENABLE BIT(30)
+#define PLL_BASE_REF_ENABLE BIT(29)
+#define PLL_BASE_OVERRIDE BIT(28)
+
+#define PLL_BASE_DIVP_SHIFT 20
+#define PLL_BASE_DIVP_WIDTH 3
+#define PLL_BASE_DIVN_SHIFT 8
+#define PLL_BASE_DIVN_WIDTH 10
+#define PLL_BASE_DIVM_SHIFT 0
+#define PLL_BASE_DIVM_WIDTH 5
+#define PLLU_POST_DIVP_MASK 0x1
+
+#define PLL_MISC_DCCON_SHIFT 20
+#define PLL_MISC_CPCON_SHIFT 8
+#define PLL_MISC_CPCON_WIDTH 4
+#define PLL_MISC_CPCON_MASK ((1 << PLL_MISC_CPCON_WIDTH) - 1)
+#define PLL_MISC_LFCON_SHIFT 4
+#define PLL_MISC_LFCON_WIDTH 4
+#define PLL_MISC_LFCON_MASK ((1 << PLL_MISC_LFCON_WIDTH) - 1)
+#define PLL_MISC_VCOCON_SHIFT 0
+#define PLL_MISC_VCOCON_WIDTH 4
+#define PLL_MISC_VCOCON_MASK ((1 << PLL_MISC_VCOCON_WIDTH) - 1)
+
+#define OUT_OF_TABLE_CPCON 8
+
+#define PMC_PLLP_WB0_OVERRIDE 0xf8
+#define PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE BIT(12)
+#define PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE BIT(11)
+
+#define PLL_POST_LOCK_DELAY 50
+
+#define PLLDU_LFCON_SET_DIVN 600
+
+#define PLLE_BASE_DIVCML_SHIFT 24
+#define PLLE_BASE_DIVCML_WIDTH 4
+#define PLLE_BASE_DIVP_SHIFT 16
+#define PLLE_BASE_DIVP_WIDTH 7
+#define PLLE_BASE_DIVN_SHIFT 8
+#define PLLE_BASE_DIVN_WIDTH 8
+#define PLLE_BASE_DIVM_SHIFT 0
+#define PLLE_BASE_DIVM_WIDTH 8
+
+#define PLLE_MISC_SETUP_BASE_SHIFT 16
+#define PLLE_MISC_SETUP_BASE_MASK (0xffff << PLLE_MISC_SETUP_BASE_SHIFT)
+#define PLLE_MISC_LOCK_ENABLE BIT(9)
+#define PLLE_MISC_READY BIT(15)
+#define PLLE_MISC_SETUP_EX_SHIFT 2
+#define PLLE_MISC_SETUP_EX_MASK (3 << PLLE_MISC_SETUP_EX_SHIFT)
+#define PLLE_MISC_SETUP_MASK (PLLE_MISC_SETUP_BASE_MASK | \
+ PLLE_MISC_SETUP_EX_MASK)
+#define PLLE_MISC_SETUP_VALUE (7 << PLLE_MISC_SETUP_BASE_SHIFT)
+
+#define PLLE_SS_CTRL 0x68
+#define PLLE_SS_DISABLE (7 << 10)
+
+#define PMC_SATA_PWRGT 0x1ac
+#define PMC_SATA_PWRGT_PLLE_IDDQ_VALUE BIT(5)
+#define PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL BIT(4)
+
+#define pll_readl(offset, p) readl_relaxed(p->clk_base + offset)
+#define pll_readl_base(p) pll_readl(p->params->base_reg, p)
+#define pll_readl_misc(p) pll_readl(p->params->misc_reg, p)
+
+#define pll_writel(val, offset, p) writel_relaxed(val, p->clk_base + offset)
+#define pll_writel_base(val, p) pll_writel(val, p->params->base_reg, p)
+#define pll_writel_misc(val, p) pll_writel(val, p->params->misc_reg, p)
+
+#define mask(w) ((1 << (w)) - 1)
+#define divm_mask(p) mask(p->divm_width)
+#define divn_mask(p) mask(p->divn_width)
+#define divp_mask(p) (p->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK : \
+ mask(p->divp_width))
+
+#define divm_max(p) (divm_mask(p))
+#define divn_max(p) (divn_mask(p))
+#define divp_max(p) (1 << (divp_mask(p)))
+
+static void clk_pll_enable_lock(struct tegra_clk_pll *pll)
+{
+ u32 val;
+
+ if (!(pll->flags & TEGRA_PLL_USE_LOCK))
+ return;
+
+ val = pll_readl_misc(pll);
+ val |= BIT(pll->params->lock_enable_bit_idx);
+ pll_writel_misc(val, pll);
+}
+
+static int clk_pll_wait_for_lock(struct tegra_clk_pll *pll,
+ void __iomem *lock_addr, u32 lock_bit_idx)
+{
+ int i;
+ u32 val;
+
+ if (!(pll->flags & TEGRA_PLL_USE_LOCK)) {
+ udelay(pll->params->lock_delay);
+ return 0;
+ }
+
+ for (i = 0; i < pll->params->lock_delay; i++) {
+ val = readl_relaxed(lock_addr);
+ if (val & BIT(lock_bit_idx)) {
+ udelay(PLL_POST_LOCK_DELAY);
+ return 0;
+ }
+ udelay(2); /* timeout = 2 * lock time */
+ }
+
+ pr_err("%s: Timed out waiting for pll %s lock\n", __func__,
+ __clk_get_name(pll->hw.clk));
+
+ return -1;
+}
+
+static int clk_pll_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ if (pll->flags & TEGRA_PLLM) {
+ val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)
+ return val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE ? 1 : 0;
+ }
+
+ val = pll_readl_base(pll);
+
+ return val & PLL_BASE_ENABLE ? 1 : 0;
+}
+
+static int _clk_pll_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ clk_pll_enable_lock(pll);
+
+ val = pll_readl_base(pll);
+ val &= ~PLL_BASE_BYPASS;
+ val |= PLL_BASE_ENABLE;
+ pll_writel_base(val, pll);
+
+ if (pll->flags & TEGRA_PLLM) {
+ val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ val |= PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
+ writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ }
+
+ clk_pll_wait_for_lock(pll, pll->clk_base + pll->params->base_reg,
+ pll->params->lock_bit_idx);
+
+ return 0;
+}
+
+static void _clk_pll_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ val = pll_readl_base(pll);
+ val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ pll_writel_base(val, pll);
+
+ if (pll->flags & TEGRA_PLLM) {
+ val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ val &= ~PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
+ writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
+ }
+}
+
+static int clk_pll_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ int ret;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ ret = _clk_pll_enable(hw);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static void clk_pll_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _clk_pll_disable(hw);
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static int _get_table_rate(struct clk_hw *hw,
+ struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct tegra_clk_pll_freq_table *sel;
+
+ for (sel = pll->freq_table; sel->input_rate != 0; sel++)
+ if (sel->input_rate == parent_rate &&
+ sel->output_rate == rate)
+ break;
+
+ if (sel->input_rate == 0)
+ return -EINVAL;
+
+ BUG_ON(sel->p < 1);
+
+ cfg->input_rate = sel->input_rate;
+ cfg->output_rate = sel->output_rate;
+ cfg->m = sel->m;
+ cfg->n = sel->n;
+ cfg->p = sel->p;
+ cfg->cpcon = sel->cpcon;
+
+ return 0;
+}
+
+static int _calc_rate(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long cfreq;
+ u32 p_div = 0;
+
+ switch (parent_rate) {
+ case 12000000:
+ case 26000000:
+ cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2000000;
+ break;
+ case 13000000:
+ cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2600000;
+ break;
+ case 16800000:
+ case 19200000:
+ cfreq = (rate <= 1200000 * 1000) ? 1200000 : 2400000;
+ break;
+ case 9600000:
+ case 28800000:
+ /*
+ * PLL_P_OUT1 rate is not listed in PLLA table
+ */
+ cfreq = parent_rate/(parent_rate/1000000);
+ break;
+ default:
+ pr_err("%s Unexpected reference rate %lu\n",
+ __func__, parent_rate);
+ BUG();
+ }
+
+ /* Raise VCO to guarantee 0.5% accuracy */
+ for (cfg->output_rate = rate; cfg->output_rate < 200 * cfreq;
+ cfg->output_rate <<= 1)
+ p_div++;
+
+ cfg->p = 1 << p_div;
+ cfg->m = parent_rate / cfreq;
+ cfg->n = cfg->output_rate / cfreq;
+ cfg->cpcon = OUT_OF_TABLE_CPCON;
+
+ if (cfg->m > divm_max(pll) || cfg->n > divn_max(pll) ||
+ cfg->p > divp_max(pll) || cfg->output_rate > pll->params->vco_max) {
+ pr_err("%s: Failed to set %s rate %lu\n",
+ __func__, __clk_get_name(hw->clk), rate);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
+ unsigned long rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ u32 divp, val, old_base;
+ int state;
+
+ divp = __ffs(cfg->p);
+
+ if (pll->flags & TEGRA_PLLU)
+ divp ^= 1;
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ old_base = val = pll_readl_base(pll);
+ val &= ~((divm_mask(pll) << pll->divm_shift) |
+ (divn_mask(pll) << pll->divn_shift) |
+ (divp_mask(pll) << pll->divp_shift));
+ val |= ((cfg->m << pll->divm_shift) |
+ (cfg->n << pll->divn_shift) |
+ (divp << pll->divp_shift));
+ if (val == old_base) {
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+ return 0;
+ }
+
+ state = clk_pll_is_enabled(hw);
+
+ if (state) {
+ _clk_pll_disable(hw);
+ val &= ~(PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ }
+ pll_writel_base(val, pll);
+
+ if (pll->flags & TEGRA_PLL_HAS_CPCON) {
+ val = pll_readl_misc(pll);
+ val &= ~(PLL_MISC_CPCON_MASK << PLL_MISC_CPCON_SHIFT);
+ val |= cfg->cpcon << PLL_MISC_CPCON_SHIFT;
+ if (pll->flags & TEGRA_PLL_SET_LFCON) {
+ val &= ~(PLL_MISC_LFCON_MASK << PLL_MISC_LFCON_SHIFT);
+ if (cfg->n >= PLLDU_LFCON_SET_DIVN)
+ val |= 0x1 << PLL_MISC_LFCON_SHIFT;
+ } else if (pll->flags & TEGRA_PLL_SET_DCCON) {
+ val &= ~(0x1 << PLL_MISC_DCCON_SHIFT);
+ if (rate >= (pll->params->vco_max >> 1))
+ val |= 0x1 << PLL_MISC_DCCON_SHIFT;
+ }
+ pll_writel_misc(val, pll);
+ }
+
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ if (state)
+ clk_pll_enable(hw);
+
+ return 0;
+}
+
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct tegra_clk_pll_freq_table cfg;
+
+ if (pll->flags & TEGRA_PLL_FIXED) {
+ if (rate != pll->fixed_rate) {
+ pr_err("%s: Can not change %s fixed rate %lu to %lu\n",
+ __func__, __clk_get_name(hw->clk),
+ pll->fixed_rate, rate);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ if (_get_table_rate(hw, &cfg, rate, parent_rate) &&
+ _calc_rate(hw, &cfg, rate, parent_rate))
+ return -EINVAL;
+
+ return _program_pll(hw, &cfg, rate);
+}
+
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct tegra_clk_pll_freq_table cfg;
+ u64 output_rate = *prate;
+
+ if (pll->flags & TEGRA_PLL_FIXED)
+ return pll->fixed_rate;
+
+ /* PLLM is used for memory; we do not change rate */
+ if (pll->flags & TEGRA_PLLM)
+ return __clk_get_rate(hw->clk);
+
+ if (_get_table_rate(hw, &cfg, rate, *prate) &&
+ _calc_rate(hw, &cfg, rate, *prate))
+ return -EINVAL;
+
+ output_rate *= cfg.n;
+ do_div(output_rate, cfg.m * cfg.p);
+
+ return output_rate;
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val = pll_readl_base(pll);
+ u32 divn = 0, divm = 0, divp = 0;
+ u64 rate = parent_rate;
+
+ if (val & PLL_BASE_BYPASS)
+ return parent_rate;
+
+ if ((pll->flags & TEGRA_PLL_FIXED) && !(val & PLL_BASE_OVERRIDE)) {
+ struct tegra_clk_pll_freq_table sel;
+ if (_get_table_rate(hw, &sel, pll->fixed_rate, parent_rate)) {
+ pr_err("Clock %s has unknown fixed frequency\n",
+ __clk_get_name(hw->clk));
+ BUG();
+ }
+ return pll->fixed_rate;
+ }
+
+ divp = (val >> pll->divp_shift) & (divp_mask(pll));
+ if (pll->flags & TEGRA_PLLU)
+ divp ^= 1;
+
+ divn = (val >> pll->divn_shift) & (divn_mask(pll));
+ divm = (val >> pll->divm_shift) & (divm_mask(pll));
+ divm *= (1 << divp);
+
+ rate *= divn;
+ do_div(rate, divm);
+ return rate;
+}
+
+static int clk_plle_training(struct tegra_clk_pll *pll)
+{
+ u32 val;
+ unsigned long timeout;
+
+ if (!pll->pmc)
+ return -ENOSYS;
+
+ /*
+ * PLLE is already disabled, and setup cleared;
+ * create falling edge on PLLE IDDQ input.
+ */
+ val = readl(pll->pmc + PMC_SATA_PWRGT);
+ val |= PMC_SATA_PWRGT_PLLE_IDDQ_VALUE;
+ writel(val, pll->pmc + PMC_SATA_PWRGT);
+
+ val = readl(pll->pmc + PMC_SATA_PWRGT);
+ val |= PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL;
+ writel(val, pll->pmc + PMC_SATA_PWRGT);
+
+ val = readl(pll->pmc + PMC_SATA_PWRGT);
+ val &= ~PMC_SATA_PWRGT_PLLE_IDDQ_VALUE;
+ writel(val, pll->pmc + PMC_SATA_PWRGT);
+
+ val = pll_readl_misc(pll);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (1) {
+ val = pll_readl_misc(pll);
+ if (val & PLLE_MISC_READY)
+ break;
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: timeout waiting for PLLE\n", __func__);
+ return -EBUSY;
+ }
+ udelay(300);
+ }
+
+ return 0;
+}
+
+static int clk_plle_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+ struct tegra_clk_pll_freq_table sel;
+ u32 val;
+ int err;
+
+ if (_get_table_rate(hw, &sel, pll->fixed_rate, input_rate))
+ return -EINVAL;
+
+ clk_pll_disable(hw);
+
+ val = pll_readl_misc(pll);
+ val &= ~(PLLE_MISC_LOCK_ENABLE | PLLE_MISC_SETUP_MASK);
+ pll_writel_misc(val, pll);
+
+ val = pll_readl_misc(pll);
+ if (!(val & PLLE_MISC_READY)) {
+ err = clk_plle_training(pll);
+ if (err)
+ return err;
+ }
+
+ if (pll->flags & TEGRA_PLLE_CONFIGURE) {
+ /* configure dividers */
+ val = pll_readl_base(pll);
+ val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
+ val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
+ val |= sel.m << pll->divm_shift;
+ val |= sel.n << pll->divn_shift;
+ val |= sel.p << pll->divp_shift;
+ val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
+ pll_writel_base(val, pll);
+ }
+
+ val = pll_readl_misc(pll);
+ val |= PLLE_MISC_SETUP_VALUE;
+ val |= PLLE_MISC_LOCK_ENABLE;
+ pll_writel_misc(val, pll);
+
+ val = readl(pll->clk_base + PLLE_SS_CTRL);
+ val |= PLLE_SS_DISABLE;
+ writel(val, pll->clk_base + PLLE_SS_CTRL);
+
+	val = pll_readl_base(pll);
+ val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
+ pll_writel_base(val, pll);
+
+ clk_pll_wait_for_lock(pll, pll->clk_base + pll->params->misc_reg,
+ pll->params->lock_bit_idx);
+ return 0;
+}
+
+static unsigned long clk_plle_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val = pll_readl_base(pll);
+ u32 divn = 0, divm = 0, divp = 0;
+ u64 rate = parent_rate;
+
+ divp = (val >> pll->divp_shift) & (divp_mask(pll));
+ divn = (val >> pll->divn_shift) & (divn_mask(pll));
+ divm = (val >> pll->divm_shift) & (divm_mask(pll));
+ divm *= divp;
+
+ rate *= divn;
+ do_div(rate, divm);
+ return rate;
+}
+
+const struct clk_ops tegra_clk_pll_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pll_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
+};
+
+const struct clk_ops tegra_clk_plle_ops = {
+ .recalc_rate = clk_plle_recalc_rate,
+ .is_enabled = clk_pll_is_enabled,
+ .disable = clk_pll_disable,
+ .enable = clk_plle_enable,
+};
+
+static struct clk *_tegra_clk_register_pll(const char *name,
+ const char *parent_name, void __iomem *clk_base,
+ void __iomem *pmc, unsigned long flags,
+ unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock,
+ const struct clk_ops *ops)
+{
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ pll->clk_base = clk_base;
+ pll->pmc = pmc;
+
+ pll->freq_table = freq_table;
+ pll->params = pll_params;
+ pll->fixed_rate = fixed_rate;
+ pll->flags = pll_flags;
+ pll->lock = lock;
+
+ pll->divp_shift = PLL_BASE_DIVP_SHIFT;
+ pll->divp_width = PLL_BASE_DIVP_WIDTH;
+ pll->divn_shift = PLL_BASE_DIVN_SHIFT;
+ pll->divn_width = PLL_BASE_DIVN_WIDTH;
+ pll->divm_shift = PLL_BASE_DIVM_SHIFT;
+ pll->divm_width = PLL_BASE_DIVM_WIDTH;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
+struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+{
+ return _tegra_clk_register_pll(name, parent_name, clk_base, pmc,
+ flags, fixed_rate, pll_params, pll_flags, freq_table,
+ lock, &tegra_clk_pll_ops);
+}
+
+struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
+{
+ return _tegra_clk_register_pll(name, parent_name, clk_base, pmc,
+ flags, fixed_rate, pll_params, pll_flags, freq_table,
+ lock, &tegra_clk_plle_ops);
+}
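clk_pll_recalc_rate() above reads the M, N and P divider fields out of the base register and computes rate = parent * N / (M * 2^P), folding the post divider into the denominator. A standalone model of that relation with example field values chosen only for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t parent = 12000000;		/* 12 MHz reference */
	unsigned int n = 600, m = 12, p = 0;	/* e.g. a 600 MHz VCO */

	uint64_t rate = parent * n;
	rate /= (uint64_t)m << p;	/* divm scaled by 2^divp, as in the driver */

	printf("%llu Hz\n", (unsigned long long)rate);
	return 0;
}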
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
new file mode 100644
index 000000000000..2fd924d38606
--- /dev/null
+++ b/drivers/clk/tegra/clk-super.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+
+#include "clk.h"
+
+#define SUPER_STATE_IDLE 0
+#define SUPER_STATE_RUN 1
+#define SUPER_STATE_IRQ 2
+#define SUPER_STATE_FIQ 3
+
+#define SUPER_STATE_SHIFT 28
+#define SUPER_STATE_MASK ((BIT(SUPER_STATE_IDLE) | BIT(SUPER_STATE_RUN) | \
+ BIT(SUPER_STATE_IRQ) | BIT(SUPER_STATE_FIQ)) \
+ << SUPER_STATE_SHIFT)
+
+#define SUPER_LP_DIV2_BYPASS (1 << 16)
+
+#define super_state(s) (BIT(s) << SUPER_STATE_SHIFT)
+#define super_state_to_src_shift(m, s) ((m->width * s))
+#define super_state_to_src_mask(m) (((1 << m->width) - 1))
+
+static u8 clk_super_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
+ u32 val, state;
+ u8 source, shift;
+
+ val = readl_relaxed(mux->reg);
+
+ state = val & SUPER_STATE_MASK;
+
+ BUG_ON((state != super_state(SUPER_STATE_RUN)) &&
+ (state != super_state(SUPER_STATE_IDLE)));
+ shift = (state == super_state(SUPER_STATE_IDLE)) ?
+ super_state_to_src_shift(mux, SUPER_STATE_IDLE) :
+ super_state_to_src_shift(mux, SUPER_STATE_RUN);
+
+ source = (val >> shift) & super_state_to_src_mask(mux);
+
+ /*
+ * If LP_DIV2_BYPASS is not set and PLLX is current parent then
+ * PLLX/2 is the input source to CCLKLP.
+ */
+ if ((mux->flags & TEGRA_DIVIDER_2) && !(val & SUPER_LP_DIV2_BYPASS) &&
+ (source == mux->pllx_index))
+ source = mux->div2_index;
+
+ return source;
+}
+
+static int clk_super_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
+ u32 val, state;
+ int err = 0;
+ u8 parent_index, shift;
+ unsigned long flags = 0;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+
+ val = readl_relaxed(mux->reg);
+ state = val & SUPER_STATE_MASK;
+ BUG_ON((state != super_state(SUPER_STATE_RUN)) &&
+ (state != super_state(SUPER_STATE_IDLE)));
+ shift = (state == super_state(SUPER_STATE_IDLE)) ?
+ super_state_to_src_shift(mux, SUPER_STATE_IDLE) :
+ super_state_to_src_shift(mux, SUPER_STATE_RUN);
+
+	/*
+	 * For the LP-mode super-clock, switching between the direct PLLX
+	 * output and its divided-by-2 output is only allowed while a
+	 * source other than PLLX is the current parent.
+	 */
+ if ((mux->flags & TEGRA_DIVIDER_2) && ((index == mux->div2_index) ||
+ (index == mux->pllx_index))) {
+ parent_index = clk_super_get_parent(hw);
+ if ((parent_index == mux->div2_index) ||
+ (parent_index == mux->pllx_index)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ val ^= SUPER_LP_DIV2_BYPASS;
+ writel_relaxed(val, mux->reg);
+ udelay(2);
+
+ if (index == mux->div2_index)
+ index = mux->pllx_index;
+ }
+ val &= ~((super_state_to_src_mask(mux)) << shift);
+ val |= (index & (super_state_to_src_mask(mux))) << shift;
+
+ writel_relaxed(val, mux->reg);
+ udelay(2);
+
+out:
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return err;
+}
+
+const struct clk_ops tegra_clk_super_ops = {
+ .get_parent = clk_super_get_parent,
+ .set_parent = clk_super_set_parent,
+};
+
+struct clk *tegra_clk_register_super_mux(const char *name,
+ const char **parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg, u8 clk_super_flags,
+ u8 width, u8 pllx_index, u8 div2_index, spinlock_t *lock)
+{
+ struct tegra_clk_super_mux *super;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ super = kzalloc(sizeof(*super), GFP_KERNEL);
+ if (!super) {
+ pr_err("%s: could not allocate super clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &tegra_clk_super_ops;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ super->reg = reg;
+ super->pllx_index = pllx_index;
+ super->div2_index = div2_index;
+ super->lock = lock;
+ super->width = width;
+ super->flags = clk_super_flags;
+
+ /* Data in .init is copied by clk_register(), so stack variable OK */
+ super->hw.init = &init;
+
+ clk = clk_register(NULL, &super->hw);
+ if (IS_ERR(clk))
+ kfree(super);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
new file mode 100644
index 000000000000..143ce1f899ad
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -0,0 +1,1355 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/tegra.h>
+#include <linux/delay.h>
+
+#include "clk.h"
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00c
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_H 0x308
+#define RST_DEVICES_CLR_H 0x30c
+#define RST_DEVICES_SET_U 0x310
+#define RST_DEVICES_CLR_U 0x314
+#define RST_DEVICES_NUM 3
+
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_H 0x328
+#define CLK_OUT_ENB_CLR_H 0x32c
+#define CLK_OUT_ENB_SET_U 0x330
+#define CLK_OUT_ENB_CLR_U 0x334
+#define CLK_OUT_ENB_NUM 3
+
+#define OSC_CTRL 0x50
+#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
+#define OSC_CTRL_OSC_FREQ_13MHZ (0<<30)
+#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
+#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
+#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
+#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
+
+#define OSC_CTRL_PLL_REF_DIV_MASK (3<<28)
+#define OSC_CTRL_PLL_REF_DIV_1 (0<<28)
+#define OSC_CTRL_PLL_REF_DIV_2 (1<<28)
+#define OSC_CTRL_PLL_REF_DIV_4 (2<<28)
+
+#define OSC_FREQ_DET 0x58
+#define OSC_FREQ_DET_TRIG (1<<31)
+
+#define OSC_FREQ_DET_STATUS 0x5c
+#define OSC_FREQ_DET_BUSY (1<<31)
+#define OSC_FREQ_DET_CNT_MASK 0xFFFF
+
+#define PLLS_BASE 0xf0
+#define PLLS_MISC 0xf4
+#define PLLC_BASE 0x80
+#define PLLC_MISC 0x8c
+#define PLLM_BASE 0x90
+#define PLLM_MISC 0x9c
+#define PLLP_BASE 0xa0
+#define PLLP_MISC 0xac
+#define PLLA_BASE 0xb0
+#define PLLA_MISC 0xbc
+#define PLLU_BASE 0xc0
+#define PLLU_MISC 0xcc
+#define PLLD_BASE 0xd0
+#define PLLD_MISC 0xdc
+#define PLLX_BASE 0xe0
+#define PLLX_MISC 0xe4
+#define PLLE_BASE 0xe8
+#define PLLE_MISC 0xec
+
+#define PLL_BASE_LOCK 27
+#define PLLE_MISC_LOCK 11
+
+#define PLL_MISC_LOCK_ENABLE 18
+#define PLLDU_MISC_LOCK_ENABLE 22
+#define PLLE_MISC_LOCK_ENABLE 9
+
+#define PLLC_OUT 0x84
+#define PLLM_OUT 0x94
+#define PLLP_OUTA 0xa4
+#define PLLP_OUTB 0xa8
+#define PLLA_OUT 0xb4
+
+#define CCLK_BURST_POLICY 0x20
+#define SUPER_CCLK_DIVIDER 0x24
+#define SCLK_BURST_POLICY 0x28
+#define SUPER_SCLK_DIVIDER 0x2c
+#define CLK_SYSTEM_RATE 0x30
+
+#define CCLK_BURST_POLICY_SHIFT 28
+#define CCLK_RUN_POLICY_SHIFT 4
+#define CCLK_IDLE_POLICY_SHIFT 0
+#define CCLK_IDLE_POLICY 1
+#define CCLK_RUN_POLICY 2
+#define CCLK_BURST_POLICY_PLLX 8
+
+#define CLK_SOURCE_I2S1 0x100
+#define CLK_SOURCE_I2S2 0x104
+#define CLK_SOURCE_SPDIF_OUT 0x108
+#define CLK_SOURCE_SPDIF_IN 0x10c
+#define CLK_SOURCE_PWM 0x110
+#define CLK_SOURCE_SPI 0x114
+#define CLK_SOURCE_SBC1 0x134
+#define CLK_SOURCE_SBC2 0x118
+#define CLK_SOURCE_SBC3 0x11c
+#define CLK_SOURCE_SBC4 0x1b4
+#define CLK_SOURCE_XIO 0x120
+#define CLK_SOURCE_TWC 0x12c
+#define CLK_SOURCE_IDE 0x144
+#define CLK_SOURCE_NDFLASH 0x160
+#define CLK_SOURCE_VFIR 0x168
+#define CLK_SOURCE_SDMMC1 0x150
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC3 0x1bc
+#define CLK_SOURCE_SDMMC4 0x164
+#define CLK_SOURCE_CVE 0x140
+#define CLK_SOURCE_TVO 0x188
+#define CLK_SOURCE_TVDAC 0x194
+#define CLK_SOURCE_HDMI 0x18c
+#define CLK_SOURCE_DISP1 0x138
+#define CLK_SOURCE_DISP2 0x13c
+#define CLK_SOURCE_CSITE 0x1d4
+#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_OWR 0x1cc
+#define CLK_SOURCE_NOR 0x1d0
+#define CLK_SOURCE_MIPI 0x174
+#define CLK_SOURCE_I2C1 0x124
+#define CLK_SOURCE_I2C2 0x198
+#define CLK_SOURCE_I2C3 0x1b8
+#define CLK_SOURCE_DVC 0x128
+#define CLK_SOURCE_UARTA 0x178
+#define CLK_SOURCE_UARTB 0x17c
+#define CLK_SOURCE_UARTC 0x1a0
+#define CLK_SOURCE_UARTD 0x1c0
+#define CLK_SOURCE_UARTE 0x1c4
+#define CLK_SOURCE_3D 0x158
+#define CLK_SOURCE_2D 0x15c
+#define CLK_SOURCE_MPE 0x170
+#define CLK_SOURCE_EPP 0x16c
+#define CLK_SOURCE_HOST1X 0x180
+#define CLK_SOURCE_VDE 0x1c8
+#define CLK_SOURCE_VI 0x148
+#define CLK_SOURCE_VI_SENSOR 0x1a8
+#define CLK_SOURCE_EMC 0x19c
+
+#define AUDIO_SYNC_CLK 0x38
+
+#define PMC_CTRL 0x0
+#define PMC_CTRL_BLINK_ENB 7
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
+#define PMC_BLINK_TIMER 0x40
+
+/* Tegra CPU clock and reset control regs */
+#define TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4c
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET 0x340
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR 0x344
+
+#define CPU_CLOCK(cpu) (0x1 << (8 + cpu))
+#define CPU_RESET(cpu) (0x1111ul << (cpu))
+
+#ifdef CONFIG_PM_SLEEP
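+/* CPU clock state saved by tegra20_cpu_clock_suspend() and restored on resume */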
+static struct cpu_clk_suspend_context {
+ u32 pllx_misc;
+ u32 pllx_base;
+
+ u32 cpu_burst;
+ u32 clk_csite_src;
+ u32 cclk_divider;
+} tegra20_cpu_clk_sctx;
+#endif
+
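+/* Reference counts for the shared peripheral clock-enable bits (L/H/U banks) */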
+static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
+
+static void __iomem *clk_base;
+static void __iomem *pmc_base;
+
+static DEFINE_SPINLOCK(pll_div_lock);
+static DEFINE_SPINLOCK(sysrate_lock);
+
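+/*
+ * Shorthand initializers for the peripheral clock tables below: _MUX uses
+ * the common 8-bit U7.1 fractional divider, _INT restricts that divider to
+ * integer values, _DIV16 uses a 16-bit integer divider and _NODIV describes
+ * a plain mux/gate without a divider.
+ */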
+#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, \
+ _regs, _clk_num, periph_clk_enb_refcnt, \
+ _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+#define TEGRA_INIT_DATA_DIV16(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, _clk_num, _regs, \
+ _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, 0, 0, 0, 0, 0, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+/*
+ * IDs assigned here must be in sync with the DT bindings definition
+ * for Tegra20 clocks.
+ */
+enum tegra20_clk {
+ cpu, ac97 = 3, rtc, timer, uarta, gpio = 8, sdmmc2, i2s1 = 11, i2c1,
+ ndflash, sdmmc1, sdmmc4, twc, pwm, i2s2, epp, gr2d = 21, usbd, isp,
+ gr3d, ide, disp2, disp1, host1x, vcp, cache2 = 31, mem, ahbdma, apbdma,
+ kbc = 36, stat_mon, pmc, fuse, kfuse, sbc1, nor, spi, sbc2, xio, sbc3,
+ dvc, dsi, mipi = 50, hdmi, csi, tvdac, i2c2, uartc, emc = 57, usb2,
+ usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
+ pex, owr, afi, csite, pcie_xclk, avpucq = 75, la, irama = 84, iramb,
+ iramc, iramd, cram2, audio_2x, clk_d, csus = 92, cdev1, cdev2,
+ uartb = 96, vfir, spdif_in, spdif_out, vi, vi_sensor, tvo, cve,
+ osc, clk_32k, clk_m, sclk, cclk, hclk, pclk, blink, pll_a, pll_a_out0,
+ pll_c, pll_c_out1, pll_d, pll_d_out0, pll_e, pll_m, pll_m_out1,
+ pll_p, pll_p_out1, pll_p_out2, pll_p_out3, pll_p_out4, pll_s, pll_u,
+ pll_x, cop, audio, pll_ref, twd, clk_max,
+};
+
+static struct clk *clks[clk_max];
+static struct clk_onecell_data clk_data;
+
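+/* PLL frequency tables: { input rate, output rate, N, M, P, CPCON } */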
+static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
+ { 12000000, 600000000, 600, 12, 1, 8 },
+ { 13000000, 600000000, 600, 13, 1, 8 },
+ { 19200000, 600000000, 500, 16, 1, 6 },
+ { 26000000, 600000000, 600, 26, 1, 8 },
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
+ { 12000000, 666000000, 666, 12, 1, 8},
+ { 13000000, 666000000, 666, 13, 1, 8},
+ { 19200000, 666000000, 555, 16, 1, 8},
+ { 26000000, 666000000, 666, 26, 1, 8},
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
+ { 12000000, 216000000, 432, 12, 2, 8},
+ { 13000000, 216000000, 432, 13, 2, 8},
+ { 19200000, 216000000, 90, 4, 2, 1},
+ { 26000000, 216000000, 432, 26, 2, 8},
+ { 12000000, 432000000, 432, 12, 1, 8},
+ { 13000000, 432000000, 432, 13, 1, 8},
+ { 19200000, 432000000, 90, 4, 1, 1},
+ { 26000000, 432000000, 432, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
+ { 28800000, 56448000, 49, 25, 1, 1},
+ { 28800000, 73728000, 64, 25, 1, 1},
+ { 28800000, 24000000, 5, 6, 1, 1},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
+ { 12000000, 216000000, 216, 12, 1, 4},
+ { 13000000, 216000000, 216, 13, 1, 4},
+ { 19200000, 216000000, 135, 12, 1, 3},
+ { 26000000, 216000000, 216, 26, 1, 4},
+
+ { 12000000, 594000000, 594, 12, 1, 8},
+ { 13000000, 594000000, 594, 13, 1, 8},
+ { 19200000, 594000000, 495, 16, 1, 8},
+ { 26000000, 594000000, 594, 26, 1, 8},
+
+ { 12000000, 1000000000, 1000, 12, 1, 12},
+ { 13000000, 1000000000, 1000, 13, 1, 12},
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 12},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+ { 12000000, 480000000, 960, 12, 2, 0},
+ { 13000000, 480000000, 960, 13, 2, 0},
+ { 19200000, 480000000, 200, 4, 2, 0},
+ { 26000000, 480000000, 960, 26, 2, 0},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
+ /* 1 GHz */
+ { 12000000, 1000000000, 1000, 12, 1, 12},
+ { 13000000, 1000000000, 1000, 13, 1, 12},
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 12},
+
+ /* 912 MHz */
+ { 12000000, 912000000, 912, 12, 1, 12},
+ { 13000000, 912000000, 912, 13, 1, 12},
+ { 19200000, 912000000, 760, 16, 1, 8},
+ { 26000000, 912000000, 912, 26, 1, 12},
+
+ /* 816 MHz */
+ { 12000000, 816000000, 816, 12, 1, 12},
+ { 13000000, 816000000, 816, 13, 1, 12},
+ { 19200000, 816000000, 680, 16, 1, 8},
+ { 26000000, 816000000, 816, 26, 1, 12},
+
+ /* 760 MHz */
+ { 12000000, 760000000, 760, 12, 1, 12},
+ { 13000000, 760000000, 760, 13, 1, 12},
+ { 19200000, 760000000, 950, 24, 1, 8},
+ { 26000000, 760000000, 760, 26, 1, 12},
+
+ /* 750 MHz */
+ { 12000000, 750000000, 750, 12, 1, 12},
+ { 13000000, 750000000, 750, 13, 1, 12},
+ { 19200000, 750000000, 625, 16, 1, 8},
+ { 26000000, 750000000, 750, 26, 1, 12},
+
+ /* 608 MHz */
+ { 12000000, 608000000, 608, 12, 1, 12},
+ { 13000000, 608000000, 608, 13, 1, 12},
+ { 19200000, 608000000, 380, 12, 1, 8},
+ { 26000000, 608000000, 608, 26, 1, 12},
+
+ /* 456 MHz */
+ { 12000000, 456000000, 456, 12, 1, 12},
+ { 13000000, 456000000, 456, 13, 1, 12},
+ { 19200000, 456000000, 380, 16, 1, 8},
+ { 26000000, 456000000, 456, 26, 1, 12},
+
+ /* 312 MHz */
+ { 12000000, 312000000, 312, 12, 1, 12},
+ { 13000000, 312000000, 312, 13, 1, 12},
+ { 19200000, 312000000, 260, 16, 1, 8},
+ { 26000000, 312000000, 312, 26, 1, 12},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
+ { 12000000, 100000000, 200, 24, 1, 0 },
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+/* PLL parameters */
+static struct tegra_clk_pll_params pll_c_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLC_BASE,
+ .misc_reg = PLLC_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_m_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLM_BASE,
+ .misc_reg = PLLM_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_p_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLP_BASE,
+ .misc_reg = PLLP_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_a_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLA_BASE,
+ .misc_reg = PLLA_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_d_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD_BASE,
+ .misc_reg = PLLD_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_u_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 48000000,
+ .vco_max = 960000000,
+ .base_reg = PLLU_BASE,
+ .misc_reg = PLLU_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_x_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLX_BASE,
+ .misc_reg = PLLX_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_e_params = {
+ .input_min = 12000000,
+ .input_max = 12000000,
+ .cf_min = 0,
+ .cf_max = 0,
+ .vco_min = 0,
+ .vco_max = 0,
+ .base_reg = PLLE_BASE,
+ .misc_reg = PLLE_MISC,
+ .lock_bit_idx = PLLE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
+ .lock_delay = 0,
+};
+
+/* Peripheral clock registers */
+static struct tegra_clk_periph_regs periph_l_regs = {
+ .enb_reg = CLK_OUT_ENB_L,
+ .enb_set_reg = CLK_OUT_ENB_SET_L,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_L,
+ .rst_reg = RST_DEVICES_L,
+ .rst_set_reg = RST_DEVICES_SET_L,
+ .rst_clr_reg = RST_DEVICES_CLR_L,
+};
+
+static struct tegra_clk_periph_regs periph_h_regs = {
+ .enb_reg = CLK_OUT_ENB_H,
+ .enb_set_reg = CLK_OUT_ENB_SET_H,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_H,
+ .rst_reg = RST_DEVICES_H,
+ .rst_set_reg = RST_DEVICES_SET_H,
+ .rst_clr_reg = RST_DEVICES_CLR_H,
+};
+
+static struct tegra_clk_periph_regs periph_u_regs = {
+ .enb_reg = CLK_OUT_ENB_U,
+ .enb_set_reg = CLK_OUT_ENB_SET_U,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_U,
+ .rst_reg = RST_DEVICES_U,
+ .rst_set_reg = RST_DEVICES_SET_U,
+ .rst_clr_reg = RST_DEVICES_CLR_U,
+};
+
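+/* Determine the oscillator frequency from the OSC_CTRL frequency-select field */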
+static unsigned long tegra20_clk_measure_input_freq(void)
+{
+ u32 osc_ctrl = readl_relaxed(clk_base + OSC_CTRL);
+ u32 auto_clk_control = osc_ctrl & OSC_CTRL_OSC_FREQ_MASK;
+ u32 pll_ref_div = osc_ctrl & OSC_CTRL_PLL_REF_DIV_MASK;
+ unsigned long input_freq;
+
+ switch (auto_clk_control) {
+ case OSC_CTRL_OSC_FREQ_12MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 12000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_13MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 13000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_19_2MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 19200000;
+ break;
+ case OSC_CTRL_OSC_FREQ_26MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 26000000;
+ break;
+ default:
+ pr_err("Unexpected clock autodetect value %d",
+ auto_clk_control);
+ BUG();
+ return 0;
+ }
+
+ return input_freq;
+}
+
+static unsigned int tegra20_get_pll_ref_div(void)
+{
+ u32 pll_ref_div = readl_relaxed(clk_base + OSC_CTRL) &
+ OSC_CTRL_PLL_REF_DIV_MASK;
+
+ switch (pll_ref_div) {
+ case OSC_CTRL_PLL_REF_DIV_1:
+ return 1;
+ case OSC_CTRL_PLL_REF_DIV_2:
+ return 2;
+ case OSC_CTRL_PLL_REF_DIV_4:
+ return 4;
+ default:
+ pr_err("Invalied pll ref divider %d\n", pll_ref_div);
+ BUG();
+ }
+ return 0;
+}
+
+static void tegra20_pll_init(void)
+{
+ struct clk *clk;
+
+ /* PLLC */
+ clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_c_params, TEGRA_PLL_HAS_CPCON,
+ pll_c_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_c", NULL);
+ clks[pll_c] = clk;
+
+ /* PLLC_OUT1 */
+ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
+ clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
+ clk_base + PLLC_OUT, 1, 0, CLK_SET_RATE_PARENT,
+ 0, NULL);
+ clk_register_clkdev(clk, "pll_c_out1", NULL);
+ clks[pll_c_out1] = clk;
+
+ /* PLLP */
+ clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, NULL, 0,
+ 216000000, &pll_p_params, TEGRA_PLL_FIXED |
+ TEGRA_PLL_HAS_CPCON, pll_p_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_p", NULL);
+ clks[pll_p] = clk;
+
+ /* PLLP_OUT1 */
+ clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
+ clk_base + PLLP_OUTA, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
+ clk_base + PLLP_OUTA, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out1", NULL);
+ clks[pll_p_out1] = clk;
+
+ /* PLLP_OUT2 */
+ clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
+ clk_base + PLLP_OUTA, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 24, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
+ clk_base + PLLP_OUTA, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out2", NULL);
+ clks[pll_p_out2] = clk;
+
+ /* PLLP_OUT3 */
+ clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
+ clk_base + PLLP_OUTB, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
+ clk_base + PLLP_OUTB, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out3", NULL);
+ clks[pll_p_out3] = clk;
+
+ /* PLLP_OUT4 */
+ clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
+ clk_base + PLLP_OUTB, 0,
+ TEGRA_DIVIDER_FIXED | TEGRA_DIVIDER_ROUND_UP,
+ 24, 8, 1, &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
+ clk_base + PLLP_OUTB, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out4", NULL);
+ clks[pll_p_out4] = clk;
+
+ /* PLLM */
+ clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, NULL,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
+ &pll_m_params, TEGRA_PLL_HAS_CPCON,
+ pll_m_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_m", NULL);
+ clks[pll_m] = clk;
+
+ /* PLLM_OUT1 */
+ clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
+ clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+ clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_m_out1", NULL);
+ clks[pll_m_out1] = clk;
+
+ /* PLLX */
+ clk = tegra_clk_register_pll("pll_x", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_x_params, TEGRA_PLL_HAS_CPCON,
+ pll_x_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_x", NULL);
+ clks[pll_x] = clk;
+
+ /* PLLU */
+ clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON,
+ pll_u_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_u", NULL);
+ clks[pll_u] = clk;
+
+ /* PLLD */
+ clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, NULL, 0,
+ 0, &pll_d_params, TEGRA_PLL_HAS_CPCON,
+ pll_d_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_d", NULL);
+ clks[pll_d] = clk;
+
+ /* PLLD_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d_out0", NULL);
+ clks[pll_d_out0] = clk;
+
+ /* PLLA */
+ clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, NULL, 0,
+ 0, &pll_a_params, TEGRA_PLL_HAS_CPCON,
+ pll_a_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_a", NULL);
+ clks[pll_a] = clk;
+
+ /* PLLA_OUT0 */
+ clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
+ clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
+ clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_a_out0", NULL);
+ clks[pll_a_out0] = clk;
+
+ /* PLLE */
+ clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL,
+ 0, 100000000, &pll_e_params,
+ 0, pll_e_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_e", NULL);
+ clks[pll_e] = clk;
+}
+
+static const char *cclk_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p_cclk", "pll_p_out4_cclk",
+ "pll_p_out3_cclk", "clk_d", "pll_x" };
+static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
+ "pll_p_out3", "pll_p_out2", "clk_d",
+ "clk_32k", "pll_m_out1" };
+
+static void tegra20_super_clk_init(void)
+{
+ struct clk *clk;
+
+ /*
+ * DIV_U71 dividers for CCLK; these dividers are used only
+ * when the parent clock has a fixed rate.
+ */
+
+ /*
+ * Clock input to cclk divided from pll_p using
+ * U71 divider of cclk.
+ */
+ clk = tegra_clk_register_divider("pll_p_cclk", "pll_p",
+ clk_base + SUPER_CCLK_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_cclk", NULL);
+
+ /*
+ * Clock input to cclk divided from pll_p_out3 using
+ * U71 divider of cclk.
+ */
+ clk = tegra_clk_register_divider("pll_p_out3_cclk", "pll_p_out3",
+ clk_base + SUPER_CCLK_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out3_cclk", NULL);
+
+ /*
+ * Clock input to cclk divided from pll_p_out4 using
+ * U71 divider of cclk.
+ */
+ clk = tegra_clk_register_divider("pll_p_out4_cclk", "pll_p_out4",
+ clk_base + SUPER_CCLK_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out4_cclk", NULL);
+
+ /* CCLK */
+ clk = tegra_clk_register_super_mux("cclk", cclk_parents,
+ ARRAY_SIZE(cclk_parents), CLK_SET_RATE_PARENT,
+ clk_base + CCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "cclk", NULL);
+ clks[cclk] = clk;
+
+ /* SCLK */
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents), CLK_SET_RATE_PARENT,
+ clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "sclk", NULL);
+ clks[sclk] = clk;
+
+ /* HCLK */
+ clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
+ clk_base + CLK_SYSTEM_RATE, 4, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT,
+ clk_base + CLK_SYSTEM_RATE, 7,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "hclk", NULL);
+ clks[hclk] = clk;
+
+ /* PCLK */
+ clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
+ clk_base + CLK_SYSTEM_RATE, 0, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT,
+ clk_base + CLK_SYSTEM_RATE, 3,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "pclk", NULL);
+ clks[pclk] = clk;
+
+ /* twd */
+ clk = clk_register_fixed_factor(NULL, "twd", "cclk", 0, 1, 4);
+ clk_register_clkdev(clk, "twd", NULL);
+ clks[twd] = clk;
+}
+
+static const char *audio_parents[] = {"spdif_in", "i2s1", "i2s2", "unused",
+ "pll_a_out0", "unused", "unused",
+ "unused"};
+
+static void __init tegra20_audio_clk_init(void)
+{
+ struct clk *clk;
+
+ /* audio */
+ clk = clk_register_mux(NULL, "audio_mux", audio_parents,
+ ARRAY_SIZE(audio_parents), 0,
+ clk_base + AUDIO_SYNC_CLK, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio", "audio_mux", 0,
+ clk_base + AUDIO_SYNC_CLK, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio", NULL);
+ clks[audio] = clk;
+
+ /* audio_2x */
+ clk = clk_register_fixed_factor(NULL, "audio_doubler", "audio",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_periph_gate("audio_2x", "audio_doubler",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 89, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio_2x", NULL);
+ clks[audio_2x] = clk;
+}
+
+static const char *i2s1_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
+ "clk_m"};
+static const char *i2s2_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
+ "clk_m"};
+static const char *spdif_out_parents[] = {"pll_a_out0", "audio_2x", "pll_p",
+ "clk_m"};
+static const char *spdif_in_parents[] = {"pll_p", "pll_c", "pll_m"};
+static const char *pwm_parents[] = {"pll_p", "pll_c", "audio", "clk_m",
+ "clk_32k"};
+static const char *mux_pllpcm_clkm[] = {"pll_p", "pll_c", "pll_m", "clk_m"};
+static const char *mux_pllmcpa[] = {"pll_m", "pll_c", "pll_c", "pll_a"};
+static const char *mux_pllpdc_clkm[] = {"pll_p", "pll_d_out0", "pll_c",
+ "clk_m"};
+static const char *mux_pllmcp_clkm[] = {"pll_m", "pll_c", "pll_p", "clk_m"};
+
+static struct tegra_periph_init_data tegra_periph_clk_list[] = {
+ TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra20-i2s.0", i2s1_parents, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
+ TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra20-i2s.1", i2s2_parents, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
+ TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra20-spdif", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
+ TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra20-spdif", spdif_in_parents, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
+ TEGRA_INIT_DATA_MUX("sbc1", NULL, "spi_tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
+ TEGRA_INIT_DATA_MUX("sbc2", NULL, "spi_tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
+ TEGRA_INIT_DATA_MUX("sbc3", NULL, "spi_tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
+ TEGRA_INIT_DATA_MUX("sbc4", NULL, "spi_tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
+ TEGRA_INIT_DATA_MUX("spi", NULL, "spi", mux_pllpcm_clkm, CLK_SOURCE_SPI, 43, &periph_h_regs, TEGRA_PERIPH_ON_APB, spi),
+ TEGRA_INIT_DATA_MUX("xio", NULL, "xio", mux_pllpcm_clkm, CLK_SOURCE_XIO, 45, &periph_h_regs, 0, xio),
+ TEGRA_INIT_DATA_MUX("twc", NULL, "twc", mux_pllpcm_clkm, CLK_SOURCE_TWC, 16, &periph_l_regs, TEGRA_PERIPH_ON_APB, twc),
+ TEGRA_INIT_DATA_MUX("ide", NULL, "ide", mux_pllpcm_clkm, CLK_SOURCE_XIO, 25, &periph_l_regs, 0, ide),
+ TEGRA_INIT_DATA_MUX("ndflash", NULL, "tegra_nand", mux_pllpcm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_l_regs, 0, ndflash),
+ TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllpcm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
+ TEGRA_INIT_DATA_MUX("csite", NULL, "csite", mux_pllpcm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, 0, csite),
+ TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllpcm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, 0, la),
+ TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllpcm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
+ TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllpcm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
+ TEGRA_INIT_DATA_MUX("vde", NULL, "vde", mux_pllpcm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
+ TEGRA_INIT_DATA_MUX("vi", "vi", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
+ TEGRA_INIT_DATA_MUX("epp", NULL, "epp", mux_pllmcpa, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
+ TEGRA_INIT_DATA_MUX("mpe", NULL, "mpe", mux_pllmcpa, CLK_SOURCE_MPE, 60, &periph_h_regs, 0, mpe),
+ TEGRA_INIT_DATA_MUX("host1x", NULL, "host1x", mux_pllmcpa, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
+ TEGRA_INIT_DATA_MUX("3d", NULL, "3d", mux_pllmcpa, CLK_SOURCE_3D, 24, &periph_l_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d),
+ TEGRA_INIT_DATA_MUX("2d", NULL, "2d", mux_pllmcpa, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr2d),
+ TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllpcm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
+ TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
+ TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
+ TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
+ TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
+ TEGRA_INIT_DATA_MUX("cve", NULL, "cve", mux_pllpdc_clkm, CLK_SOURCE_CVE, 49, &periph_h_regs, 0, cve),
+ TEGRA_INIT_DATA_MUX("tvo", NULL, "tvo", mux_pllpdc_clkm, CLK_SOURCE_TVO, 49, &periph_h_regs, 0, tvo),
+ TEGRA_INIT_DATA_MUX("tvdac", NULL, "tvdac", mux_pllpdc_clkm, CLK_SOURCE_TVDAC, 53, &periph_h_regs, 0, tvdac),
+ TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
+ TEGRA_INIT_DATA_DIV16("i2c1", "div-clk", "tegra-i2c.0", mux_pllpcm_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2c1),
+ TEGRA_INIT_DATA_DIV16("i2c2", "div-clk", "tegra-i2c.1", mux_pllpcm_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c2),
+ TEGRA_INIT_DATA_DIV16("i2c3", "div-clk", "tegra-i2c.2", mux_pllpcm_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2c3),
+ TEGRA_INIT_DATA_DIV16("dvc", "div-clk", "tegra-i2c.3", mux_pllpcm_clkm, CLK_SOURCE_DVC, 47, &periph_h_regs, TEGRA_PERIPH_ON_APB, dvc),
+ TEGRA_INIT_DATA_MUX("hdmi", NULL, "hdmi", mux_pllpdc_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
+ TEGRA_INIT_DATA("pwm", NULL, "tegra-pwm", pwm_parents, CLK_SOURCE_PWM, 28, 3, 0, 0, 8, 1, 0, &periph_l_regs, 17, periph_clk_enb_refcnt, TEGRA_PERIPH_ON_APB, pwm),
+};
+
+static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
+ TEGRA_INIT_DATA_NODIV("uarta", NULL, "tegra_uart.0", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 30, 2, 6, &periph_l_regs, TEGRA_PERIPH_ON_APB, uarta),
+ TEGRA_INIT_DATA_NODIV("uartb", NULL, "tegra_uart.1", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 30, 2, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, uartb),
+ TEGRA_INIT_DATA_NODIV("uartc", NULL, "tegra_uart.2", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 30, 2, 55, &periph_h_regs, TEGRA_PERIPH_ON_APB, uartc),
+ TEGRA_INIT_DATA_NODIV("uartd", NULL, "tegra_uart.3", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 30, 2, 65, &periph_u_regs, TEGRA_PERIPH_ON_APB, uartd),
+ TEGRA_INIT_DATA_NODIV("uarte", NULL, "tegra_uart.4", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 30, 2, 66, &periph_u_regs, TEGRA_PERIPH_ON_APB, uarte),
+ TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllpdc_clkm, CLK_SOURCE_DISP1, 30, 2, 27, &periph_l_regs, 0, disp1),
+ TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, &periph_l_regs, 0, disp2),
+};
+
+static void __init tegra20_periph_clk_init(void)
+{
+ struct tegra_periph_init_data *data;
+ struct clk *clk;
+ int i;
+
+ /* apbdma */
+ clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
+ 0, 34, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-apbdma");
+ clks[apbdma] = clk;
+
+ /* rtc */
+ clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET,
+ clk_base, 0, 4, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "rtc-tegra");
+ clks[rtc] = clk;
+
+ /* timer */
+ clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base,
+ 0, 5, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "timer");
+ clks[timer] = clk;
+
+ /* kbc */
+ clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 36, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-kbc");
+ clks[kbc] = clk;
+
+ /* csus */
+ clk = tegra_clk_register_periph_gate("csus", "clk_m",
+ TEGRA_PERIPH_NO_RESET,
+ clk_base, 0, 92, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csus", "tengra_camera");
+ clks[csus] = clk;
+
+ /* vcp */
+ clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0,
+ clk_base, 0, 29, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "vcp", "tegra-avp");
+ clks[vcp] = clk;
+
+ /* bsea */
+ clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0,
+ clk_base, 0, 62, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsea", "tegra-avp");
+ clks[bsea] = clk;
+
+ /* bsev */
+ clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0,
+ clk_base, 0, 63, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsev", "tegra-aes");
+ clks[bsev] = clk;
+
+ /* emc */
+ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
+ ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ clk_base + CLK_SOURCE_EMC,
+ 30, 2, 0, NULL);
+ clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
+ 57, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "emc", NULL);
+ clks[emc] = clk;
+
+ /* usbd */
+ clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base, 0,
+ 22, &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "fsl-tegra-udc");
+ clks[usbd] = clk;
+
+ /* usb2 */
+ clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base, 0,
+ 58, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.1");
+ clks[usb2] = clk;
+
+ /* usb3 */
+ clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base, 0,
+ 59, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.2");
+ clks[usb3] = clk;
+
+ /* dsi */
+ clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,
+ 48, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "dsi");
+ clks[dsi] = clk;
+
+ /* csi */
+ clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
+ 0, 52, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csi", "tegra_camera");
+ clks[csi] = clk;
+
+ /* isp */
+ clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0, 23,
+ &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "isp", "tegra_camera");
+ clks[isp] = clk;
+
+ /* pex */
+ clk = tegra_clk_register_periph_gate("pex", "clk_m", 0, clk_base, 0, 70,
+ &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "pex", NULL);
+ clks[pex] = clk;
+
+ /* afi */
+ clk = tegra_clk_register_periph_gate("afi", "clk_m", 0, clk_base, 0, 72,
+ &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "afi", NULL);
+ clks[afi] = clk;
+
+ /* pcie_xclk */
+ clk = tegra_clk_register_periph_gate("pcie_xclk", "clk_m", 0, clk_base,
+ 0, 74, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "pcie_xclk", NULL);
+ clks[pcie_xclk] = clk;
+
+ /* cdev1 */
+ clk = clk_register_fixed_rate(NULL, "cdev1_fixed", NULL, CLK_IS_ROOT,
+ 26000000);
+ clk = tegra_clk_register_periph_gate("cdev1", "cdev1_fixed", 0,
+ clk_base, 0, 94, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "cdev1", NULL);
+ clks[cdev1] = clk;
+
+ /* cdev2 */
+ clk = clk_register_fixed_rate(NULL, "cdev2_fixed", NULL, CLK_IS_ROOT,
+ 26000000);
+ clk = tegra_clk_register_periph_gate("cdev2", "cdev2_fixed", 0,
+ clk_base, 0, 93, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "cdev2", NULL);
+ clks[cdev2] = clk;
+
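+ /* Register the remaining peripherals from the mux/div/gate tables above */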
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
+ data = &tegra_periph_clk_list[i];
+ clk = tegra_clk_register_periph(data->name, data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
+ data = &tegra_periph_nodiv_clk_list[i];
+ clk = tegra_clk_register_periph_nodiv(data->name,
+ data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+}
+
+static void __init tegra20_fixed_clk_init(void)
+{
+ struct clk *clk;
+
+ /* clk_32k */
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
+ 32768);
+ clk_register_clkdev(clk, "clk_32k", NULL);
+ clks[clk_32k] = clk;
+}
+
+static void __init tegra20_pmc_clk_init(void)
+{
+ struct clk *clk;
+
+ /* blink */
+ writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
+ clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
+ pmc_base + PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
+ clk = clk_register_gate(NULL, "blink", "blink_override", 0,
+ pmc_base + PMC_CTRL,
+ PMC_CTRL_BLINK_ENB, 0, NULL);
+ clk_register_clkdev(clk, "blink", NULL);
+ clks[blink] = clk;
+}
+
+static void __init tegra20_osc_clk_init(void)
+{
+ struct clk *clk;
+ unsigned long input_freq;
+ unsigned int pll_ref_div;
+
+ input_freq = tegra20_clk_measure_input_freq();
+
+ /* clk_m */
+ clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT |
+ CLK_IGNORE_UNUSED, input_freq);
+ clk_register_clkdev(clk, "clk_m", NULL);
+ clks[clk_m] = clk;
+
+ /* pll_ref */
+ pll_ref_div = tegra20_get_pll_ref_div();
+ clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
+ CLK_SET_RATE_PARENT, 1, pll_ref_div);
+ clk_register_clkdev(clk, "pll_ref", NULL);
+ clks[pll_ref] = clk;
+}
+
+/* Tegra20 CPU clock and reset control functions */
+static void tegra20_wait_cpu_in_reset(u32 cpu)
+{
+ unsigned int reg;
+
+ do {
+ reg = readl(clk_base +
+ TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+ cpu_relax();
+ } while (!(reg & (1 << cpu))); /* wait until the CPU is held in reset */
+}
+
+static void tegra20_put_cpu_in_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+ dmb();
+}
+
+static void tegra20_cpu_out_of_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
+ wmb();
+}
+
+static void tegra20_enable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg & ~CPU_CLOCK(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ barrier();
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+}
+
+static void tegra20_disable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg | CPU_CLOCK(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static bool tegra20_cpu_rail_off_ready(void)
+{
+ unsigned int cpu_rst_status;
+
+ cpu_rst_status = readl(clk_base +
+ TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+
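+ /* The CPU rail may be turned off only while CPU1 is held in reset */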
+ return !!(cpu_rst_status & 0x2);
+}
+
+static void tegra20_cpu_clock_suspend(void)
+{
+ /* switch coresite to clk_m, save off original source */
+ tegra20_cpu_clk_sctx.clk_csite_src =
+ readl(clk_base + CLK_SOURCE_CSITE);
+ writel(3<<30, clk_base + CLK_SOURCE_CSITE);
+
+ tegra20_cpu_clk_sctx.cpu_burst =
+ readl(clk_base + CCLK_BURST_POLICY);
+ tegra20_cpu_clk_sctx.pllx_base =
+ readl(clk_base + PLLX_BASE);
+ tegra20_cpu_clk_sctx.pllx_misc =
+ readl(clk_base + PLLX_MISC);
+ tegra20_cpu_clk_sctx.cclk_divider =
+ readl(clk_base + SUPER_CCLK_DIVIDER);
+}
+
+static void tegra20_cpu_clock_resume(void)
+{
+ unsigned int reg, policy;
+
+ /* Is CPU complex already running on PLLX? */
+ reg = readl(clk_base + CCLK_BURST_POLICY);
+ policy = (reg >> CCLK_BURST_POLICY_SHIFT) & 0xF;
+
+ if (policy == CCLK_IDLE_POLICY)
+ reg = (reg >> CCLK_IDLE_POLICY_SHIFT) & 0xF;
+ else if (policy == CCLK_RUN_POLICY)
+ reg = (reg >> CCLK_RUN_POLICY_SHIFT) & 0xF;
+ else
+ BUG();
+
+ if (reg != CCLK_BURST_POLICY_PLLX) {
+ /* restore PLLX settings if CPU is on different PLL */
+ writel(tegra20_cpu_clk_sctx.pllx_misc,
+ clk_base + PLLX_MISC);
+ writel(tegra20_cpu_clk_sctx.pllx_base,
+ clk_base + PLLX_BASE);
+
+ /* wait for PLL stabilization if PLLX was enabled */
+ if (tegra20_cpu_clk_sctx.pllx_base & (1 << 30))
+ udelay(300);
+ }
+
+ /*
+ * Restore original burst policy setting for calls resulting from CPU
+ * LP2 in idle or system suspend.
+ */
+ writel(tegra20_cpu_clk_sctx.cclk_divider,
+ clk_base + SUPER_CCLK_DIVIDER);
+ writel(tegra20_cpu_clk_sctx.cpu_burst,
+ clk_base + CCLK_BURST_POLICY);
+
+ writel(tegra20_cpu_clk_sctx.clk_csite_src,
+ clk_base + CLK_SOURCE_CSITE);
+}
+#endif
+
+static struct tegra_cpu_car_ops tegra20_cpu_car_ops = {
+ .wait_for_reset = tegra20_wait_cpu_in_reset,
+ .put_in_reset = tegra20_put_cpu_in_reset,
+ .out_of_reset = tegra20_cpu_out_of_reset,
+ .enable_clock = tegra20_enable_cpu_clock,
+ .disable_clock = tegra20_disable_cpu_clock,
+#ifdef CONFIG_PM_SLEEP
+ .rail_off_ready = tegra20_cpu_rail_off_ready,
+ .suspend = tegra20_cpu_clock_suspend,
+ .resume = tegra20_cpu_clock_resume,
+#endif
+};
+
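+/* Initial clock configuration: { clock, parent, rate, enable } */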
+static __initdata struct tegra_clk_init_table init_table[] = {
+ {pll_p, clk_max, 216000000, 1},
+ {pll_p_out1, clk_max, 28800000, 1},
+ {pll_p_out2, clk_max, 48000000, 1},
+ {pll_p_out3, clk_max, 72000000, 1},
+ {pll_p_out4, clk_max, 24000000, 1},
+ {pll_c, clk_max, 600000000, 1},
+ {pll_c_out1, clk_max, 120000000, 1},
+ {sclk, pll_c_out1, 0, 1},
+ {hclk, clk_max, 0, 1},
+ {pclk, clk_max, 60000000, 1},
+ {csite, clk_max, 0, 1},
+ {emc, clk_max, 0, 1},
+ {cclk, clk_max, 0, 1},
+ {uarta, pll_p, 0, 0},
+ {uartb, pll_p, 0, 0},
+ {uartc, pll_p, 0, 0},
+ {uartd, pll_p, 0, 0},
+ {uarte, pll_p, 0, 0},
+ {usbd, clk_max, 12000000, 0},
+ {usb2, clk_max, 12000000, 0},
+ {usb3, clk_max, 12000000, 0},
+ {pll_a, clk_max, 56448000, 1},
+ {pll_a_out0, clk_max, 11289600, 1},
+ {cdev1, clk_max, 0, 1},
+ {blink, clk_max, 32768, 1},
+ {i2s1, pll_a_out0, 11289600, 0},
+ {i2s2, pll_a_out0, 11289600, 0},
+ {sdmmc1, pll_p, 48000000, 0},
+ {sdmmc3, pll_p, 48000000, 0},
+ {sdmmc4, pll_p, 48000000, 0},
+ {spi, pll_p, 20000000, 0},
+ {sbc1, pll_p, 100000000, 0},
+ {sbc2, pll_p, 100000000, 0},
+ {sbc3, pll_p, 100000000, 0},
+ {sbc4, pll_p, 100000000, 0},
+ {host1x, pll_c, 150000000, 0},
+ {disp1, pll_p, 600000000, 0},
+ {disp2, pll_p, 600000000, 0},
+ {clk_max, clk_max, 0, 0}, /* This MUST be the last entry */
+};
+
+/*
+ * Some clocks may be used by different drivers depending on the board
+ * configuration. List those here to register them twice in the clock lookup
+ * table under two names.
+ */
+static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
+ TEGRA_CLK_DUPLICATE(usbd, "utmip-pad", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
+ TEGRA_CLK_DUPLICATE(cclk, NULL, "cpu"),
+ TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL),
+ TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* Must be the last entry */
+};
+
+static const struct of_device_id pmc_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-pmc" },
+ {},
+};
+
+void __init tegra20_clock_init(struct device_node *np)
+{
+ int i;
+ struct device_node *node;
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("Can't map CAR registers\n");
+ BUG();
+ }
+
+ node = of_find_matching_node(NULL, pmc_match);
+ if (!node) {
+ pr_err("Failed to find pmc node\n");
+ BUG();
+ }
+
+ pmc_base = of_iomap(node, 0);
+ if (!pmc_base) {
+ pr_err("Can't map pmc registers\n");
+ BUG();
+ }
+
+ tegra20_osc_clk_init();
+ tegra20_pmc_clk_init();
+ tegra20_fixed_clk_init();
+ tegra20_pll_init();
+ tegra20_super_clk_init();
+ tegra20_periph_clk_init();
+ tegra20_audio_clk_init();
+
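+ /*
+ * Registration failures are fatal; slots that were never filled are
+ * turned into error pointers so clock lookups fail cleanly.
+ */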
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ if (IS_ERR(clks[i])) {
+ pr_err("Tegra20 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ BUG();
+ }
+ if (!clks[i])
+ clks[i] = ERR_PTR(-EINVAL);
+ }
+
+ tegra_init_dup_clks(tegra_clk_duplicates, clks, clk_max);
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ tegra_init_from_table(init_table, clks, clk_max);
+
+ tegra_cpu_car_ops = &tegra20_cpu_car_ops;
+}
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
new file mode 100644
index 000000000000..32c61cb6d0bb
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -0,0 +1,1994 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/tegra.h>
+
+#include <mach/powergate.h>
+
+#include "clk.h"
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00c
+#define RST_DEVICES_V 0x358
+#define RST_DEVICES_W 0x35c
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_H 0x308
+#define RST_DEVICES_CLR_H 0x30c
+#define RST_DEVICES_SET_U 0x310
+#define RST_DEVICES_CLR_U 0x314
+#define RST_DEVICES_SET_V 0x430
+#define RST_DEVICES_CLR_V 0x434
+#define RST_DEVICES_SET_W 0x438
+#define RST_DEVICES_CLR_W 0x43c
+#define RST_DEVICES_NUM 5
+
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_V 0x360
+#define CLK_OUT_ENB_W 0x364
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_H 0x328
+#define CLK_OUT_ENB_CLR_H 0x32c
+#define CLK_OUT_ENB_SET_U 0x330
+#define CLK_OUT_ENB_CLR_U 0x334
+#define CLK_OUT_ENB_SET_V 0x440
+#define CLK_OUT_ENB_CLR_V 0x444
+#define CLK_OUT_ENB_SET_W 0x448
+#define CLK_OUT_ENB_CLR_W 0x44c
+#define CLK_OUT_ENB_NUM 5
+
+#define OSC_CTRL 0x50
+#define OSC_CTRL_OSC_FREQ_MASK (0xF<<28)
+#define OSC_CTRL_OSC_FREQ_13MHZ (0x0<<28)
+#define OSC_CTRL_OSC_FREQ_19_2MHZ (0x4<<28)
+#define OSC_CTRL_OSC_FREQ_12MHZ (0x8<<28)
+#define OSC_CTRL_OSC_FREQ_26MHZ (0xc<<28)
+#define OSC_CTRL_OSC_FREQ_16_8MHZ (0x1<<28)
+#define OSC_CTRL_OSC_FREQ_38_4MHZ (0x5<<28)
+#define OSC_CTRL_OSC_FREQ_48MHZ (0x9<<28)
+#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
+
+#define OSC_CTRL_PLL_REF_DIV_MASK (3<<26)
+#define OSC_CTRL_PLL_REF_DIV_1 (0<<26)
+#define OSC_CTRL_PLL_REF_DIV_2 (1<<26)
+#define OSC_CTRL_PLL_REF_DIV_4 (2<<26)
+
+#define OSC_FREQ_DET 0x58
+#define OSC_FREQ_DET_TRIG BIT(31)
+
+#define OSC_FREQ_DET_STATUS 0x5c
+#define OSC_FREQ_DET_BUSY BIT(31)
+#define OSC_FREQ_DET_CNT_MASK 0xffff
+
+#define CCLKG_BURST_POLICY 0x368
+#define SUPER_CCLKG_DIVIDER 0x36c
+#define CCLKLP_BURST_POLICY 0x370
+#define SUPER_CCLKLP_DIVIDER 0x374
+#define SCLK_BURST_POLICY 0x028
+#define SUPER_SCLK_DIVIDER 0x02c
+
+#define SYSTEM_CLK_RATE 0x030
+
+#define PLLC_BASE 0x80
+#define PLLC_MISC 0x8c
+#define PLLM_BASE 0x90
+#define PLLM_MISC 0x9c
+#define PLLP_BASE 0xa0
+#define PLLP_MISC 0xac
+#define PLLX_BASE 0xe0
+#define PLLX_MISC 0xe4
+#define PLLD_BASE 0xd0
+#define PLLD_MISC 0xdc
+#define PLLD2_BASE 0x4b8
+#define PLLD2_MISC 0x4bc
+#define PLLE_BASE 0xe8
+#define PLLE_MISC 0xec
+#define PLLA_BASE 0xb0
+#define PLLA_MISC 0xbc
+#define PLLU_BASE 0xc0
+#define PLLU_MISC 0xcc
+
+#define PLL_MISC_LOCK_ENABLE 18
+#define PLLDU_MISC_LOCK_ENABLE 22
+#define PLLE_MISC_LOCK_ENABLE 9
+
+#define PLL_BASE_LOCK 27
+#define PLLE_MISC_LOCK 11
+
+#define PLLE_AUX 0x48c
+#define PLLC_OUT 0x84
+#define PLLM_OUT 0x94
+#define PLLP_OUTA 0xa4
+#define PLLP_OUTB 0xa8
+#define PLLA_OUT 0xb4
+
+#define AUDIO_SYNC_CLK_I2S0 0x4a0
+#define AUDIO_SYNC_CLK_I2S1 0x4a4
+#define AUDIO_SYNC_CLK_I2S2 0x4a8
+#define AUDIO_SYNC_CLK_I2S3 0x4ac
+#define AUDIO_SYNC_CLK_I2S4 0x4b0
+#define AUDIO_SYNC_CLK_SPDIF 0x4b4
+
+#define PMC_CLK_OUT_CNTRL 0x1a8
+
+#define CLK_SOURCE_I2S0 0x1d8
+#define CLK_SOURCE_I2S1 0x100
+#define CLK_SOURCE_I2S2 0x104
+#define CLK_SOURCE_I2S3 0x3bc
+#define CLK_SOURCE_I2S4 0x3c0
+#define CLK_SOURCE_SPDIF_OUT 0x108
+#define CLK_SOURCE_SPDIF_IN 0x10c
+#define CLK_SOURCE_PWM 0x110
+#define CLK_SOURCE_D_AUDIO 0x3d0
+#define CLK_SOURCE_DAM0 0x3d8
+#define CLK_SOURCE_DAM1 0x3dc
+#define CLK_SOURCE_DAM2 0x3e0
+#define CLK_SOURCE_HDA 0x428
+#define CLK_SOURCE_HDA2CODEC_2X 0x3e4
+#define CLK_SOURCE_SBC1 0x134
+#define CLK_SOURCE_SBC2 0x118
+#define CLK_SOURCE_SBC3 0x11c
+#define CLK_SOURCE_SBC4 0x1b4
+#define CLK_SOURCE_SBC5 0x3c8
+#define CLK_SOURCE_SBC6 0x3cc
+#define CLK_SOURCE_SATA_OOB 0x420
+#define CLK_SOURCE_SATA 0x424
+#define CLK_SOURCE_NDFLASH 0x160
+#define CLK_SOURCE_NDSPEED 0x3f8
+#define CLK_SOURCE_VFIR 0x168
+#define CLK_SOURCE_SDMMC1 0x150
+#define CLK_SOURCE_SDMMC2 0x154
+#define CLK_SOURCE_SDMMC3 0x1bc
+#define CLK_SOURCE_SDMMC4 0x164
+#define CLK_SOURCE_VDE 0x1c8
+#define CLK_SOURCE_CSITE 0x1d4
+#define CLK_SOURCE_LA 0x1f8
+#define CLK_SOURCE_OWR 0x1cc
+#define CLK_SOURCE_NOR 0x1d0
+#define CLK_SOURCE_MIPI 0x174
+#define CLK_SOURCE_I2C1 0x124
+#define CLK_SOURCE_I2C2 0x198
+#define CLK_SOURCE_I2C3 0x1b8
+#define CLK_SOURCE_I2C4 0x3c4
+#define CLK_SOURCE_I2C5 0x128
+#define CLK_SOURCE_UARTA 0x178
+#define CLK_SOURCE_UARTB 0x17c
+#define CLK_SOURCE_UARTC 0x1a0
+#define CLK_SOURCE_UARTD 0x1c0
+#define CLK_SOURCE_UARTE 0x1c4
+#define CLK_SOURCE_VI 0x148
+#define CLK_SOURCE_VI_SENSOR 0x1a8
+#define CLK_SOURCE_3D 0x158
+#define CLK_SOURCE_3D2 0x3b0
+#define CLK_SOURCE_2D 0x15c
+#define CLK_SOURCE_EPP 0x16c
+#define CLK_SOURCE_MPE 0x170
+#define CLK_SOURCE_HOST1X 0x180
+#define CLK_SOURCE_CVE 0x140
+#define CLK_SOURCE_TVO 0x188
+#define CLK_SOURCE_DTV 0x1dc
+#define CLK_SOURCE_HDMI 0x18c
+#define CLK_SOURCE_TVDAC 0x194
+#define CLK_SOURCE_DISP1 0x138
+#define CLK_SOURCE_DISP2 0x13c
+#define CLK_SOURCE_DSIB 0xd0
+#define CLK_SOURCE_TSENSOR 0x3b8
+#define CLK_SOURCE_ACTMON 0x3e8
+#define CLK_SOURCE_EXTERN1 0x3ec
+#define CLK_SOURCE_EXTERN2 0x3f0
+#define CLK_SOURCE_EXTERN3 0x3f4
+#define CLK_SOURCE_I2CSLOW 0x3fc
+#define CLK_SOURCE_SE 0x42c
+#define CLK_SOURCE_MSELECT 0x3b4
+#define CLK_SOURCE_EMC 0x19c
+
+#define AUDIO_SYNC_DOUBLER 0x49c
+
+#define PMC_CTRL 0
+#define PMC_CTRL_BLINK_ENB 7
+
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20
+#define PMC_BLINK_TIMER 0x40
+
+#define UTMIP_PLL_CFG2 0x488
+#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
+#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4)
+
+#define UTMIP_PLL_CFG1 0x484
+#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6)
+#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16)
+
+/* Tegra CPU clock and reset control regs */
+#define TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4c
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET 0x340
+#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR 0x344
+#define TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR 0x34c
+#define TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470
+
+#define CPU_CLOCK(cpu) (0x1 << (8 + cpu))
+#define CPU_RESET(cpu) (0x1111ul << (cpu))
+
+#define CLK_RESET_CCLK_BURST 0x20
+#define CLK_RESET_CCLK_DIVIDER 0x24
+#define CLK_RESET_PLLX_BASE 0xe0
+#define CLK_RESET_PLLX_MISC 0xe4
+
+#define CLK_RESET_SOURCE_CSITE 0x1d4
+
+#define CLK_RESET_CCLK_BURST_POLICY_SHIFT 28
+#define CLK_RESET_CCLK_RUN_POLICY_SHIFT 4
+#define CLK_RESET_CCLK_IDLE_POLICY_SHIFT 0
+#define CLK_RESET_CCLK_IDLE_POLICY 1
+#define CLK_RESET_CCLK_RUN_POLICY 2
+#define CLK_RESET_CCLK_BURST_POLICY_PLLX 8
+
+#ifdef CONFIG_PM_SLEEP
+static struct cpu_clk_suspend_context {
+ u32 pllx_misc;
+ u32 pllx_base;
+
+ u32 cpu_burst;
+ u32 clk_csite_src;
+ u32 cclk_divider;
+} tegra30_cpu_clk_sctx;
+#endif
+
+static int periph_clk_enb_refcnt[CLK_OUT_ENB_NUM * 32];
+
+static void __iomem *clk_base;
+static void __iomem *pmc_base;
+static unsigned long input_freq;
+
+static DEFINE_SPINLOCK(clk_doubler_lock);
+static DEFINE_SPINLOCK(clk_out_lock);
+static DEFINE_SPINLOCK(pll_div_lock);
+static DEFINE_SPINLOCK(cml_lock);
+static DEFINE_SPINLOCK(pll_d_lock);
+static DEFINE_SPINLOCK(sysrate_lock);
+
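+/*
+ * Shorthand initializers for the Tegra30 peripheral clock tables: _MUX and
+ * _MUX8 use the 8-bit U7.1 fractional divider behind 2- and 3-bit muxes,
+ * _INT restricts that divider to integer values, _DIV16 uses a 16-bit
+ * divider, _UART selects the UART divider mode and _NODIV has no divider.
+ */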
+#define TEGRA_INIT_DATA_MUX(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, 0, _regs, _clk_num, \
+ periph_clk_enb_refcnt, _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_DIV16(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 16, 0, TEGRA_DIVIDER_ROUND_UP, \
+ _regs, _clk_num, periph_clk_enb_refcnt, \
+ _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_MUX8(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 29, 3, 0, 0, 8, 1, 0, _regs, _clk_num, \
+ periph_clk_enb_refcnt, _gate_flags, _clk_id)
+
+#define TEGRA_INIT_DATA_INT(_name, _con_id, _dev_id, _parents, _offset, \
+ _clk_num, _regs, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 8, 1, TEGRA_DIVIDER_INT, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+#define TEGRA_INIT_DATA_UART(_name, _con_id, _dev_id, _parents, _offset,\
+ _clk_num, _regs, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ 30, 2, 0, 0, 16, 1, TEGRA_DIVIDER_UART, _regs, \
+ _clk_num, periph_clk_enb_refcnt, 0, _clk_id)
+
+#define TEGRA_INIT_DATA_NODIV(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, _clk_num, _regs, \
+ _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parents, _offset, \
+ _mux_shift, _mux_width, 0, 0, 0, 0, 0, _regs, \
+ _clk_num, periph_clk_enb_refcnt, _gate_flags, \
+ _clk_id)
+
+/*
+ * IDs assigned here must be in sync with DT bindings definition
+ * for Tegra30 clocks.
+ */
+enum tegra30_clk {
+ cpu, rtc = 4, timer, uarta, gpio = 8, sdmmc2, i2s1 = 11, i2c1, ndflash,
+ sdmmc1, sdmmc4, pwm = 17, i2s2, epp, gr2d = 21, usbd, isp, gr3d,
+ disp2 = 26, disp1, host1x, vcp, i2s0, cop_cache, mc, ahbdma, apbdma,
+ kbc = 36, statmon, pmc, kfuse = 40, sbc1, nor, sbc2 = 44, sbc3 = 46,
+ i2c5, dsia, mipi = 50, hdmi, csi, tvdac, i2c2, uartc, emc = 57, usb2,
+ usb3, mpe, vde, bsea, bsev, speedo, uartd, uarte, i2c3, sbc4, sdmmc3,
+ pcie, owr, afi, csite, pciex, avpucq, la, dtv = 79, ndspeed, i2cslow,
+ dsib, irama = 84, iramb, iramc, iramd, cram2, audio_2x = 90, csus = 92,
+ cdev1, cdev2, cpu_g = 96, cpu_lp, gr3d2, mselect, tsensor, i2s3, i2s4,
+ i2c4, sbc5, sbc6, d_audio, apbif, dam0, dam1, dam2, hda2codec_2x,
+ atomics, audio0_2x, audio1_2x, audio2_2x, audio3_2x, audio4_2x,
+ spdif_2x, actmon, extern1, extern2, extern3, sata_oob, sata, hda,
+ se = 127, hda2hdmi, sata_cold, uartb = 160, vfir, spdif_in, spdif_out,
+ vi, vi_sensor, fuse, fuse_burn, cve, tvo, clk_32k, clk_m, clk_m_div2,
+ clk_m_div4, pll_ref, pll_c, pll_c_out1, pll_m, pll_m_out1, pll_p,
+ pll_p_out1, pll_p_out2, pll_p_out3, pll_p_out4, pll_a, pll_a_out0,
+ pll_d, pll_d_out0, pll_d2, pll_d2_out0, pll_u, pll_x, pll_x_out0, pll_e,
+ spdif_in_sync, i2s0_sync, i2s1_sync, i2s2_sync, i2s3_sync, i2s4_sync,
+ vimclk_sync, audio0, audio1, audio2, audio3, audio4, spdif, clk_out_1,
+ clk_out_2, clk_out_3, sclk, blink, cclk_g, cclk_lp, twd, cml0, cml1,
+ hclk, pclk, clk_out_1_mux = 300, clk_max
+};
+
+static struct clk *clks[clk_max];
+static struct clk_onecell_data clk_data;
+
+/*
+ * Structure defining the USB UTMI clock parameters.
+ */
+struct utmi_clk_param {
+ /* Oscillator frequency in Hz */
+ u32 osc_frequency;
+ /* UTMIP PLL Enable Delay Count */
+ u8 enable_delay_count;
+ /* UTMIP PLL Stable count */
+ u8 stable_count;
+ /* UTMIP PLL Active delay count */
+ u8 active_delay_count;
+ /* UTMIP PLL Xtal frequency count */
+ u8 xtal_freq_count;
+};
+
+static const struct utmi_clk_param utmi_parameters[] = {
+/* OSC_FREQUENCY, ENABLE_DLY, STABLE_CNT, ACTIVE_DLY, XTAL_FREQ_CNT */
+ {13000000, 0x02, 0x33, 0x05, 0x7F},
+ {19200000, 0x03, 0x4B, 0x06, 0xBB},
+ {12000000, 0x02, 0x2F, 0x04, 0x76},
+ {26000000, 0x04, 0x66, 0x09, 0xFE},
+ {16800000, 0x03, 0x41, 0x0A, 0xA4},
+};
+
+static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
+ { 12000000, 1040000000, 520, 6, 1, 8},
+ { 13000000, 1040000000, 480, 6, 1, 8},
+ { 16800000, 1040000000, 495, 8, 1, 8}, /* actual: 1039.5 MHz */
+ { 19200000, 1040000000, 325, 6, 1, 6},
+ { 26000000, 1040000000, 520, 13, 1, 8},
+
+ { 12000000, 832000000, 416, 6, 1, 8},
+ { 13000000, 832000000, 832, 13, 1, 8},
+ { 16800000, 832000000, 396, 8, 1, 8}, /* actual: 831.6 MHz */
+ { 19200000, 832000000, 260, 6, 1, 8},
+ { 26000000, 832000000, 416, 13, 1, 8},
+
+ { 12000000, 624000000, 624, 12, 1, 8},
+ { 13000000, 624000000, 624, 13, 1, 8},
+ { 16800000, 624000000, 520, 14, 1, 8},
+ { 19200000, 624000000, 520, 16, 1, 8},
+ { 26000000, 624000000, 624, 26, 1, 8},
+
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 16800000, 600000000, 500, 14, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
+
+ { 12000000, 520000000, 520, 12, 1, 8},
+ { 13000000, 520000000, 520, 13, 1, 8},
+ { 16800000, 520000000, 495, 16, 1, 8}, /* actual: 519.75 MHz */
+ { 19200000, 520000000, 325, 12, 1, 6},
+ { 26000000, 520000000, 520, 26, 1, 8},
+
+ { 12000000, 416000000, 416, 12, 1, 8},
+ { 13000000, 416000000, 416, 13, 1, 8},
+ { 16800000, 416000000, 396, 16, 1, 8}, /* actual: 415.8 MHz */
+ { 19200000, 416000000, 260, 12, 1, 6},
+ { 26000000, 416000000, 416, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
+ { 12000000, 666000000, 666, 12, 1, 8},
+ { 13000000, 666000000, 666, 13, 1, 8},
+ { 16800000, 666000000, 555, 14, 1, 8},
+ { 19200000, 666000000, 555, 16, 1, 8},
+ { 26000000, 666000000, 666, 26, 1, 8},
+ { 12000000, 600000000, 600, 12, 1, 8},
+ { 13000000, 600000000, 600, 13, 1, 8},
+ { 16800000, 600000000, 500, 14, 1, 8},
+ { 19200000, 600000000, 375, 12, 1, 6},
+ { 26000000, 600000000, 600, 26, 1, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
+ { 12000000, 216000000, 432, 12, 2, 8},
+ { 13000000, 216000000, 432, 13, 2, 8},
+ { 16800000, 216000000, 360, 14, 2, 8},
+ { 19200000, 216000000, 360, 16, 2, 8},
+ { 26000000, 216000000, 432, 26, 2, 8},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
+ { 9600000, 564480000, 294, 5, 1, 4},
+ { 9600000, 552960000, 288, 5, 1, 4},
+ { 9600000, 24000000, 5, 2, 1, 1},
+
+ { 28800000, 56448000, 49, 25, 1, 1},
+ { 28800000, 73728000, 64, 25, 1, 1},
+ { 28800000, 24000000, 5, 6, 1, 1},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
+ { 12000000, 216000000, 216, 12, 1, 4},
+ { 13000000, 216000000, 216, 13, 1, 4},
+ { 16800000, 216000000, 180, 14, 1, 4},
+ { 19200000, 216000000, 180, 16, 1, 4},
+ { 26000000, 216000000, 216, 26, 1, 4},
+
+ { 12000000, 594000000, 594, 12, 1, 8},
+ { 13000000, 594000000, 594, 13, 1, 8},
+ { 16800000, 594000000, 495, 14, 1, 8},
+ { 19200000, 594000000, 495, 16, 1, 8},
+ { 26000000, 594000000, 594, 26, 1, 8},
+
+ { 12000000, 1000000000, 1000, 12, 1, 12},
+ { 13000000, 1000000000, 1000, 13, 1, 12},
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 12},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+ { 12000000, 480000000, 960, 12, 2, 12},
+ { 13000000, 480000000, 960, 13, 2, 12},
+ { 16800000, 480000000, 400, 7, 2, 5},
+ { 19200000, 480000000, 200, 4, 2, 3},
+ { 26000000, 480000000, 960, 26, 2, 12},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
+ /* 1.7 GHz */
+ { 12000000, 1700000000, 850, 6, 1, 8},
+ { 13000000, 1700000000, 915, 7, 1, 8}, /* actual: 1699.2 MHz */
+ { 16800000, 1700000000, 708, 7, 1, 8}, /* actual: 1699.2 MHz */
+ { 19200000, 1700000000, 885, 10, 1, 8}, /* actual: 1699.2 MHz */
+ { 26000000, 1700000000, 850, 13, 1, 8},
+
+ /* 1.6 GHz */
+ { 12000000, 1600000000, 800, 6, 1, 8},
+ { 13000000, 1600000000, 738, 6, 1, 8}, /* actual: 1599.0 MHz */
+ { 16800000, 1600000000, 857, 9, 1, 8}, /* actual: 1599.7 MHz */
+ { 19200000, 1600000000, 500, 6, 1, 8},
+ { 26000000, 1600000000, 800, 13, 1, 8},
+
+ /* 1.5 GHz */
+ { 12000000, 1500000000, 750, 6, 1, 8},
+ { 13000000, 1500000000, 923, 8, 1, 8}, /* actual: 1499.8 MHz */
+ { 16800000, 1500000000, 625, 7, 1, 8},
+ { 19200000, 1500000000, 625, 8, 1, 8},
+ { 26000000, 1500000000, 750, 13, 1, 8},
+
+ /* 1.4 GHz */
+ { 12000000, 1400000000, 700, 6, 1, 8},
+ { 13000000, 1400000000, 969, 9, 1, 8}, /* actual: 1399.7 MHz */
+ { 16800000, 1400000000, 1000, 12, 1, 8},
+ { 19200000, 1400000000, 875, 12, 1, 8},
+ { 26000000, 1400000000, 700, 13, 1, 8},
+
+ /* 1.3 GHz */
+ { 12000000, 1300000000, 975, 9, 1, 8},
+ { 13000000, 1300000000, 1000, 10, 1, 8},
+ { 16800000, 1300000000, 928, 12, 1, 8}, /* actual: 1299.2 MHz */
+ { 19200000, 1300000000, 812, 12, 1, 8}, /* actual: 1299.2 MHz */
+ { 26000000, 1300000000, 650, 13, 1, 8},
+
+ /* 1.2 GHz */
+ { 12000000, 1200000000, 1000, 10, 1, 8},
+ { 13000000, 1200000000, 923, 10, 1, 8}, /* actual: 1199.9 MHz */
+ { 16800000, 1200000000, 1000, 14, 1, 8},
+ { 19200000, 1200000000, 1000, 16, 1, 8},
+ { 26000000, 1200000000, 600, 13, 1, 8},
+
+ /* 1.1 GHz */
+ { 12000000, 1100000000, 825, 9, 1, 8},
+ { 13000000, 1100000000, 846, 10, 1, 8}, /* actual: 1099.8 MHz */
+ { 16800000, 1100000000, 982, 15, 1, 8}, /* actual: 1099.8 MHz */
+ { 19200000, 1100000000, 859, 15, 1, 8}, /* actual: 1099.5 MHz */
+ { 26000000, 1100000000, 550, 13, 1, 8},
+
+ /* 1 GHz */
+ { 12000000, 1000000000, 1000, 12, 1, 8},
+ { 13000000, 1000000000, 1000, 13, 1, 8},
+ { 16800000, 1000000000, 833, 14, 1, 8}, /* actual: 999.6 MHz */
+ { 19200000, 1000000000, 625, 12, 1, 8},
+ { 26000000, 1000000000, 1000, 26, 1, 8},
+
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
+ /* PLLE special case: use cpcon field to store cml divider value */
+ { 12000000, 100000000, 150, 1, 18, 11},
+ { 216000000, 100000000, 200, 18, 24, 13},
+ { 0, 0, 0, 0, 0, 0 },
+};
+
+/* PLL parameters */
+static struct tegra_clk_pll_params pll_c_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLC_BASE,
+ .misc_reg = PLLC_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_m_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1200000000,
+ .base_reg = PLLM_BASE,
+ .misc_reg = PLLM_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_p_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLP_BASE,
+ .misc_reg = PLLP_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_a_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1400000000,
+ .base_reg = PLLA_BASE,
+ .misc_reg = PLLA_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_d_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD_BASE,
+ .misc_reg = PLLD_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_d2_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 40000000,
+ .vco_max = 1000000000,
+ .base_reg = PLLD2_BASE,
+ .misc_reg = PLLD2_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_u_params = {
+ .input_min = 2000000,
+ .input_max = 40000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 48000000,
+ .vco_max = 960000000,
+ .base_reg = PLLU_BASE,
+ .misc_reg = PLLU_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
+ .lock_delay = 1000,
+};
+
+static struct tegra_clk_pll_params pll_x_params = {
+ .input_min = 2000000,
+ .input_max = 31000000,
+ .cf_min = 1000000,
+ .cf_max = 6000000,
+ .vco_min = 20000000,
+ .vco_max = 1700000000,
+ .base_reg = PLLX_BASE,
+ .misc_reg = PLLX_MISC,
+ .lock_bit_idx = PLL_BASE_LOCK,
+ .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+static struct tegra_clk_pll_params pll_e_params = {
+ .input_min = 12000000,
+ .input_max = 216000000,
+ .cf_min = 12000000,
+ .cf_max = 12000000,
+ .vco_min = 1200000000,
+ .vco_max = 2400000000U,
+ .base_reg = PLLE_BASE,
+ .misc_reg = PLLE_MISC,
+ .lock_bit_idx = PLLE_MISC_LOCK,
+ .lock_enable_bit_idx = PLLE_MISC_LOCK_ENABLE,
+ .lock_delay = 300,
+};
+
+/* Peripheral clock registers */
+static struct tegra_clk_periph_regs periph_l_regs = {
+ .enb_reg = CLK_OUT_ENB_L,
+ .enb_set_reg = CLK_OUT_ENB_SET_L,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_L,
+ .rst_reg = RST_DEVICES_L,
+ .rst_set_reg = RST_DEVICES_SET_L,
+ .rst_clr_reg = RST_DEVICES_CLR_L,
+};
+
+static struct tegra_clk_periph_regs periph_h_regs = {
+ .enb_reg = CLK_OUT_ENB_H,
+ .enb_set_reg = CLK_OUT_ENB_SET_H,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_H,
+ .rst_reg = RST_DEVICES_H,
+ .rst_set_reg = RST_DEVICES_SET_H,
+ .rst_clr_reg = RST_DEVICES_CLR_H,
+};
+
+static struct tegra_clk_periph_regs periph_u_regs = {
+ .enb_reg = CLK_OUT_ENB_U,
+ .enb_set_reg = CLK_OUT_ENB_SET_U,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_U,
+ .rst_reg = RST_DEVICES_U,
+ .rst_set_reg = RST_DEVICES_SET_U,
+ .rst_clr_reg = RST_DEVICES_CLR_U,
+};
+
+static struct tegra_clk_periph_regs periph_v_regs = {
+ .enb_reg = CLK_OUT_ENB_V,
+ .enb_set_reg = CLK_OUT_ENB_SET_V,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_V,
+ .rst_reg = RST_DEVICES_V,
+ .rst_set_reg = RST_DEVICES_SET_V,
+ .rst_clr_reg = RST_DEVICES_CLR_V,
+};
+
+static struct tegra_clk_periph_regs periph_w_regs = {
+ .enb_reg = CLK_OUT_ENB_W,
+ .enb_set_reg = CLK_OUT_ENB_SET_W,
+ .enb_clr_reg = CLK_OUT_ENB_CLR_W,
+ .rst_reg = RST_DEVICES_W,
+ .rst_set_reg = RST_DEVICES_SET_W,
+ .rst_clr_reg = RST_DEVICES_CLR_W,
+};
+
+static void tegra30_clk_measure_input_freq(void)
+{
+ u32 osc_ctrl = readl_relaxed(clk_base + OSC_CTRL);
+ u32 auto_clk_control = osc_ctrl & OSC_CTRL_OSC_FREQ_MASK;
+ u32 pll_ref_div = osc_ctrl & OSC_CTRL_PLL_REF_DIV_MASK;
+
+ switch (auto_clk_control) {
+ case OSC_CTRL_OSC_FREQ_12MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 12000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_13MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 13000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_19_2MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 19200000;
+ break;
+ case OSC_CTRL_OSC_FREQ_26MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 26000000;
+ break;
+ case OSC_CTRL_OSC_FREQ_16_8MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_1);
+ input_freq = 16800000;
+ break;
+ case OSC_CTRL_OSC_FREQ_38_4MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_2);
+ input_freq = 38400000;
+ break;
+ case OSC_CTRL_OSC_FREQ_48MHZ:
+ BUG_ON(pll_ref_div != OSC_CTRL_PLL_REF_DIV_4);
+ input_freq = 48000000;
+ break;
+ default:
+ pr_err("Unexpected auto clock control value %d",
+ auto_clk_control);
+ BUG();
+ return;
+ }
+}
+
+static unsigned int tegra30_get_pll_ref_div(void)
+{
+ u32 pll_ref_div = readl_relaxed(clk_base + OSC_CTRL) &
+ OSC_CTRL_PLL_REF_DIV_MASK;
+
+ switch (pll_ref_div) {
+ case OSC_CTRL_PLL_REF_DIV_1:
+ return 1;
+ case OSC_CTRL_PLL_REF_DIV_2:
+ return 2;
+ case OSC_CTRL_PLL_REF_DIV_4:
+ return 4;
+ default:
+ pr_err("Invalid pll ref divider %d", pll_ref_div);
+ BUG();
+ }
+ return 0;
+}
+
+static void tegra30_utmi_param_configure(void)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
+ if (input_freq == utmi_parameters[i].osc_frequency)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(utmi_parameters)) {
+ pr_err("%s: Unexpected input rate %lu\n", __func__, input_freq);
+ return;
+ }
+
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL stable and active counts */
+ reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
+ reg |= UTMIP_PLL_CFG2_STABLE_COUNT(
+ utmi_parameters[i].stable_count);
+
+ reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(
+ utmi_parameters[i].active_delay_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
+
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL delay and oscillator frequency counts */
+ reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
+ reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
+
+ reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(
+ utmi_parameters[i].enable_delay_count);
+
+ reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
+ reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(
+ utmi_parameters[i].xtal_freq_count);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
+ reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
+
+ writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
+}
+
+static const char *pll_e_parents[] = {"pll_ref", "pll_p"};
+
+static void __init tegra30_pll_init(void)
+{
+ struct clk *clk;
+
+ /* PLLC */
+ clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_c_params,
+ TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
+ pll_c_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_c", NULL);
+ clks[pll_c] = clk;
+
+ /* PLLC_OUT1 */
+ clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
+ clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_c_out1", "pll_c_out1_div",
+ clk_base + PLLC_OUT, 1, 0, CLK_SET_RATE_PARENT,
+ 0, NULL);
+ clk_register_clkdev(clk, "pll_c_out1", NULL);
+ clks[pll_c_out1] = clk;
+
+ /* PLLP */
+ clk = tegra_clk_register_pll("pll_p", "pll_ref", clk_base, pmc_base, 0,
+ 408000000, &pll_p_params,
+ TEGRA_PLL_FIXED | TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_USE_LOCK, pll_p_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_p", NULL);
+ clks[pll_p] = clk;
+
+ /* PLLP_OUT1 */
+ clk = tegra_clk_register_divider("pll_p_out1_div", "pll_p",
+ clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 8, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out1", "pll_p_out1_div",
+ clk_base + PLLP_OUTA, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out1", NULL);
+ clks[pll_p_out1] = clk;
+
+ /* PLLP_OUT2 */
+ clk = tegra_clk_register_divider("pll_p_out2_div", "pll_p",
+ clk_base + PLLP_OUTA, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out2", "pll_p_out2_div",
+ clk_base + PLLP_OUTA, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out2", NULL);
+ clks[pll_p_out2] = clk;
+
+ /* PLLP_OUT3 */
+ clk = tegra_clk_register_divider("pll_p_out3_div", "pll_p",
+ clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 8, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out3", "pll_p_out3_div",
+ clk_base + PLLP_OUTB, 1, 0,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out3", NULL);
+ clks[pll_p_out3] = clk;
+
+ /* PLLP_OUT4 */
+ clk = tegra_clk_register_divider("pll_p_out4_div", "pll_p",
+ clk_base + PLLP_OUTB, 0, TEGRA_DIVIDER_FIXED |
+ TEGRA_DIVIDER_ROUND_UP, 24, 8, 1,
+ &pll_div_lock);
+ clk = tegra_clk_register_pll_out("pll_p_out4", "pll_p_out4_div",
+ clk_base + PLLP_OUTB, 17, 16,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0,
+ &pll_div_lock);
+ clk_register_clkdev(clk, "pll_p_out4", NULL);
+ clks[pll_p_out4] = clk;
+
+ /* PLLM */
+ clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, 0,
+ &pll_m_params, TEGRA_PLLM | TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
+ pll_m_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_m", NULL);
+ clks[pll_m] = clk;
+
+ /* PLLM_OUT1 */
+ clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
+ clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+ clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_m_out1", NULL);
+ clks[pll_m_out1] = clk;
+
+ /* PLLX */
+ clk = tegra_clk_register_pll("pll_x", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_x_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_DCCON | TEGRA_PLL_USE_LOCK,
+ pll_x_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_x", NULL);
+ clks[pll_x] = clk;
+
+ /* PLLX_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_x_out0", "pll_x",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_x_out0", NULL);
+ clks[pll_x_out0] = clk;
+
+ /* PLLU */
+ clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_u_params, TEGRA_PLLU | TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+ pll_u_freq_table,
+ NULL);
+ clk_register_clkdev(clk, "pll_u", NULL);
+ clks[pll_u] = clk;
+
+ tegra30_utmi_param_configure();
+
+ /* PLLD */
+ clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_d_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+ pll_d_freq_table, &pll_d_lock);
+ clk_register_clkdev(clk, "pll_d", NULL);
+ clks[pll_d] = clk;
+
+ /* PLLD_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d_out0", "pll_d",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d_out0", NULL);
+ clks[pll_d_out0] = clk;
+
+ /* PLLD2 */
+ clk = tegra_clk_register_pll("pll_d2", "pll_ref", clk_base, pmc_base, 0,
+ 0, &pll_d2_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK,
+ pll_d_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_d2", NULL);
+ clks[pll_d2] = clk;
+
+ /* PLLD2_OUT0 */
+ clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "pll_d2_out0", NULL);
+ clks[pll_d2_out0] = clk;
+
+ /* PLLA */
+ clk = tegra_clk_register_pll("pll_a", "pll_p_out1", clk_base, pmc_base,
+ 0, 0, &pll_a_params, TEGRA_PLL_HAS_CPCON |
+ TEGRA_PLL_USE_LOCK, pll_a_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_a", NULL);
+ clks[pll_a] = clk;
+
+ /* PLLA_OUT0 */
+ clk = tegra_clk_register_divider("pll_a_out0_div", "pll_a",
+ clk_base + PLLA_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_a_out0", "pll_a_out0_div",
+ clk_base + PLLA_OUT, 1, 0, CLK_IGNORE_UNUSED |
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_a_out0", NULL);
+ clks[pll_a_out0] = clk;
+
+ /* PLLE */
+ clk = clk_register_mux(NULL, "pll_e_mux", pll_e_parents,
+ ARRAY_SIZE(pll_e_parents), 0,
+ clk_base + PLLE_AUX, 2, 1, 0, NULL);
+ clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
+ CLK_GET_RATE_NOCACHE, 100000000, &pll_e_params,
+ TEGRA_PLLE_CONFIGURE, pll_e_freq_table, NULL);
+ clk_register_clkdev(clk, "pll_e", NULL);
+ clks[pll_e] = clk;
+}
+
+static const char *mux_audio_sync_clk[] = { "spdif_in_sync", "i2s0_sync",
+ "i2s1_sync", "i2s2_sync", "i2s3_sync", "i2s4_sync", "vimclk_sync",};
+static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern1", };
+static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern2", };
+static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
+ "clk_m_div4", "extern3", };
+
+static void __init tegra30_audio_clk_init(void)
+{
+ struct clk *clk;
+
+ /* spdif_in_sync */
+ clk = tegra_clk_register_sync_source("spdif_in_sync", 24000000,
+ 24000000);
+ clk_register_clkdev(clk, "spdif_in_sync", NULL);
+ clks[spdif_in_sync] = clk;
+
+ /* i2s0_sync */
+ clk = tegra_clk_register_sync_source("i2s0_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s0_sync", NULL);
+ clks[i2s0_sync] = clk;
+
+ /* i2s1_sync */
+ clk = tegra_clk_register_sync_source("i2s1_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s1_sync", NULL);
+ clks[i2s1_sync] = clk;
+
+ /* i2s2_sync */
+ clk = tegra_clk_register_sync_source("i2s2_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s2_sync", NULL);
+ clks[i2s2_sync] = clk;
+
+ /* i2s3_sync */
+ clk = tegra_clk_register_sync_source("i2s3_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s3_sync", NULL);
+ clks[i2s3_sync] = clk;
+
+ /* i2s4_sync */
+ clk = tegra_clk_register_sync_source("i2s4_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "i2s4_sync", NULL);
+ clks[i2s4_sync] = clk;
+
+ /* vimclk_sync */
+ clk = tegra_clk_register_sync_source("vimclk_sync", 24000000, 24000000);
+ clk_register_clkdev(clk, "vimclk_sync", NULL);
+ clks[vimclk_sync] = clk;
+
+ /* audio0 */
+ clk = clk_register_mux(NULL, "audio0_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S0, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio0", "audio0_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S0, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio0", NULL);
+ clks[audio0] = clk;
+
+ /* audio1 */
+ clk = clk_register_mux(NULL, "audio1_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S1, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio1", "audio1_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S1, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio1", NULL);
+ clks[audio1] = clk;
+
+ /* audio2 */
+ clk = clk_register_mux(NULL, "audio2_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S2, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio2", "audio2_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S2, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio2", NULL);
+ clks[audio2] = clk;
+
+ /* audio3 */
+ clk = clk_register_mux(NULL, "audio3_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S3, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio3", "audio3_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S3, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio3", NULL);
+ clks[audio3] = clk;
+
+ /* audio4 */
+ clk = clk_register_mux(NULL, "audio4_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_I2S4, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "audio4", "audio4_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_I2S4, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "audio4", NULL);
+ clks[audio4] = clk;
+
+ /* spdif */
+ clk = clk_register_mux(NULL, "spdif_mux", mux_audio_sync_clk,
+ ARRAY_SIZE(mux_audio_sync_clk), 0,
+ clk_base + AUDIO_SYNC_CLK_SPDIF, 0, 3, 0, NULL);
+ clk = clk_register_gate(NULL, "spdif", "spdif_mux", 0,
+ clk_base + AUDIO_SYNC_CLK_SPDIF, 4,
+ CLK_GATE_SET_TO_DISABLE, NULL);
+ clk_register_clkdev(clk, "spdif", NULL);
+ clks[spdif] = clk;
+
+ /* audio0_2x */
+ clk = clk_register_fixed_factor(NULL, "audio0_doubler", "audio0",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio0_div", "audio0_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 24, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio0_2x", "audio0_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 113, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio0_2x", NULL);
+ clks[audio0_2x] = clk;
+
+ /* audio1_2x */
+ clk = clk_register_fixed_factor(NULL, "audio1_doubler", "audio1",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio1_div", "audio1_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 25, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio1_2x", "audio1_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 114, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio1_2x", NULL);
+ clks[audio1_2x] = clk;
+
+ /* audio2_2x */
+ clk = clk_register_fixed_factor(NULL, "audio2_doubler", "audio2",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio2_div", "audio2_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 26, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio2_2x", "audio2_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 115, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio2_2x", NULL);
+ clks[audio2_2x] = clk;
+
+ /* audio3_2x */
+ clk = clk_register_fixed_factor(NULL, "audio3_doubler", "audio3",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio3_div", "audio3_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 27, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio3_2x", "audio3_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 116, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio3_2x", NULL);
+ clks[audio3_2x] = clk;
+
+ /* audio4_2x */
+ clk = clk_register_fixed_factor(NULL, "audio4_doubler", "audio4",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("audio4_div", "audio4_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 28, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("audio4_2x", "audio4_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 117, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "audio4_2x", NULL);
+ clks[audio4_2x] = clk;
+
+ /* spdif_2x */
+ clk = clk_register_fixed_factor(NULL, "spdif_doubler", "spdif",
+ CLK_SET_RATE_PARENT, 2, 1);
+ clk = tegra_clk_register_divider("spdif_div", "spdif_doubler",
+ clk_base + AUDIO_SYNC_DOUBLER, 0, 0, 29, 1, 0,
+ &clk_doubler_lock);
+ clk = tegra_clk_register_periph_gate("spdif_2x", "spdif_div",
+ TEGRA_PERIPH_NO_RESET, clk_base,
+ CLK_SET_RATE_PARENT, 118, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "spdif_2x", NULL);
+ clks[spdif_2x] = clk;
+}
+
+static void __init tegra30_pmc_clk_init(void)
+{
+ struct clk *clk;
+
+ /* clk_out_1 */
+ clk = clk_register_mux(NULL, "clk_out_1_mux", clk_out1_parents,
+ ARRAY_SIZE(clk_out1_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 6, 3, 0,
+ &clk_out_lock);
+ clks[clk_out_1_mux] = clk;
+ clk = clk_register_gate(NULL, "clk_out_1", "clk_out_1_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 2, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern1", "clk_out_1");
+ clks[clk_out_1] = clk;
+
+ /* clk_out_2 */
+ clk = clk_register_mux(NULL, "clk_out_2_mux", clk_out2_parents,
+ ARRAY_SIZE(clk_out2_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 14, 3, 0,
+ &clk_out_lock);
+ clk = clk_register_gate(NULL, "clk_out_2", "clk_out_2_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 10, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern2", "clk_out_2");
+ clks[clk_out_2] = clk;
+
+ /* clk_out_3 */
+ clk = clk_register_mux(NULL, "clk_out_3_mux", clk_out3_parents,
+ ARRAY_SIZE(clk_out3_parents), 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 22, 3, 0,
+ &clk_out_lock);
+ clk = clk_register_gate(NULL, "clk_out_3", "clk_out_3_mux", 0,
+ pmc_base + PMC_CLK_OUT_CNTRL, 18, 0,
+ &clk_out_lock);
+ clk_register_clkdev(clk, "extern3", "clk_out_3");
+ clks[clk_out_3] = clk;
+
+ /* blink */
+ writel_relaxed(0, pmc_base + PMC_BLINK_TIMER);
+ clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0,
+ pmc_base + PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL);
+ clk = clk_register_gate(NULL, "blink", "blink_override", 0,
+ pmc_base + PMC_CTRL,
+ PMC_CTRL_BLINK_ENB, 0, NULL);
+ clk_register_clkdev(clk, "blink", NULL);
+ clks[blink] = clk;
+}
+
+static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p_cclkg", "pll_p_out4_cclkg",
+ "pll_p_out3_cclkg", "unused", "pll_x" };
+static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
+ "pll_p_cclklp", "pll_p_out4_cclklp",
+ "pll_p_out3_cclklp", "unused", "pll_x",
+ "pll_x_out0" };
+static const char *sclk_parents[] = { "clk_m", "pll_c_out1", "pll_p_out4",
+ "pll_p_out3", "pll_p_out2", "unused",
+ "clk_32k", "pll_m_out1" };
+
+static void __init tegra30_super_clk_init(void)
+{
+ struct clk *clk;
+
+ /*
+ * Clock input to cclk_g divided from pll_p using
+ * U71 divider of cclk_g.
+ */
+ clk = tegra_clk_register_divider("pll_p_cclkg", "pll_p",
+ clk_base + SUPER_CCLKG_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_cclkg", NULL);
+
+ /*
+ * Clock input to cclk_g divided from pll_p_out3 using
+ * U71 divider of cclk_g.
+ */
+ clk = tegra_clk_register_divider("pll_p_out3_cclkg", "pll_p_out3",
+ clk_base + SUPER_CCLKG_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out3_cclkg", NULL);
+
+ /*
+ * Clock input to cclk_g divided from pll_p_out4 using
+ * U71 divider of cclk_g.
+ */
+ clk = tegra_clk_register_divider("pll_p_out4_cclkg", "pll_p_out4",
+ clk_base + SUPER_CCLKG_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out4_cclkg", NULL);
+
+ /* CCLKG */
+ clk = tegra_clk_register_super_mux("cclk_g", cclk_g_parents,
+ ARRAY_SIZE(cclk_g_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKG_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "cclk_g", NULL);
+ clks[cclk_g] = clk;
+
+ /*
+ * Clock input to cclk_lp divided from pll_p using
+ * U71 divider of cclk_lp.
+ */
+ clk = tegra_clk_register_divider("pll_p_cclklp", "pll_p",
+ clk_base + SUPER_CCLKLP_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_cclklp", NULL);
+
+ /*
+ * Clock input to cclk_lp divided from pll_p_out3 using
+ * U71 divider of cclk_lp.
+ */
+ clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
+ clk_base + SUPER_CCLKLP_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);
+
+ /*
+ * Clock input to cclk_lp divided from pll_p_out4 using
+ * U71 divider of cclk_lp.
+ */
+ clk = tegra_clk_register_divider("pll_p_out4_cclklp", "pll_p_out4",
+ clk_base + SUPER_CCLKLP_DIVIDER, 0,
+ TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ clk_register_clkdev(clk, "pll_p_out4_cclklp", NULL);
+
+ /* CCLKLP */
+ clk = tegra_clk_register_super_mux("cclk_lp", cclk_lp_parents,
+ ARRAY_SIZE(cclk_lp_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + CCLKLP_BURST_POLICY,
+ TEGRA_DIVIDER_2, 4, 8, 9,
+ NULL);
+ clk_register_clkdev(clk, "cclk_lp", NULL);
+ clks[cclk_lp] = clk;
+
+ /* SCLK */
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT,
+ clk_base + SCLK_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ clk_register_clkdev(clk, "sclk", NULL);
+ clks[sclk] = clk;
+
+ /* HCLK */
+ clk = clk_register_divider(NULL, "hclk_div", "sclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "hclk", "hclk_div", CLK_SET_RATE_PARENT,
+ clk_base + SYSTEM_CLK_RATE, 7,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "hclk", NULL);
+ clks[hclk] = clk;
+
+ /* PCLK */
+ clk = clk_register_divider(NULL, "pclk_div", "hclk", 0,
+ clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT,
+ clk_base + SYSTEM_CLK_RATE, 3,
+ CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ clk_register_clkdev(clk, "pclk", NULL);
+ clks[pclk] = clk;
+
+ /* twd */
+ clk = clk_register_fixed_factor(NULL, "twd", "cclk_g",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "twd", NULL);
+ clks[twd] = clk;
+}
+
+static const char *mux_pllacp_clkm[] = { "pll_a_out0", "unused", "pll_p",
+ "clk_m" };
+static const char *mux_pllpcm_clkm[] = { "pll_p", "pll_c", "pll_m", "clk_m" };
+static const char *mux_pllmcp_clkm[] = { "pll_m", "pll_c", "pll_p", "clk_m" };
+static const char *i2s0_parents[] = { "pll_a_out0", "audio0_2x", "pll_p",
+ "clk_m" };
+static const char *i2s1_parents[] = { "pll_a_out0", "audio1_2x", "pll_p",
+ "clk_m" };
+static const char *i2s2_parents[] = { "pll_a_out0", "audio2_2x", "pll_p",
+ "clk_m" };
+static const char *i2s3_parents[] = { "pll_a_out0", "audio3_2x", "pll_p",
+ "clk_m" };
+static const char *i2s4_parents[] = { "pll_a_out0", "audio4_2x", "pll_p",
+ "clk_m" };
+static const char *spdif_out_parents[] = { "pll_a_out0", "spdif_2x", "pll_p",
+ "clk_m" };
+static const char *spdif_in_parents[] = { "pll_p", "pll_c", "pll_m" };
+static const char *mux_pllpc_clk32k_clkm[] = { "pll_p", "pll_c", "clk_32k",
+ "clk_m" };
+static const char *mux_pllpc_clkm_clk32k[] = { "pll_p", "pll_c", "clk_m",
+ "clk_32k" };
+static const char *mux_pllmcpa[] = { "pll_m", "pll_c", "pll_p", "pll_a_out0" };
+static const char *mux_pllpdc_clkm[] = { "pll_p", "pll_d_out0", "pll_c",
+ "clk_m" };
+static const char *mux_pllp_clkm[] = { "pll_p", "unused", "unused", "clk_m" };
+static const char *mux_pllpmdacd2_clkm[] = { "pll_p", "pll_m", "pll_d_out0",
+ "pll_a_out0", "pll_c",
+ "pll_d2_out0", "clk_m" };
+static const char *mux_plla_clk32k_pllp_clkm_plle[] = { "pll_a_out0",
+ "clk_32k", "pll_p",
+ "clk_m", "pll_e" };
+static const char *mux_plld_out0_plld2_out0[] = { "pll_d_out0",
+ "pll_d2_out0" };
+
+static struct tegra_periph_init_data tegra_periph_clk_list[] = {
+ TEGRA_INIT_DATA_MUX("i2s0", NULL, "tegra30-i2s.0", i2s0_parents, CLK_SOURCE_I2S0, 30, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s0),
+ TEGRA_INIT_DATA_MUX("i2s1", NULL, "tegra30-i2s.1", i2s1_parents, CLK_SOURCE_I2S1, 11, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s1),
+ TEGRA_INIT_DATA_MUX("i2s2", NULL, "tegra30-i2s.2", i2s2_parents, CLK_SOURCE_I2S2, 18, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2s2),
+ TEGRA_INIT_DATA_MUX("i2s3", NULL, "tegra30-i2s.3", i2s3_parents, CLK_SOURCE_I2S3, 101, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s3),
+ TEGRA_INIT_DATA_MUX("i2s4", NULL, "tegra30-i2s.4", i2s4_parents, CLK_SOURCE_I2S4, 102, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2s4),
+ TEGRA_INIT_DATA_MUX("spdif_out", "spdif_out", "tegra30-spdif", spdif_out_parents, CLK_SOURCE_SPDIF_OUT, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_out),
+ TEGRA_INIT_DATA_MUX("spdif_in", "spdif_in", "tegra30-spdif", spdif_in_parents, CLK_SOURCE_SPDIF_IN, 10, &periph_l_regs, TEGRA_PERIPH_ON_APB, spdif_in),
+ TEGRA_INIT_DATA_MUX("d_audio", "d_audio", "tegra30-ahub", mux_pllacp_clkm, CLK_SOURCE_D_AUDIO, 106, &periph_v_regs, 0, d_audio),
+ TEGRA_INIT_DATA_MUX("dam0", NULL, "tegra30-dam.0", mux_pllacp_clkm, CLK_SOURCE_DAM0, 108, &periph_v_regs, 0, dam0),
+ TEGRA_INIT_DATA_MUX("dam1", NULL, "tegra30-dam.1", mux_pllacp_clkm, CLK_SOURCE_DAM1, 109, &periph_v_regs, 0, dam1),
+ TEGRA_INIT_DATA_MUX("dam2", NULL, "tegra30-dam.2", mux_pllacp_clkm, CLK_SOURCE_DAM2, 110, &periph_v_regs, 0, dam2),
+ TEGRA_INIT_DATA_MUX("hda", "hda", "tegra30-hda", mux_pllpcm_clkm, CLK_SOURCE_HDA, 125, &periph_v_regs, 0, hda),
+ TEGRA_INIT_DATA_MUX("hda2codec_2x", "hda2codec", "tegra30-hda", mux_pllpcm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, &periph_v_regs, 0, hda2codec_2x),
+ TEGRA_INIT_DATA_MUX("sbc1", NULL, "spi_tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SBC1, 41, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc1),
+ TEGRA_INIT_DATA_MUX("sbc2", NULL, "spi_tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SBC2, 44, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc2),
+ TEGRA_INIT_DATA_MUX("sbc3", NULL, "spi_tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SBC3, 46, &periph_h_regs, TEGRA_PERIPH_ON_APB, sbc3),
+ TEGRA_INIT_DATA_MUX("sbc4", NULL, "spi_tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SBC4, 68, &periph_u_regs, TEGRA_PERIPH_ON_APB, sbc4),
+ TEGRA_INIT_DATA_MUX("sbc5", NULL, "spi_tegra.4", mux_pllpcm_clkm, CLK_SOURCE_SBC5, 104, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc5),
+ TEGRA_INIT_DATA_MUX("sbc6", NULL, "spi_tegra.5", mux_pllpcm_clkm, CLK_SOURCE_SBC6, 105, &periph_v_regs, TEGRA_PERIPH_ON_APB, sbc6),
+ TEGRA_INIT_DATA_MUX("sata_oob", NULL, "tegra_sata_oob", mux_pllpcm_clkm, CLK_SOURCE_SATA_OOB, 123, &periph_v_regs, TEGRA_PERIPH_ON_APB, sata_oob),
+ TEGRA_INIT_DATA_MUX("sata", NULL, "tegra_sata", mux_pllpcm_clkm, CLK_SOURCE_SATA, 124, &periph_v_regs, TEGRA_PERIPH_ON_APB, sata),
+ TEGRA_INIT_DATA_MUX("ndflash", NULL, "tegra_nand", mux_pllpcm_clkm, CLK_SOURCE_NDFLASH, 13, &periph_l_regs, TEGRA_PERIPH_ON_APB, ndflash),
+ TEGRA_INIT_DATA_MUX("ndspeed", NULL, "tegra_nand_speed", mux_pllpcm_clkm, CLK_SOURCE_NDSPEED, 80, &periph_u_regs, TEGRA_PERIPH_ON_APB, ndspeed),
+ TEGRA_INIT_DATA_MUX("vfir", NULL, "vfir", mux_pllpcm_clkm, CLK_SOURCE_VFIR, 7, &periph_l_regs, TEGRA_PERIPH_ON_APB, vfir),
+ TEGRA_INIT_DATA_MUX("csite", NULL, "csite", mux_pllpcm_clkm, CLK_SOURCE_CSITE, 73, &periph_u_regs, TEGRA_PERIPH_ON_APB, csite),
+ TEGRA_INIT_DATA_MUX("la", NULL, "la", mux_pllpcm_clkm, CLK_SOURCE_LA, 76, &periph_u_regs, TEGRA_PERIPH_ON_APB, la),
+ TEGRA_INIT_DATA_MUX("owr", NULL, "tegra_w1", mux_pllpcm_clkm, CLK_SOURCE_OWR, 71, &periph_u_regs, TEGRA_PERIPH_ON_APB, owr),
+ TEGRA_INIT_DATA_MUX("mipi", NULL, "mipi", mux_pllpcm_clkm, CLK_SOURCE_MIPI, 50, &periph_h_regs, TEGRA_PERIPH_ON_APB, mipi),
+ TEGRA_INIT_DATA_MUX("tsensor", NULL, "tegra-tsensor", mux_pllpc_clkm_clk32k, CLK_SOURCE_TSENSOR, 100, &periph_v_regs, TEGRA_PERIPH_ON_APB, tsensor),
+ TEGRA_INIT_DATA_MUX("i2cslow", NULL, "i2cslow", mux_pllpc_clk32k_clkm, CLK_SOURCE_I2CSLOW, 81, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2cslow),
+ TEGRA_INIT_DATA_INT("vde", NULL, "vde", mux_pllpcm_clkm, CLK_SOURCE_VDE, 61, &periph_h_regs, 0, vde),
+ TEGRA_INIT_DATA_INT("vi", "vi", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI, 20, &periph_l_regs, 0, vi),
+ TEGRA_INIT_DATA_INT("epp", NULL, "epp", mux_pllmcpa, CLK_SOURCE_EPP, 19, &periph_l_regs, 0, epp),
+ TEGRA_INIT_DATA_INT("mpe", NULL, "mpe", mux_pllmcpa, CLK_SOURCE_MPE, 60, &periph_h_regs, 0, mpe),
+ TEGRA_INIT_DATA_INT("host1x", NULL, "host1x", mux_pllmcpa, CLK_SOURCE_HOST1X, 28, &periph_l_regs, 0, host1x),
+ TEGRA_INIT_DATA_INT("3d", NULL, "3d", mux_pllmcpa, CLK_SOURCE_3D, 24, &periph_l_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d),
+ TEGRA_INIT_DATA_INT("3d2", NULL, "3d2", mux_pllmcpa, CLK_SOURCE_3D2, 98, &periph_v_regs, TEGRA_PERIPH_MANUAL_RESET, gr3d2),
+ TEGRA_INIT_DATA_INT("2d", NULL, "2d", mux_pllmcpa, CLK_SOURCE_2D, 21, &periph_l_regs, 0, gr2d),
+ TEGRA_INIT_DATA_INT("se", NULL, "se", mux_pllpcm_clkm, CLK_SOURCE_SE, 127, &periph_v_regs, 0, se),
+ TEGRA_INIT_DATA_MUX("mselect", NULL, "mselect", mux_pllp_clkm, CLK_SOURCE_MSELECT, 99, &periph_v_regs, 0, mselect),
+ TEGRA_INIT_DATA_MUX("nor", NULL, "tegra-nor", mux_pllpcm_clkm, CLK_SOURCE_NOR, 42, &periph_h_regs, 0, nor),
+ TEGRA_INIT_DATA_MUX("sdmmc1", NULL, "sdhci-tegra.0", mux_pllpcm_clkm, CLK_SOURCE_SDMMC1, 14, &periph_l_regs, 0, sdmmc1),
+ TEGRA_INIT_DATA_MUX("sdmmc2", NULL, "sdhci-tegra.1", mux_pllpcm_clkm, CLK_SOURCE_SDMMC2, 9, &periph_l_regs, 0, sdmmc2),
+ TEGRA_INIT_DATA_MUX("sdmmc3", NULL, "sdhci-tegra.2", mux_pllpcm_clkm, CLK_SOURCE_SDMMC3, 69, &periph_u_regs, 0, sdmmc3),
+ TEGRA_INIT_DATA_MUX("sdmmc4", NULL, "sdhci-tegra.3", mux_pllpcm_clkm, CLK_SOURCE_SDMMC4, 15, &periph_l_regs, 0, sdmmc4),
+ TEGRA_INIT_DATA_MUX("cve", NULL, "cve", mux_pllpdc_clkm, CLK_SOURCE_CVE, 49, &periph_h_regs, 0, cve),
+ TEGRA_INIT_DATA_MUX("tvo", NULL, "tvo", mux_pllpdc_clkm, CLK_SOURCE_TVO, 49, &periph_h_regs, 0, tvo),
+ TEGRA_INIT_DATA_MUX("tvdac", NULL, "tvdac", mux_pllpdc_clkm, CLK_SOURCE_TVDAC, 53, &periph_h_regs, 0, tvdac),
+ TEGRA_INIT_DATA_MUX("actmon", NULL, "actmon", mux_pllpc_clk32k_clkm, CLK_SOURCE_ACTMON, 119, &periph_v_regs, 0, actmon),
+ TEGRA_INIT_DATA_MUX("vi_sensor", "vi_sensor", "tegra_camera", mux_pllmcpa, CLK_SOURCE_VI_SENSOR, 20, &periph_l_regs, TEGRA_PERIPH_NO_RESET, vi_sensor),
+ TEGRA_INIT_DATA_DIV16("i2c1", "div-clk", "tegra-i2c.0", mux_pllp_clkm, CLK_SOURCE_I2C1, 12, &periph_l_regs, TEGRA_PERIPH_ON_APB, i2c1),
+ TEGRA_INIT_DATA_DIV16("i2c2", "div-clk", "tegra-i2c.1", mux_pllp_clkm, CLK_SOURCE_I2C2, 54, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c2),
+ TEGRA_INIT_DATA_DIV16("i2c3", "div-clk", "tegra-i2c.2", mux_pllp_clkm, CLK_SOURCE_I2C3, 67, &periph_u_regs, TEGRA_PERIPH_ON_APB, i2c3),
+ TEGRA_INIT_DATA_DIV16("i2c4", "div-clk", "tegra-i2c.3", mux_pllp_clkm, CLK_SOURCE_I2C4, 103, &periph_v_regs, TEGRA_PERIPH_ON_APB, i2c4),
+ TEGRA_INIT_DATA_DIV16("i2c5", "div-clk", "tegra-i2c.4", mux_pllp_clkm, CLK_SOURCE_I2C5, 47, &periph_h_regs, TEGRA_PERIPH_ON_APB, i2c5),
+ TEGRA_INIT_DATA_UART("uarta", NULL, "tegra_uart.0", mux_pllpcm_clkm, CLK_SOURCE_UARTA, 6, &periph_l_regs, uarta),
+ TEGRA_INIT_DATA_UART("uartb", NULL, "tegra_uart.1", mux_pllpcm_clkm, CLK_SOURCE_UARTB, 7, &periph_l_regs, uartb),
+ TEGRA_INIT_DATA_UART("uartc", NULL, "tegra_uart.2", mux_pllpcm_clkm, CLK_SOURCE_UARTC, 55, &periph_h_regs, uartc),
+ TEGRA_INIT_DATA_UART("uartd", NULL, "tegra_uart.3", mux_pllpcm_clkm, CLK_SOURCE_UARTD, 65, &periph_u_regs, uartd),
+ TEGRA_INIT_DATA_UART("uarte", NULL, "tegra_uart.4", mux_pllpcm_clkm, CLK_SOURCE_UARTE, 66, &periph_u_regs, uarte),
+ TEGRA_INIT_DATA_MUX8("hdmi", NULL, "hdmi", mux_pllpmdacd2_clkm, CLK_SOURCE_HDMI, 51, &periph_h_regs, 0, hdmi),
+ TEGRA_INIT_DATA_MUX8("extern1", NULL, "extern1", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN1, 120, &periph_v_regs, 0, extern1),
+ TEGRA_INIT_DATA_MUX8("extern2", NULL, "extern2", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, &periph_v_regs, 0, extern2),
+ TEGRA_INIT_DATA_MUX8("extern3", NULL, "extern3", mux_plla_clk32k_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, &periph_v_regs, 0, extern3),
+ TEGRA_INIT_DATA("pwm", NULL, "pwm", mux_pllpc_clk32k_clkm, CLK_SOURCE_PWM, 28, 2, 0, 0, 8, 1, 0, &periph_l_regs, 17, periph_clk_enb_refcnt, 0, pwm),
+};
+
+static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
+ TEGRA_INIT_DATA_NODIV("disp1", NULL, "tegradc.0", mux_pllpmdacd2_clkm, CLK_SOURCE_DISP1, 29, 3, 27, &periph_l_regs, 0, disp1),
+ TEGRA_INIT_DATA_NODIV("disp2", NULL, "tegradc.1", mux_pllpmdacd2_clkm, CLK_SOURCE_DISP2, 29, 3, 26, &periph_l_regs, 0, disp2),
+ TEGRA_INIT_DATA_NODIV("dsib", NULL, "tegradc.1", mux_plld_out0_plld2_out0, CLK_SOURCE_DSIB, 25, 1, 82, &periph_u_regs, 0, dsib),
+};
+
+static void __init tegra30_periph_clk_init(void)
+{
+ struct tegra_periph_init_data *data;
+ struct clk *clk;
+ int i;
+
+ /* apbdma */
+ clk = tegra_clk_register_periph_gate("apbdma", "clk_m", 0, clk_base, 0, 34,
+ &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-apbdma");
+ clks[apbdma] = clk;
+
+ /* rtc */
+ clk = tegra_clk_register_periph_gate("rtc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 4, &periph_l_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "rtc-tegra");
+ clks[rtc] = clk;
+
+ /* timer */
+ clk = tegra_clk_register_periph_gate("timer", "clk_m", 0, clk_base, 0,
+ 5, &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "timer");
+ clks[timer] = clk;
+
+ /* kbc */
+ clk = tegra_clk_register_periph_gate("kbc", "clk_32k",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 36, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-kbc");
+ clks[kbc] = clk;
+
+ /* csus */
+ clk = tegra_clk_register_periph_gate("csus", "clk_m",
+ TEGRA_PERIPH_NO_RESET | TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 92, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csus", "tengra_camera");
+ clks[csus] = clk;
+
+ /* vcp */
+ clk = tegra_clk_register_periph_gate("vcp", "clk_m", 0, clk_base, 0, 29,
+ &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "vcp", "tegra-avp");
+ clks[vcp] = clk;
+
+ /* bsea */
+ clk = tegra_clk_register_periph_gate("bsea", "clk_m", 0, clk_base, 0,
+ 62, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsea", "tegra-avp");
+ clks[bsea] = clk;
+
+ /* bsev */
+ clk = tegra_clk_register_periph_gate("bsev", "clk_m", 0, clk_base, 0,
+ 63, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "bsev", "tegra-aes");
+ clks[bsev] = clk;
+
+ /* usbd */
+ clk = tegra_clk_register_periph_gate("usbd", "clk_m", 0, clk_base, 0,
+ 22, &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "fsl-tegra-udc");
+ clks[usbd] = clk;
+
+ /* usb2 */
+ clk = tegra_clk_register_periph_gate("usb2", "clk_m", 0, clk_base, 0,
+ 58, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.1");
+ clks[usb2] = clk;
+
+ /* usb3 */
+ clk = tegra_clk_register_periph_gate("usb3", "clk_m", 0, clk_base, 0,
+ 59, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra-ehci.2");
+ clks[usb3] = clk;
+
+ /* dsia */
+ clk = tegra_clk_register_periph_gate("dsia", "pll_d_out0", 0, clk_base,
+ 0, 48, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "dsia", "tegradc.0");
+ clks[dsia] = clk;
+
+ /* csi */
+ clk = tegra_clk_register_periph_gate("csi", "pll_p_out3", 0, clk_base,
+ 0, 52, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "csi", "tegra_camera");
+ clks[csi] = clk;
+
+ /* isp */
+ clk = tegra_clk_register_periph_gate("isp", "clk_m", 0, clk_base, 0, 23,
+ &periph_l_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "isp", "tegra_camera");
+ clks[isp] = clk;
+
+ /* pcie */
+ clk = tegra_clk_register_periph_gate("pcie", "clk_m", 0, clk_base, 0,
+ 70, &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "pcie", "tegra-pcie");
+ clks[pcie] = clk;
+
+ /* afi */
+ clk = tegra_clk_register_periph_gate("afi", "clk_m", 0, clk_base, 0, 72,
+ &periph_u_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "afi", "tegra-pcie");
+ clks[afi] = clk;
+
+ /* kfuse */
+ clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 40, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "kfuse-tegra");
+ clks[kfuse] = clk;
+
+ /* fuse */
+ clk = tegra_clk_register_periph_gate("fuse", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 39, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "fuse", "fuse-tegra");
+ clks[fuse] = clk;
+
+ /* fuse_burn */
+ clk = tegra_clk_register_periph_gate("fuse_burn", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 39, &periph_h_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "fuse_burn", "fuse-tegra");
+ clks[fuse_burn] = clk;
+
+ /* apbif */
+ clk = tegra_clk_register_periph_gate("apbif", "clk_m", 0,
+ clk_base, 0, 107, &periph_v_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "apbif", "tegra30-ahub");
+ clks[apbif] = clk;
+
+ /* hda2hdmi */
+ clk = tegra_clk_register_periph_gate("hda2hdmi", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 128, &periph_w_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "hda2hdmi", "tegra30-hda");
+ clks[hda2hdmi] = clk;
+
+ /* sata_cold */
+ clk = tegra_clk_register_periph_gate("sata_cold", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 129, &periph_w_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "tegra_sata_cold");
+ clks[sata_cold] = clk;
+
+ /* dtv */
+ clk = tegra_clk_register_periph_gate("dtv", "clk_m",
+ TEGRA_PERIPH_ON_APB,
+ clk_base, 0, 79, &periph_u_regs,
+ periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, NULL, "dtv");
+ clks[dtv] = clk;
+
+ /* emc */
+ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
+ ARRAY_SIZE(mux_pllmcp_clkm), 0,
+ clk_base + CLK_SOURCE_EMC,
+ 30, 2, 0, NULL);
+ clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
+ 57, &periph_h_regs, periph_clk_enb_refcnt);
+ clk_register_clkdev(clk, "emc", NULL);
+ clks[emc] = clk;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
+ data = &tegra_periph_clk_list[i];
+ clk = tegra_clk_register_periph(data->name, data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_periph_nodiv_clk_list); i++) {
+ data = &tegra_periph_nodiv_clk_list[i];
+ clk = tegra_clk_register_periph_nodiv(data->name,
+ data->parent_names,
+ data->num_parents, &data->periph,
+ clk_base, data->offset);
+ clk_register_clkdev(clk, data->con_id, data->dev_id);
+ clks[data->clk_id] = clk;
+ }
+}
+
+static void __init tegra30_fixed_clk_init(void)
+{
+ struct clk *clk;
+
+ /* clk_32k */
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
+ 32768);
+ clk_register_clkdev(clk, "clk_32k", NULL);
+ clks[clk_32k] = clk;
+
+ /* clk_m_div2 */
+ clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 2);
+ clk_register_clkdev(clk, "clk_m_div2", NULL);
+ clks[clk_m_div2] = clk;
+
+ /* clk_m_div4 */
+ clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m",
+ CLK_SET_RATE_PARENT, 1, 4);
+ clk_register_clkdev(clk, "clk_m_div4", NULL);
+ clks[clk_m_div4] = clk;
+
+ /* cml0 */
+ clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
+ 0, 0, &cml_lock);
+ clk_register_clkdev(clk, "cml0", NULL);
+ clks[cml0] = clk;
+
+ /* cml1 */
+ clk = clk_register_gate(NULL, "cml1", "pll_e", 0, clk_base + PLLE_AUX,
+ 1, 0, &cml_lock);
+ clk_register_clkdev(clk, "cml1", NULL);
+ clks[cml1] = clk;
+
+ /* pciex */
+ clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000);
+ clk_register_clkdev(clk, "pciex", NULL);
+ clks[pciex] = clk;
+}
+
+static void __init tegra30_osc_clk_init(void)
+{
+ struct clk *clk;
+ unsigned int pll_ref_div;
+
+ tegra30_clk_measure_input_freq();
+
+ /* clk_m */
+ clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT,
+ input_freq);
+ clk_register_clkdev(clk, "clk_m", NULL);
+ clks[clk_m] = clk;
+
+ /* pll_ref */
+ pll_ref_div = tegra30_get_pll_ref_div();
+ clk = clk_register_fixed_factor(NULL, "pll_ref", "clk_m",
+ CLK_SET_RATE_PARENT, 1, pll_ref_div);
+ clk_register_clkdev(clk, "pll_ref", NULL);
+ clks[pll_ref] = clk;
+}
+
+/* Tegra30 CPU clock and reset control functions */
+static void tegra30_wait_cpu_in_reset(u32 cpu)
+{
+ unsigned int reg;
+
+ do {
+ reg = readl(clk_base +
+ TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
+ cpu_relax();
+ } while (!(reg & (1 << cpu))); /* wait until the CPU is held in reset */
+
+ return;
+}
+
+static void tegra30_put_cpu_in_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
+ dmb();
+}
+
+static void tegra30_cpu_out_of_reset(u32 cpu)
+{
+ writel(CPU_RESET(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);
+ wmb();
+}
+
+static void tegra30_enable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ writel(CPU_CLOCK(cpu),
+ clk_base + TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
+ reg = readl(clk_base +
+ TEGRA30_CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR);
+}
+
+static void tegra30_disable_cpu_clock(u32 cpu)
+{
+ unsigned int reg;
+
+ reg = readl(clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+ writel(reg | CPU_CLOCK(cpu),
+ clk_base + TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static bool tegra30_cpu_rail_off_ready(void)
+{
+ unsigned int cpu_rst_status;
+ int cpu_pwr_status;
+
+ cpu_rst_status = readl(clk_base +
+ TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
+ cpu_pwr_status = tegra_powergate_is_powered(TEGRA_POWERGATE_CPU1) ||
+ tegra_powergate_is_powered(TEGRA_POWERGATE_CPU2) ||
+ tegra_powergate_is_powered(TEGRA_POWERGATE_CPU3);
+
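+ /*
+ * Bits 1-3 of the CPU complex status register reflect the reset state
+ * of CPU1-CPU3: the rail may only be turned off once all secondary
+ * CPUs are held in reset and powergated.
+ */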
+ if (((cpu_rst_status & 0xE) != 0xE) || cpu_pwr_status)
+ return false;
+
+ return true;
+}
+
+static void tegra30_cpu_clock_suspend(void)
+{
+ /* switch the CoreSight (csite) clock to clk_m, saving the original source */
+ tegra30_cpu_clk_sctx.clk_csite_src =
+ readl(clk_base + CLK_RESET_SOURCE_CSITE);
+ writel(3<<30, clk_base + CLK_RESET_SOURCE_CSITE);
+
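+ /* save the CPU burst policy, PLLX state and CCLK divider for resume */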
+ tegra30_cpu_clk_sctx.cpu_burst =
+ readl(clk_base + CLK_RESET_CCLK_BURST);
+ tegra30_cpu_clk_sctx.pllx_base =
+ readl(clk_base + CLK_RESET_PLLX_BASE);
+ tegra30_cpu_clk_sctx.pllx_misc =
+ readl(clk_base + CLK_RESET_PLLX_MISC);
+ tegra30_cpu_clk_sctx.cclk_divider =
+ readl(clk_base + CLK_RESET_CCLK_DIVIDER);
+}
+
+static void tegra30_cpu_clock_resume(void)
+{
+ unsigned int reg, policy;
+
+ /* Is CPU complex already running on PLLX? */
+ reg = readl(clk_base + CLK_RESET_CCLK_BURST);
+ policy = (reg >> CLK_RESET_CCLK_BURST_POLICY_SHIFT) & 0xF;
+
+ if (policy == CLK_RESET_CCLK_IDLE_POLICY)
+ reg = (reg >> CLK_RESET_CCLK_IDLE_POLICY_SHIFT) & 0xF;
+ else if (policy == CLK_RESET_CCLK_RUN_POLICY)
+ reg = (reg >> CLK_RESET_CCLK_RUN_POLICY_SHIFT) & 0xF;
+ else
+ BUG();
+
+ if (reg != CLK_RESET_CCLK_BURST_POLICY_PLLX) {
+ /* restore PLLX settings if CPU is on different PLL */
+ writel(tegra30_cpu_clk_sctx.pllx_misc,
+ clk_base + CLK_RESET_PLLX_MISC);
+ writel(tegra30_cpu_clk_sctx.pllx_base,
+ clk_base + CLK_RESET_PLLX_BASE);
+
+ /* wait for PLL stabilization if PLLX was enabled */
+ if (tegra30_cpu_clk_sctx.pllx_base & (1 << 30))
+ udelay(300);
+ }
+
+ /*
+ * Restore the original CPU burst policy; this path is taken when
+ * resuming from CPU LP2 idle or from system suspend.
+ */
+ writel(tegra30_cpu_clk_sctx.cclk_divider,
+ clk_base + CLK_RESET_CCLK_DIVIDER);
+ writel(tegra30_cpu_clk_sctx.cpu_burst,
+ clk_base + CLK_RESET_CCLK_BURST);
+
+ writel(tegra30_cpu_clk_sctx.clk_csite_src,
+ clk_base + CLK_RESET_SOURCE_CSITE);
+}
+#endif
+
+static struct tegra_cpu_car_ops tegra30_cpu_car_ops = {
+ .wait_for_reset = tegra30_wait_cpu_in_reset,
+ .put_in_reset = tegra30_put_cpu_in_reset,
+ .out_of_reset = tegra30_cpu_out_of_reset,
+ .enable_clock = tegra30_enable_cpu_clock,
+ .disable_clock = tegra30_disable_cpu_clock,
+#ifdef CONFIG_PM_SLEEP
+ .rail_off_ready = tegra30_cpu_rail_off_ready,
+ .suspend = tegra30_cpu_clock_suspend,
+ .resume = tegra30_cpu_clock_resume,
+#endif
+};
+
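+/* Initial parent/rate/enable settings, applied once all clocks are registered */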
+static __initdata struct tegra_clk_init_table init_table[] = {
+ {uarta, pll_p, 408000000, 0},
+ {uartb, pll_p, 408000000, 0},
+ {uartc, pll_p, 408000000, 0},
+ {uartd, pll_p, 408000000, 0},
+ {uarte, pll_p, 408000000, 0},
+ {pll_a, clk_max, 564480000, 1},
+ {pll_a_out0, clk_max, 11289600, 1},
+ {extern1, pll_a_out0, 0, 1},
+ {clk_out_1_mux, extern1, 0, 0},
+ {clk_out_1, clk_max, 0, 1},
+ {blink, clk_max, 0, 1},
+ {i2s0, pll_a_out0, 11289600, 0},
+ {i2s1, pll_a_out0, 11289600, 0},
+ {i2s2, pll_a_out0, 11289600, 0},
+ {i2s3, pll_a_out0, 11289600, 0},
+ {i2s4, pll_a_out0, 11289600, 0},
+ {sdmmc1, pll_p, 48000000, 0},
+ {sdmmc2, pll_p, 48000000, 0},
+ {sdmmc3, pll_p, 48000000, 0},
+ {pll_m, clk_max, 0, 1},
+ {pclk, clk_max, 0, 1},
+ {csite, clk_max, 0, 1},
+ {emc, clk_max, 0, 1},
+ {mselect, clk_max, 0, 1},
+ {sbc1, pll_p, 100000000, 0},
+ {sbc2, pll_p, 100000000, 0},
+ {sbc3, pll_p, 100000000, 0},
+ {sbc4, pll_p, 100000000, 0},
+ {sbc5, pll_p, 100000000, 0},
+ {sbc6, pll_p, 100000000, 0},
+ {host1x, pll_c, 150000000, 0},
+ {disp1, pll_p, 600000000, 0},
+ {disp2, pll_p, 600000000, 0},
+ {twd, clk_max, 0, 1},
+ {clk_max, clk_max, 0, 0}, /* This MUST be the last entry. */
+};
+
+/*
+ * Some clocks may be used by different drivers depending on the board
+ * configuration. List those here to register them twice in the clock lookup
+ * table under two names.
+ */
+static struct tegra_clk_duplicate tegra_clk_duplicates[] = {
+ TEGRA_CLK_DUPLICATE(usbd, "utmip-pad", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-ehci.0", NULL),
+ TEGRA_CLK_DUPLICATE(usbd, "tegra-otg", NULL),
+ TEGRA_CLK_DUPLICATE(bsev, "tegra-avp", "bsev"),
+ TEGRA_CLK_DUPLICATE(bsev, "nvavp", "bsev"),
+ TEGRA_CLK_DUPLICATE(vde, "tegra-aes", "vde"),
+ TEGRA_CLK_DUPLICATE(bsea, "tegra-aes", "bsea"),
+ TEGRA_CLK_DUPLICATE(bsea, "nvavp", "bsea"),
+ TEGRA_CLK_DUPLICATE(cml1, "tegra_sata_cml", NULL),
+ TEGRA_CLK_DUPLICATE(cml0, "tegra_pcie", "cml"),
+ TEGRA_CLK_DUPLICATE(pciex, "tegra_pcie", "pciex"),
+ TEGRA_CLK_DUPLICATE(twd, "smp_twd", NULL),
+ TEGRA_CLK_DUPLICATE(vcp, "nvavp", "vcp"),
+ TEGRA_CLK_DUPLICATE(clk_max, NULL, NULL), /* MUST be the last entry */
+};
+
+static const struct of_device_id pmc_match[] __initconst = {
+ { .compatible = "nvidia,tegra30-pmc" },
+ {},
+};
+
+void __init tegra30_clock_init(struct device_node *np)
+{
+ struct device_node *node;
+ int i;
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("ioremap tegra30 CAR failed\n");
+ return;
+ }
+
+ node = of_find_matching_node(NULL, pmc_match);
+ if (!node) {
+ pr_err("Failed to find pmc node\n");
+ BUG();
+ }
+
+ pmc_base = of_iomap(node, 0);
+ if (!pmc_base) {
+ pr_err("Can't map pmc registers\n");
+ BUG();
+ }
+
+ tegra30_osc_clk_init();
+ tegra30_fixed_clk_init();
+ tegra30_pll_init();
+ tegra30_super_clk_init();
+ tegra30_periph_clk_init();
+ tegra30_audio_clk_init();
+ tegra30_pmc_clk_init();
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ if (IS_ERR(clks[i])) {
+ pr_err("Tegra30 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ BUG();
+ }
+ if (!clks[i])
+ clks[i] = ERR_PTR(-EINVAL);
+ }
+
+ tegra_init_dup_clks(tegra_clk_duplicates, clks, clk_max);
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ tegra_init_from_table(init_table, clks, clk_max);
+
+ tegra_cpu_car_ops = &tegra30_cpu_car_ops;
+}
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
new file mode 100644
index 000000000000..a603b9af0ad3
--- /dev/null
+++ b/drivers/clk/tegra/clk.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/clk/tegra.h>
+
+#include "clk.h"
+
+/* Global data of Tegra CPU CAR ops */
+struct tegra_cpu_car_ops *tegra_cpu_car_ops;
+
+void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
+ struct clk *clks[], int clk_max)
+{
+ struct clk *clk;
+
+ for (; dup_list->clk_id < clk_max; dup_list++) {
+ clk = clks[dup_list->clk_id];
+ dup_list->lookup.clk = clk;
+ clkdev_add(&dup_list->lookup);
+ }
+}
+
+void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
+ struct clk *clks[], int clk_max)
+{
+ struct clk *clk;
+
+ for (; tbl->clk_id < clk_max; tbl++) {
+ clk = clks[tbl->clk_id];
+ if (IS_ERR_OR_NULL(clk))
+ return;
+
+ if (tbl->parent_id < clk_max) {
+ struct clk *parent = clks[tbl->parent_id];
+ if (clk_set_parent(clk, parent)) {
+ pr_err("%s: Failed to set parent %s of %s\n",
+ __func__, __clk_get_name(parent),
+ __clk_get_name(clk));
+ WARN_ON(1);
+ }
+ }
+
+ if (tbl->rate)
+ if (clk_set_rate(clk, tbl->rate)) {
+ pr_err("%s: Failed to set rate %lu of %s\n",
+ __func__, tbl->rate,
+ __clk_get_name(clk));
+ WARN_ON(1);
+ }
+
+ if (tbl->state)
+ if (clk_prepare_enable(clk)) {
+ pr_err("%s: Failed to enable %s\n", __func__,
+ __clk_get_name(clk));
+ WARN_ON(1);
+ }
+ }
+}
+
+static const struct of_device_id tegra_dt_clk_match[] = {
+ { .compatible = "nvidia,tegra20-car", .data = tegra20_clock_init },
+ { .compatible = "nvidia,tegra30-car", .data = tegra30_clock_init },
+ { }
+};
+
+void __init tegra_clocks_init(void)
+{
+ of_clk_init(tegra_dt_clk_match);
+}
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
new file mode 100644
index 000000000000..0744731c6229
--- /dev/null
+++ b/drivers/clk/tegra/clk.h
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TEGRA_CLK_H
+#define __TEGRA_CLK_H
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+
+/**
+ * struct tegra_clk_sync_source - external clock source from codec
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @rate: input frequency from source
+ * @max_rate: max rate allowed
+ */
+struct tegra_clk_sync_source {
+ struct clk_hw hw;
+ unsigned long rate;
+ unsigned long max_rate;
+};
+
+#define to_clk_sync_source(_hw) \
+ container_of(_hw, struct tegra_clk_sync_source, hw)
+
+extern const struct clk_ops tegra_clk_sync_source_ops;
+struct clk *tegra_clk_register_sync_source(const char *name,
+ unsigned long fixed_rate, unsigned long max_rate);
+
+/**
+ * struct tegra_clk_frac_div - fractional divider clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing divider
+ * @flags: hardware-specific flags
+ * @shift: shift to the divider bit field
+ * @width: width of the divider bit field
+ * @frac_width: width of the fractional bit field
+ * @lock: register lock
+ *
+ * Flags:
+ * TEGRA_DIVIDER_ROUND_UP - This flag indicates that the divider value should
+ * be rounded up.
+ * TEGRA_DIVIDER_FIXED - Fixed-rate PLL dividers have an additional override
+ * bit; this flag indicates that this divider is for a fixed-rate PLL.
+ * TEGRA_DIVIDER_INT - Some modules cannot cope with the duty cycle when the
+ * fraction bit is set. This flag indicates that the divider should be
+ * calculated so that the fraction bit is zero.
+ * TEGRA_DIVIDER_UART - The UART module divider has an additional enable bit
+ * which is set when the divider value is not 0. This flag indicates that
+ * the divider is for the UART module.
+ */
+struct tegra_clk_frac_div {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 flags;
+ u8 shift;
+ u8 width;
+ u8 frac_width;
+ spinlock_t *lock;
+};
+
+#define to_clk_frac_div(_hw) container_of(_hw, struct tegra_clk_frac_div, hw)
+
+#define TEGRA_DIVIDER_ROUND_UP BIT(0)
+#define TEGRA_DIVIDER_FIXED BIT(1)
+#define TEGRA_DIVIDER_INT BIT(2)
+#define TEGRA_DIVIDER_UART BIT(3)
+
+extern const struct clk_ops tegra_clk_frac_div_ops;
+struct clk *tegra_clk_register_divider(const char *name,
+ const char *parent_name, void __iomem *reg,
+ unsigned long flags, u8 clk_divider_flags, u8 shift, u8 width,
+ u8 frac_width, spinlock_t *lock);
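As an aside (not part of this patch): fractional dividers of this kind commonly
store (2 * divisor - 2) in the register, so the low bit carries the ".5" step.
A minimal sketch under that assumption follows; the helper name is hypothetical,
and the real code must also honour the TEGRA_DIVIDER_* flags and the configured
field width.

	/* Encode parent_rate/rate into the register value: 2 * divisor - 2 */
	static long frac_div_value_example(unsigned long parent_rate,
					   unsigned long rate)
	{
		long div = DIV_ROUND_UP(parent_rate * 2, rate) - 2;

		if (div < 0)
			return 0;	/* cannot divide below 1.0 */

		return div;		/* caller checks it fits the bit field */
	}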
+
+/*
+ * Tegra PLL:
+ *
+ * In general, there are 3 requirements for each PLL
+ * that SW needs to comply with.
+ * (1) Input frequency range (REF).
+ * (2) Comparison frequency range (CF). CF = REF/DIVM.
+ * (3) VCO frequency range (VCO). VCO = CF * DIVN.
+ *
+ * The final PLL output frequency (FO) = VCO >> DIVP.
+ */
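As an aside (not part of this patch), the three constraints combine into the
rate computation sketched below; the m/n/p names mirror the fields of
struct tegra_clk_pll_freq_table, and the helper itself is hypothetical.

	/*
	 * Worked example with a 12 MHz reference, m = 1, n = 84, p = 0:
	 *   CF  = 12 MHz / 1   = 12 MHz    (must fall within cf_min..cf_max)
	 *   VCO = 12 MHz * 84  = 1008 MHz  (must fall within vco_min..vco_max)
	 *   FO  = 1008 MHz >> 0 = 1008 MHz
	 */
	static unsigned long pll_rate_example(unsigned long ref, u16 m, u16 n, u8 p)
	{
		unsigned long cf = ref / m;	/* comparison frequency */
		unsigned long vco = cf * n;	/* VCO frequency */

		return vco >> p;		/* final output frequency */
	}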
+
+/**
+ * struct tegra_clk_pll_freq_table - PLL frequency table
+ *
+ * @input_rate: input rate from source
+ * @output_rate: output rate from PLL for the input rate
+ * @n: feedback divider
+ * @m: input divider
+ * @p: post divider
+ * @cpcon: charge pump current
+ */
+struct tegra_clk_pll_freq_table {
+ unsigned long input_rate;
+ unsigned long output_rate;
+ u16 n;
+ u16 m;
+ u8 p;
+ u8 cpcon;
+};
+
+/**
+ * struct tegra_clk_pll_params - PLL parameters
+ *
+ * @input_min: Minimum input frequency
+ * @input_max: Maximum input frequency
+ * @cf_min: Minimum comparison frequency
+ * @cf_max: Maximum comparison frequency
+ * @vco_min: Minimum VCO frequency
+ * @vco_max: Maximum VCO frequency
+ * @base_reg: PLL base reg offset
+ * @misc_reg: PLL misc reg offset
+ * @lock_reg: PLL lock reg offset
+ * @lock_bit_idx: Bit index for PLL lock status
+ * @lock_enable_bit_idx: Bit index to enable PLL lock
+ * @lock_delay: Delay in us if PLL lock is not used
+ */
+struct tegra_clk_pll_params {
+ unsigned long input_min;
+ unsigned long input_max;
+ unsigned long cf_min;
+ unsigned long cf_max;
+ unsigned long vco_min;
+ unsigned long vco_max;
+
+ u32 base_reg;
+ u32 misc_reg;
+ u32 lock_reg;
+ u32 lock_bit_idx;
+ u32 lock_enable_bit_idx;
+ int lock_delay;
+};
+
+/**
+ * struct tegra_clk_pll - Tegra PLL clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @clk_base: address of CAR controller
+ * @pmc: address of PMC, required to read override bits
+ * @freq_table: array of frequencies supported by PLL
+ * @params: PLL parameters
+ * @flags: PLL flags
+ * @fixed_rate: PLL rate if it is fixed
+ * @lock: register lock
+ * @divn_shift: shift to the feedback divider bit field
+ * @divn_width: width of the feedback divider bit field
+ * @divm_shift: shift to the input divider bit field
+ * @divm_width: width of the input divider bit field
+ * @divp_shift: shift to the post divider bit field
+ * @divp_width: width of the post divider bit field
+ *
+ * Flags:
+ * TEGRA_PLL_USE_LOCK - This flag indicates that the lock bits should be used
+ * for PLL locking. If not set, the lock_delay value is used as a wait time.
+ * TEGRA_PLL_HAS_CPCON - This flag indicates that CPCON value needs
+ * to be programmed to change output frequency of the PLL.
+ * TEGRA_PLL_SET_LFCON - This flag indicates that LFCON value needs
+ * to be programmed to change output frequency of the PLL.
+ * TEGRA_PLL_SET_DCCON - This flag indicates that DCCON value needs
+ * to be programmed to change output frequency of the PLL.
+ * TEGRA_PLLU - PLLU has an inverted post divider. This flag indicates that
+ * the clock is PLLU and that the post-divider value must be inverted.
+ * TEGRA_PLLM - PLLM has additional override settings in the PMC. This flag
+ * indicates that the clock is PLLM and that the override settings should
+ * be used.
+ * TEGRA_PLL_FIXED - The output frequency of some PLLs must not be changed.
+ * TEGRA_PLLE_CONFIGURE - Configure PLLE when enabling.
+ */
+struct tegra_clk_pll {
+ struct clk_hw hw;
+ void __iomem *clk_base;
+ void __iomem *pmc;
+ u8 flags;
+ unsigned long fixed_rate;
+ spinlock_t *lock;
+ u8 divn_shift;
+ u8 divn_width;
+ u8 divm_shift;
+ u8 divm_width;
+ u8 divp_shift;
+ u8 divp_width;
+ struct tegra_clk_pll_freq_table *freq_table;
+ struct tegra_clk_pll_params *params;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct tegra_clk_pll, hw)
+
+#define TEGRA_PLL_USE_LOCK BIT(0)
+#define TEGRA_PLL_HAS_CPCON BIT(1)
+#define TEGRA_PLL_SET_LFCON BIT(2)
+#define TEGRA_PLL_SET_DCCON BIT(3)
+#define TEGRA_PLLU BIT(4)
+#define TEGRA_PLLM BIT(5)
+#define TEGRA_PLL_FIXED BIT(6)
+#define TEGRA_PLLE_CONFIGURE BIT(7)
+
+extern const struct clk_ops tegra_clk_pll_ops;
+extern const struct clk_ops tegra_clk_plle_ops;
+struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
+ void __iomem *clk_base, void __iomem *pmc,
+ unsigned long flags, unsigned long fixed_rate,
+ struct tegra_clk_pll_params *pll_params, u8 pll_flags,
+ struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock);
+
+/**
+ * struct tegra_clk_pll_out - PLL divider down clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing the PLL divider
+ * @enb_bit_idx: bit to enable/disable PLL divider
+ * @rst_bit_idx: bit to reset PLL divider
+ * @lock: register lock
+ * @flags: hardware-specific flags
+ */
+struct tegra_clk_pll_out {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 enb_bit_idx;
+ u8 rst_bit_idx;
+ spinlock_t *lock;
+ u8 flags;
+};
+
+#define to_clk_pll_out(_hw) container_of(_hw, struct tegra_clk_pll_out, hw)
+
+extern const struct clk_ops tegra_clk_pll_out_ops;
+struct clk *tegra_clk_register_pll_out(const char *name,
+ const char *parent_name, void __iomem *reg, u8 enb_bit_idx,
+ u8 rst_bit_idx, unsigned long flags, u8 pll_div_flags,
+ spinlock_t *lock);
+
+/**
+ * struct tegra_clk_periph_regs - Registers controlling peripheral clock
+ *
+ * @enb_reg: read the enable status
+ * @enb_set_reg: write 1 to enable clock
+ * @enb_clr_reg: write 1 to disable clock
+ * @rst_reg: read the reset status
+ * @rst_set_reg: write 1 to assert the reset of peripheral
+ * @rst_clr_reg: write 1 to deassert the reset of peripheral
+ */
+struct tegra_clk_periph_regs {
+ u32 enb_reg;
+ u32 enb_set_reg;
+ u32 enb_clr_reg;
+ u32 rst_reg;
+ u32 rst_set_reg;
+ u32 rst_clr_reg;
+};
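As an aside (not part of this patch): because the set/clear registers take
effect on a write of 1, gating a clock or toggling a module reset is a single
write with no read-modify-write. A rough, hypothetical sketch of the reset path
follows; the tegra_periph_reset() declared further down is the authoritative
interface.

	/* Assert or deassert a module reset via the banked set/clear registers */
	static void periph_reset_example(void __iomem *clk_base,
					 const struct tegra_clk_periph_regs *regs,
					 int clk_num, bool assert)
	{
		u32 offset = assert ? regs->rst_set_reg : regs->rst_clr_reg;

		writel_relaxed(BIT(clk_num % 32), clk_base + offset);
	}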
+
+/**
+ * struct tegra_clk_periph_gate - peripheral gate clock
+ *
+ * @magic: magic number to validate type
+ * @hw: handle between common and hardware-specific interfaces
+ * @clk_base: address of CAR controller
+ * @regs: Registers to control the peripheral
+ * @flags: hardware-specific flags
+ * @clk_num: Clock number
+ * @enable_refcnt: array to maintain reference count of the clock
+ *
+ * Flags:
+ * TEGRA_PERIPH_NO_RESET - This flag indicates that reset is not allowed
+ * for this module.
+ * TEGRA_PERIPH_MANUAL_RESET - This flag indicates that the module must not
+ * be reset after clock enable; the driver for the module is responsible
+ * for performing the reset.
+ * TEGRA_PERIPH_ON_APB - If the peripheral is on the APB bus, the bus must be
+ * read back to flush write operations. This flag indicates that this
+ * peripheral is on the APB bus.
+ */
+struct tegra_clk_periph_gate {
+ u32 magic;
+ struct clk_hw hw;
+ void __iomem *clk_base;
+ u8 flags;
+ int clk_num;
+ int *enable_refcnt;
+ struct tegra_clk_periph_regs *regs;
+};
+
+#define to_clk_periph_gate(_hw) \
+ container_of(_hw, struct tegra_clk_periph_gate, hw)
+
+#define TEGRA_CLK_PERIPH_GATE_MAGIC 0x17760309
+
+#define TEGRA_PERIPH_NO_RESET BIT(0)
+#define TEGRA_PERIPH_MANUAL_RESET BIT(1)
+#define TEGRA_PERIPH_ON_APB BIT(2)
+
+void tegra_periph_reset(struct tegra_clk_periph_gate *gate, bool assert);
+extern const struct clk_ops tegra_clk_periph_gate_ops;
+struct clk *tegra_clk_register_periph_gate(const char *name,
+ const char *parent_name, u8 gate_flags, void __iomem *clk_base,
+ unsigned long flags, int clk_num,
+ struct tegra_clk_periph_regs *pregs, int *enable_refcnt);
+
+/**
+ * struct tegra_clk_periph - peripheral clock
+ *
+ * @magic: magic number to validate type
+ * @hw: handle between common and hardware-specific interfaces
+ * @mux: mux clock
+ * @divider: divider clock
+ * @gate: gate clock
+ * @mux_ops: mux clock ops
+ * @div_ops: divider clock ops
+ * @gate_ops: gate clock ops
+ */
+struct tegra_clk_periph {
+ u32 magic;
+ struct clk_hw hw;
+ struct clk_mux mux;
+ struct tegra_clk_frac_div divider;
+ struct tegra_clk_periph_gate gate;
+
+ const struct clk_ops *mux_ops;
+ const struct clk_ops *div_ops;
+ const struct clk_ops *gate_ops;
+};
+
+#define to_clk_periph(_hw) container_of(_hw, struct tegra_clk_periph, hw)
+
+#define TEGRA_CLK_PERIPH_MAGIC 0x18221223
+
+extern const struct clk_ops tegra_clk_periph_ops;
+struct clk *tegra_clk_register_periph(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset);
+struct clk *tegra_clk_register_periph_nodiv(const char *name,
+ const char **parent_names, int num_parents,
+ struct tegra_clk_periph *periph, void __iomem *clk_base,
+ u32 offset);
+
+#define TEGRA_CLK_PERIPH(_mux_shift, _mux_width, _mux_flags, \
+ _div_shift, _div_width, _div_frac_width, \
+ _div_flags, _clk_num, _enb_refcnt, _regs, \
+ _gate_flags) \
+ { \
+ .mux = { \
+ .flags = _mux_flags, \
+ .shift = _mux_shift, \
+ .width = _mux_width, \
+ }, \
+ .divider = { \
+ .flags = _div_flags, \
+ .shift = _div_shift, \
+ .width = _div_width, \
+ .frac_width = _div_frac_width, \
+ }, \
+ .gate = { \
+ .flags = _gate_flags, \
+ .clk_num = _clk_num, \
+ .enable_refcnt = _enb_refcnt, \
+ .regs = _regs, \
+ }, \
+ .mux_ops = &clk_mux_ops, \
+ .div_ops = &tegra_clk_frac_div_ops, \
+ .gate_ops = &tegra_clk_periph_gate_ops, \
+ }
+
+struct tegra_periph_init_data {
+ const char *name;
+ int clk_id;
+ const char **parent_names;
+ int num_parents;
+ struct tegra_clk_periph periph;
+ u32 offset;
+ const char *con_id;
+ const char *dev_id;
+};
+
+#define TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parent_names, _offset, \
+ _mux_shift, _mux_width, _mux_flags, _div_shift, \
+ _div_width, _div_frac_width, _div_flags, _regs, \
+ _clk_num, _enb_refcnt, _gate_flags, _clk_id) \
+ { \
+ .name = _name, \
+ .clk_id = _clk_id, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .periph = TEGRA_CLK_PERIPH(_mux_shift, _mux_width, \
+ _mux_flags, _div_shift, \
+ _div_width, _div_frac_width, \
+ _div_flags, _clk_num, \
+ _enb_refcnt, _regs, \
+ _gate_flags), \
+ .offset = _offset, \
+ .con_id = _con_id, \
+ .dev_id = _dev_id, \
+ }
+
+/**
+ * struct tegra_clk_super_mux - super clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register controlling multiplexer
+ * @width: width of the multiplexer bit field
+ * @flags: hardware-specific flags
+ * @div2_index: bit controlling divide-by-2
+ * @pllx_index: PLLX index in the parent list
+ * @lock: register lock
+ *
+ * Flags:
+ * TEGRA_DIVIDER_2 - The LP cluster has an additional divider. This flag
+ * indicates that this is the LP cluster clock.
+ */
+struct tegra_clk_super_mux {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 width;
+ u8 flags;
+ u8 div2_index;
+ u8 pllx_index;
+ spinlock_t *lock;
+};
+
+#define to_clk_super_mux(_hw) container_of(_hw, struct tegra_clk_super_mux, hw)
+
+#define TEGRA_DIVIDER_2 BIT(0)
+
+extern const struct clk_ops tegra_clk_super_ops;
+struct clk *tegra_clk_register_super_mux(const char *name,
+ const char **parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg, u8 clk_super_flags,
+ u8 width, u8 pllx_index, u8 div2_index, spinlock_t *lock);
+
+/**
+ * struct tegra_clk_init_table - clock initialization table
+ * @clk_id: clock id as mentioned in device tree bindings
+ * @parent_id: parent clock id as mentioned in device tree bindings
+ * @rate: rate to set
+ * @state: enable/disable
+ */
+struct tegra_clk_init_table {
+ unsigned int clk_id;
+ unsigned int parent_id;
+ unsigned long rate;
+ int state;
+};
+
+/**
+ * struct tegra_clk_duplicate - duplicate clocks
+ * @clk_id: clock id as mentioned in device tree bindings
+ * @lookup: duplicate lookup entry for the clock
+ */
+struct tegra_clk_duplicate {
+ int clk_id;
+ struct clk_lookup lookup;
+};
+
+#define TEGRA_CLK_DUPLICATE(_clk_id, _dev, _con) \
+ { \
+ .clk_id = _clk_id, \
+ .lookup = { \
+ .dev_id = _dev, \
+ .con_id = _con, \
+ }, \
+ }
+
+void tegra_init_from_table(struct tegra_clk_init_table *tbl,
+ struct clk *clks[], int clk_max);
+
+void tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
+ struct clk *clks[], int clk_max);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+void tegra20_clock_init(struct device_node *np);
+#else
+static inline void tegra20_clock_init(struct device_node *np) {}
+#endif /* CONFIG_ARCH_TEGRA_2x_SOC */
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+void tegra30_clock_init(struct device_node *np);
+#else
+static inline void tegra30_clock_init(struct device_node *np) {}
+#endif /* CONFIG_ARCH_TEGRA_3x_SOC */
+
+#endif /* __TEGRA_CLK_H */
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index dcb6ae0a0425..256c8be74df8 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -144,3 +144,4 @@ error:
vexpress_config_func_put(osc->func);
kfree(osc);
}
+CLK_OF_DECLARE(vexpress_soc, "arm,vexpress-osc", vexpress_osc_of_setup);
diff --git a/drivers/clk/versatile/clk-vexpress.c b/drivers/clk/versatile/clk-vexpress.c
index c742ac7c60bb..82b45aad8ccf 100644
--- a/drivers/clk/versatile/clk-vexpress.c
+++ b/drivers/clk/versatile/clk-vexpress.c
@@ -11,6 +11,7 @@
* Copyright (C) 2012 ARM Limited
*/
+#include <linux/amba/sp810.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
@@ -18,8 +19,6 @@
#include <linux/of_address.h>
#include <linux/vexpress.h>
-#include <asm/hardware/sp810.h>
-
static struct clk *vexpress_sp810_timerclken[4];
static DEFINE_SPINLOCK(vexpress_sp810_lock);
@@ -99,19 +98,13 @@ struct clk *vexpress_sp810_of_get(struct of_phandle_args *clkspec, void *data)
return vexpress_sp810_timerclken[clkspec->args[0]];
}
-static const __initconst struct of_device_id vexpress_fixed_clk_match[] = {
- { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
- { .compatible = "arm,vexpress-osc", .data = vexpress_osc_of_setup, },
- {}
-};
-
void __init vexpress_clk_of_init(void)
{
struct device_node *node;
struct clk *clk;
struct clk *refclk, *timclk;
- of_clk_init(vexpress_fixed_clk_match);
+ of_clk_init(NULL);
node = of_find_compatible_node(NULL, NULL, "arm,sp810");
vexpress_sp810_init(of_iomap(node, 0));
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
new file mode 100644
index 000000000000..f9ba4fab0ddc
--- /dev/null
+++ b/drivers/clk/x86/Makefile
@@ -0,0 +1,2 @@
+clk-x86-lpss-objs := clk-lpss.o clk-lpt.o
+obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
diff --git a/drivers/clk/x86/clk-lpss.c b/drivers/clk/x86/clk-lpss.c
new file mode 100644
index 000000000000..b5e229f3c3d9
--- /dev/null
+++ b/drivers/clk/x86/clk-lpss.c
@@ -0,0 +1,99 @@
+/*
+ * Intel Low Power Subsystem clocks.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+static int clk_lpss_is_mmio_resource(struct acpi_resource *res, void *data)
+{
+ struct resource r;
+ return !acpi_dev_resource_memory(res, &r);
+}
+
+static acpi_status clk_lpss_find_mmio(acpi_handle handle, u32 level,
+ void *data, void **retval)
+{
+ struct resource_list_entry *rentry;
+ struct list_head resource_list;
+ struct acpi_device *adev;
+ const char *uid = data;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (uid) {
+ if (!adev->pnp.unique_id)
+ return AE_OK;
+ if (strcmp(uid, adev->pnp.unique_id))
+ return AE_OK;
+ }
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ clk_lpss_is_mmio_resource, NULL);
+ if (ret < 0)
+ return AE_NO_MEMORY;
+
+ list_for_each_entry(rentry, &resource_list, node)
+ if (resource_type(&rentry->res) == IORESOURCE_MEM) {
+ *(struct resource *)retval = rentry->res;
+ break;
+ }
+
+ acpi_dev_free_resource_list(&resource_list);
+ return AE_OK;
+}
+
+/**
+ * clk_register_lpss_gate - register LPSS clock gate
+ * @name: name of this clock gate
+ * @parent_name: parent clock name
+ * @hid: ACPI _HID of the device
+ * @uid: ACPI _UID of the device (optional)
+ * @offset: LPSS PRV_CLOCK_PARAMS offset
+ *
+ * Creates and registers an LPSS clock gate.
+ */
+struct clk *clk_register_lpss_gate(const char *name, const char *parent_name,
+ const char *hid, const char *uid,
+ unsigned offset)
+{
+ struct resource res = { };
+ void __iomem *mmio_base;
+ acpi_status status;
+ struct clk *clk;
+
+ /*
+ * First try to look up the device and its MMIO resource in the
+ * ACPI namespace.
+ */
+ status = acpi_get_devices(hid, clk_lpss_find_mmio, (void *)uid,
+ (void **)&res);
+ if (ACPI_FAILURE(status) || !res.start)
+ return ERR_PTR(-ENODEV);
+
+ mmio_base = ioremap(res.start, resource_size(&res));
+ if (!mmio_base)
+ return ERR_PTR(-ENOMEM);
+
+ clk = clk_register_gate(NULL, name, parent_name, 0, mmio_base + offset,
+ 0, 0, NULL);
+ if (IS_ERR(clk))
+ iounmap(mmio_base);
+
+ return clk;
+}
diff --git a/drivers/clk/x86/clk-lpss.h b/drivers/clk/x86/clk-lpss.h
new file mode 100644
index 000000000000..e9460f442297
--- /dev/null
+++ b/drivers/clk/x86/clk-lpss.h
@@ -0,0 +1,36 @@
+/*
+ * Intel Low Power Subsystem clock.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CLK_LPSS_H
+#define __CLK_LPSS_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+
+#ifdef CONFIG_ACPI
+extern struct clk *clk_register_lpss_gate(const char *name,
+ const char *parent_name,
+ const char *hid, const char *uid,
+ unsigned offset);
+#else
+static inline struct clk *clk_register_lpss_gate(const char *name,
+ const char *parent_name,
+ const char *hid,
+ const char *uid,
+ unsigned offset)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+#endif /* __CLK_LPSS_H */
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
new file mode 100644
index 000000000000..81298aeef7e3
--- /dev/null
+++ b/drivers/clk/x86/clk-lpt.c
@@ -0,0 +1,86 @@
+/*
+ * Intel Lynxpoint LPSS clocks.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "clk-lpss.h"
+
+#define PRV_CLOCK_PARAMS 0x800
+
+static int lpt_clk_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ /* LPSS free running clock */
+ clk = clk_register_fixed_rate(&pdev->dev, "lpss_clk", NULL, CLK_IS_ROOT,
+ 100000000);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ /* Shared DMA clock */
+ clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto");
+
+ /* SPI clocks */
+ clk = clk_register_lpss_gate("spi0_clk", "lpss_clk", "INT33C0", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C0:00");
+
+ clk = clk_register_lpss_gate("spi1_clk", "lpss_clk", "INT33C1", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C1:00");
+
+ /* I2C clocks */
+ clk = clk_register_lpss_gate("i2c0_clk", "lpss_clk", "INT33C2", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C2:00");
+
+ clk = clk_register_lpss_gate("i2c1_clk", "lpss_clk", "INT33C3", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C3:00");
+
+ /* UART clocks */
+ clk = clk_register_lpss_gate("uart0_clk", "lpss_clk", "INT33C4", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C4:00");
+
+ clk = clk_register_lpss_gate("uart1_clk", "lpss_clk", "INT33C5", NULL,
+ PRV_CLOCK_PARAMS);
+ if (!IS_ERR(clk))
+ clk_register_clkdev(clk, NULL, "INT33C5:00");
+
+ return 0;
+}
+
+static struct platform_driver lpt_clk_driver = {
+ .driver = {
+ .name = "clk-lpt",
+ .owner = THIS_MODULE,
+ },
+ .probe = lpt_clk_probe,
+};
+
+static int __init lpt_clk_init(void)
+{
+ return platform_driver_register(&lpt_clk_driver);
+}
+arch_initcall(lpt_clk_init);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 7fdcbd3f4da5..e920cbe519fa 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,3 +1,6 @@
+config CLKSRC_OF
+ bool
+
config CLKSRC_I8253
bool
@@ -25,6 +28,9 @@ config ARMADA_370_XP_TIMER
config SUNXI_TIMER
bool
+config VT8500_TIMER
+ bool
+
config CLKSRC_NOMADIK_MTU
bool
depends on (ARCH_NOMADIK || ARCH_U8500)
@@ -54,7 +60,5 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
help
Use the always on PRCMU Timer as sched_clock
-config CLKSRC_ARM_GENERIC
- def_bool y if ARM64
- help
- This option enables support for the ARM generic timer.
+config ARM_ARCH_TIMER
+ bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f93453d01673..7d671b85a98e 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_CLKSRC_OF) += clksrc-of.o
obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
@@ -16,5 +17,7 @@ obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
obj-$(CONFIG_SUNXI_TIMER) += sunxi_timer.o
+obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o
+obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
-obj-$(CONFIG_CLKSRC_ARM_GENERIC) += arm_generic.o
+obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
new file mode 100644
index 000000000000..d7ad425ab9b3
--- /dev/null
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -0,0 +1,391 @@
+/*
+ * linux/drivers/clocksource/arm_arch_timer.c
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+
+#include <asm/arch_timer.h>
+#include <asm/virt.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+static u32 arch_timer_rate;
+
+enum ppi_nr {
+ PHYS_SECURE_PPI,
+ PHYS_NONSECURE_PPI,
+ VIRT_PPI,
+ HYP_PPI,
+ MAX_TIMER_PPI
+};
+
+static int arch_timer_ppi[MAX_TIMER_PPI];
+
+static struct clock_event_device __percpu *arch_timer_evt;
+
+static bool arch_timer_use_virtual = true;
+
+/*
+ * Architected system timer support.
+ */
+
+static inline irqreturn_t timer_handler(const int access,
+ struct clock_event_device *evt)
+{
+ unsigned long ctrl;
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
+ ctrl |= ARCH_TIMER_CTRL_IT_MASK;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
+}
+
+static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
+}
+
+static inline void timer_set_mode(const int access, int mode)
+{
+ unsigned long ctrl;
+ switch (mode) {
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void arch_timer_set_mode_virt(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
+}
+
+static void arch_timer_set_mode_phys(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
+}
+
+static inline void set_next_event(const int access, unsigned long evt)
+{
+ unsigned long ctrl;
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+}
+
+static int arch_timer_set_next_event_virt(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
+ return 0;
+}
+
+static int arch_timer_set_next_event_phys(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
+ return 0;
+}
+
+static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
+{
+ clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
+ clk->name = "arch_sys_timer";
+ clk->rating = 450;
+ if (arch_timer_use_virtual) {
+ clk->irq = arch_timer_ppi[VIRT_PPI];
+ clk->set_mode = arch_timer_set_mode_virt;
+ clk->set_next_event = arch_timer_set_next_event_virt;
+ } else {
+ clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+ clk->set_mode = arch_timer_set_mode_phys;
+ clk->set_next_event = arch_timer_set_next_event_phys;
+ }
+
+ clk->cpumask = cpumask_of(smp_processor_id());
+
+ clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
+
+ clockevents_config_and_register(clk, arch_timer_rate,
+ 0xf, 0x7fffffff);
+
+ if (arch_timer_use_virtual)
+ enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
+ else {
+ enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
+ if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+ enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+ }
+
+ arch_counter_set_user_access();
+
+ return 0;
+}
+
+static int arch_timer_available(void)
+{
+ u32 freq;
+
+ if (arch_timer_rate == 0) {
+ freq = arch_timer_get_cntfrq();
+
+ /* Check the timer frequency. */
+ if (freq == 0) {
+ pr_warn("Architected timer frequency not available\n");
+ return -EINVAL;
+ }
+
+ arch_timer_rate = freq;
+ }
+
+ pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
+ (unsigned long)arch_timer_rate / 1000000,
+ (unsigned long)(arch_timer_rate / 10000) % 100,
+ arch_timer_use_virtual ? "virt" : "phys");
+ return 0;
+}
+
+u32 arch_timer_get_rate(void)
+{
+ return arch_timer_rate;
+}
+
+/*
+ * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
+ * call it before it has been initialised. Rather than incur a performance
+ * penalty checking for initialisation, provide a default implementation that
+ * won't lead to time appearing to jump backwards.
+ */
+static u64 arch_timer_read_zero(void)
+{
+ return 0;
+}
+
+u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
+
+static cycle_t arch_counter_read(struct clocksource *cs)
+{
+ return arch_timer_read_counter();
+}
+
+static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+{
+ return arch_timer_read_counter();
+}
+
+static struct clocksource clocksource_counter = {
+ .name = "arch_sys_counter",
+ .rating = 400,
+ .read = arch_counter_read,
+ .mask = CLOCKSOURCE_MASK(56),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static struct cyclecounter cyclecounter = {
+ .read = arch_counter_read_cc,
+ .mask = CLOCKSOURCE_MASK(56),
+};
+
+static struct timecounter timecounter;
+
+struct timecounter *arch_timer_get_timecounter(void)
+{
+ return &timecounter;
+}
+
+static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
+{
+ pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
+ clk->irq, smp_processor_id());
+
+ if (arch_timer_use_virtual)
+ disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
+ else {
+ disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
+ if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+ disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+ }
+
+ clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+}
+
+static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ arch_timer_setup(evt);
+ break;
+ case CPU_DYING:
+ arch_timer_stop(evt);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
+ .notifier_call = arch_timer_cpu_notify,
+};
+
+static int __init arch_timer_register(void)
+{
+ int err;
+ int ppi;
+
+ err = arch_timer_available();
+ if (err)
+ goto out;
+
+ arch_timer_evt = alloc_percpu(struct clock_event_device);
+ if (!arch_timer_evt) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+ cyclecounter.mult = clocksource_counter.mult;
+ cyclecounter.shift = clocksource_counter.shift;
+ timecounter_init(&timecounter, &cyclecounter,
+ arch_counter_get_cntpct());
+
+ if (arch_timer_use_virtual) {
+ ppi = arch_timer_ppi[VIRT_PPI];
+ err = request_percpu_irq(ppi, arch_timer_handler_virt,
+ "arch_timer", arch_timer_evt);
+ } else {
+ ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+ err = request_percpu_irq(ppi, arch_timer_handler_phys,
+ "arch_timer", arch_timer_evt);
+ if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+ ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
+ err = request_percpu_irq(ppi, arch_timer_handler_phys,
+ "arch_timer", arch_timer_evt);
+ if (err)
+ free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+ arch_timer_evt);
+ }
+ }
+
+ if (err) {
+ pr_err("arch_timer: can't register interrupt %d (%d)\n",
+ ppi, err);
+ goto out_free;
+ }
+
+ err = register_cpu_notifier(&arch_timer_cpu_nb);
+ if (err)
+ goto out_free_irq;
+
+ /* Immediately configure the timer on the boot CPU */
+ arch_timer_setup(this_cpu_ptr(arch_timer_evt));
+
+ return 0;
+
+out_free_irq:
+ if (arch_timer_use_virtual)
+ free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
+ else {
+ free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+ arch_timer_evt);
+ if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+ free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
+ arch_timer_evt);
+ }
+
+out_free:
+ free_percpu(arch_timer_evt);
+out:
+ return err;
+}
+
+static const struct of_device_id arch_timer_of_match[] __initconst = {
+ { .compatible = "arm,armv7-timer", },
+ { .compatible = "arm,armv8-timer", },
+ {},
+};
+
+int __init arch_timer_init(void)
+{
+ struct device_node *np;
+ u32 freq;
+ int i;
+
+ np = of_find_matching_node(NULL, arch_timer_of_match);
+ if (!np) {
+ pr_err("arch_timer: can't find DT node\n");
+ return -ENODEV;
+ }
+
+ /* Try to determine the frequency from the device tree or CNTFRQ */
+ if (!of_property_read_u32(np, "clock-frequency", &freq))
+ arch_timer_rate = freq;
+
+ for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
+ arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+
+ of_node_put(np);
+
+ /*
+ * If HYP mode is available, we know that the physical timer
+ * has been configured to be accessible from PL1. Use it, so
+ * that a guest can use the virtual timer instead.
+ *
+ * If no interrupt is provided for the virtual timer, we'll have to
+ * stick to the physical timer. It had better be accessible...
+ */
+ if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
+ arch_timer_use_virtual = false;
+
+ if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
+ !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+ pr_warn("arch_timer: No interrupt available, giving up\n");
+ return -EINVAL;
+ }
+ }
+
+ if (arch_timer_use_virtual)
+ arch_timer_read_counter = arch_counter_get_cntvct;
+ else
+ arch_timer_read_counter = arch_counter_get_cntpct;
+
+ return arch_timer_register();
+}
diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
deleted file mode 100644
index 8ae1a61523ff..000000000000
--- a/drivers/clocksource/arm_generic.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Generic timers support
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-
-#include <clocksource/arm_generic.h>
-
-#include <asm/arm_generic.h>
-
-static u32 arch_timer_rate;
-static u64 sched_clock_mult __read_mostly;
-static DEFINE_PER_CPU(struct clock_event_device, arch_timer_evt);
-static int arch_timer_ppi;
-
-static irqreturn_t arch_timer_handle_irq(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
- unsigned long ctrl;
-
- ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
- if (ctrl & ARCH_TIMER_CTRL_ISTATUS) {
- ctrl |= ARCH_TIMER_CTRL_IMASK;
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
- evt->event_handler(evt);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
-static void arch_timer_stop(void)
-{
- unsigned long ctrl;
-
- ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
- ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
-}
-
-static void arch_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *clk)
-{
- switch (mode) {
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- arch_timer_stop();
- break;
- default:
- break;
- }
-}
-
-static int arch_timer_set_next_event(unsigned long evt,
- struct clock_event_device *unused)
-{
- unsigned long ctrl;
-
- ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
- ctrl |= ARCH_TIMER_CTRL_ENABLE;
- ctrl &= ~ARCH_TIMER_CTRL_IMASK;
-
- arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
-
- return 0;
-}
-
-static void __cpuinit arch_timer_setup(struct clock_event_device *clk)
-{
- /* Let's make sure the timer is off before doing anything else */
- arch_timer_stop();
-
- clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
- clk->name = "arch_sys_timer";
- clk->rating = 400;
- clk->set_mode = arch_timer_set_mode;
- clk->set_next_event = arch_timer_set_next_event;
- clk->irq = arch_timer_ppi;
- clk->cpumask = cpumask_of(smp_processor_id());
-
- clockevents_config_and_register(clk, arch_timer_rate,
- 0xf, 0x7fffffff);
-
- enable_percpu_irq(clk->irq, 0);
-
- /* Ensure the virtual counter is visible to userspace for the vDSO. */
- arch_counter_enable_user_access();
-}
-
-static void __init arch_timer_calibrate(void)
-{
- if (arch_timer_rate == 0) {
- arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
- arch_timer_rate = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
-
- /* Check the timer frequency. */
- if (arch_timer_rate == 0)
- panic("Architected timer frequency is set to zero.\n"
- "You must set this in your .dts file\n");
- }
-
- /* Cache the sched_clock multiplier to save a divide in the hot path. */
-
- sched_clock_mult = DIV_ROUND_CLOSEST(NSEC_PER_SEC, arch_timer_rate);
-
- pr_info("Architected local timer running at %u.%02uMHz.\n",
- arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
-}
-
-static cycle_t arch_counter_read(struct clocksource *cs)
-{
- return arch_counter_get_cntpct();
-}
-
-static struct clocksource clocksource_counter = {
- .name = "arch_sys_counter",
- .rating = 400,
- .read = arch_counter_read,
- .mask = CLOCKSOURCE_MASK(56),
- .flags = (CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES),
-};
-
-int read_current_timer(unsigned long *timer_value)
-{
- *timer_value = arch_counter_get_cntpct();
- return 0;
-}
-
-unsigned long long notrace sched_clock(void)
-{
- return arch_counter_get_cntvct() * sched_clock_mult;
-}
-
-static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- int cpu = (long)hcpu;
- struct clock_event_device *clk = per_cpu_ptr(&arch_timer_evt, cpu);
-
- switch(action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- arch_timer_setup(clk);
- break;
-
- case CPU_DYING:
- case CPU_DYING_FROZEN:
- pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
- clk->irq, cpu);
- disable_percpu_irq(clk->irq);
- arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
- .notifier_call = arch_timer_cpu_notify,
-};
-
-static const struct of_device_id arch_timer_of_match[] __initconst = {
- { .compatible = "arm,armv8-timer" },
- {},
-};
-
-int __init arm_generic_timer_init(void)
-{
- struct device_node *np;
- int err;
- u32 freq;
-
- np = of_find_matching_node(NULL, arch_timer_of_match);
- if (!np) {
- pr_err("arch_timer: can't find DT node\n");
- return -ENODEV;
- }
-
- /* Try to determine the frequency from the device tree or CNTFRQ */
- if (!of_property_read_u32(np, "clock-frequency", &freq))
- arch_timer_rate = freq;
- arch_timer_calibrate();
-
- arch_timer_ppi = irq_of_parse_and_map(np, 0);
- pr_info("arch_timer: found %s irq %d\n", np->name, arch_timer_ppi);
-
- err = request_percpu_irq(arch_timer_ppi, arch_timer_handle_irq,
- np->name, &arch_timer_evt);
- if (err) {
- pr_err("arch_timer: can't register interrupt %d (%d)\n",
- arch_timer_ppi, err);
- return err;
- }
-
- clocksource_register_hz(&clocksource_counter, arch_timer_rate);
-
- /* Calibrate the delay loop directly */
- lpj_fine = DIV_ROUND_CLOSEST(arch_timer_rate, HZ);
-
- /* Immediately configure the timer on the boot CPU */
- arch_timer_setup(this_cpu_ptr(&arch_timer_evt));
-
- register_cpu_notifier(&arch_timer_cpu_nb);
-
- return 0;
-}
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index bc19f12c20ce..50c68fef944b 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -16,7 +16,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/bcm2835_timer.h>
#include <linux/bitops.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
@@ -101,7 +100,7 @@ static struct of_device_id bcm2835_time_match[] __initconst = {
{}
};
-static void __init bcm2835_time_init(void)
+static void __init bcm2835_timer_init(void)
{
struct device_node *node;
void __iomem *base;
@@ -155,7 +154,5 @@ static void __init bcm2835_time_init(void)
pr_info("bcm2835: system timer (irq = %d)\n", irq);
}
-
-struct sys_timer bcm2835_timer = {
- .init = bcm2835_time_init,
-};
+CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
+ bcm2835_timer_init);
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
new file mode 100644
index 000000000000..bdabdaa8d00f
--- /dev/null
+++ b/drivers/clocksource/clksrc-of.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+
+extern struct of_device_id __clksrc_of_table[];
+
+static const struct of_device_id __clksrc_of_table_sentinel
+ __used __section(__clksrc_of_table_end);
+
+void __init clocksource_of_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ void (*init_func)(void);
+
+ for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
+ init_func = match->data;
+ init_func();
+ }
+}
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index d9279385304d..ea210482dd20 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -100,7 +100,6 @@ static struct clock_event_device cs5535_clockevent = {
.set_mode = mfgpt_set_mode,
.set_next_event = mfgpt_next_event,
.rating = 250,
- .shift = 32
};
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
@@ -169,17 +168,11 @@ static int __init cs5535_mfgpt_init(void)
cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val);
/* Set up the clock event */
- cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
- cs5535_clockevent.shift);
- cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
- &cs5535_clockevent);
- cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
- &cs5535_clockevent);
-
printk(KERN_INFO DRV_NAME
": Registering MFGPT timer as a clock event, using IRQ %d\n",
timer_irq);
- clockevents_register_device(&cs5535_clockevent);
+ clockevents_config_and_register(&cs5535_clockevent, MFGPT_HZ,
+ 0xF, 0xFFFE);
return 0;
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index f7dba5b79b44..ab09ed3742ee 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -107,7 +107,7 @@ static const struct of_device_id osctimer_ids[] __initconst = {
{},
};
-static void __init timer_init(void)
+void __init dw_apb_timer_init(void)
{
struct device_node *event_timer, *source_timer;
@@ -125,7 +125,3 @@ static void __init timer_init(void)
init_sched_clock();
}
-
-struct sys_timer dw_apb_timer = {
- .init = timer_init,
-};
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 8914c3c1c88b..435e54d55bbd 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -15,6 +15,7 @@
#include <linux/clocksource.h>
#include <linux/clk.h>
#include <linux/jiffies.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_data/clocksource-nomadik-mtu.h>
#include <asm/mach/time.h>
@@ -64,6 +65,7 @@ static void __iomem *mtu_base;
static bool clkevt_periodic;
static u32 clk_prescale;
static u32 nmdk_cycle; /* write-once */
+static struct delay_timer mtu_delay_timer;
#ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK
/*
@@ -80,6 +82,11 @@ static u32 notrace nomadik_read_sched_clock(void)
}
#endif
+static unsigned long nmdk_timer_read_current_timer(void)
+{
+ return ~readl_relaxed(mtu_base + MTU_VAL(0));
+}
+
/* Clockevent device: use one-shot mode */
static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
{
@@ -134,12 +141,32 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
}
}
+void nmdk_clksrc_reset(void)
+{
+ /* Disable */
+ writel(0, mtu_base + MTU_CR(0));
+
+ /* ClockSource: configure load and background-load, and fire it up */
+ writel(nmdk_cycle, mtu_base + MTU_LR(0));
+ writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
+
+ writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
+ mtu_base + MTU_CR(0));
+}
+
+static void nmdk_clkevt_resume(struct clock_event_device *cedev)
+{
+ nmdk_clkevt_reset();
+ nmdk_clksrc_reset();
+}
+
static struct clock_event_device nmdk_clkevt = {
.name = "mtu_1",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
.rating = 200,
.set_mode = nmdk_clkevt_mode,
.set_next_event = nmdk_clkevt_next,
+ .resume = nmdk_clkevt_resume,
};
/*
@@ -161,19 +188,6 @@ static struct irqaction nmdk_timer_irq = {
.dev_id = &nmdk_clkevt,
};
-void nmdk_clksrc_reset(void)
-{
- /* Disable */
- writel(0, mtu_base + MTU_CR(0));
-
- /* ClockSource: configure load and background-load, and fire it up */
- writel(nmdk_cycle, mtu_base + MTU_LR(0));
- writel(nmdk_cycle, mtu_base + MTU_BGLR(0));
-
- writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA,
- mtu_base + MTU_CR(0));
-}
-
void __init nmdk_timer_init(void __iomem *base, int irq)
{
unsigned long rate;
@@ -227,4 +241,8 @@ void __init nmdk_timer_init(void __iomem *base, int irq)
setup_irq(irq, &nmdk_timer_irq);
nmdk_clkevt.cpumask = cpumask_of(0);
clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU);
+
+ mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
+ mtu_delay_timer.freq = rate;
+ register_current_timer_delay(&mtu_delay_timer);
}
diff --git a/drivers/clocksource/sunxi_timer.c b/drivers/clocksource/sunxi_timer.c
index 3cd1bd3d7aee..4086b9167159 100644
--- a/drivers/clocksource/sunxi_timer.c
+++ b/drivers/clocksource/sunxi_timer.c
@@ -23,7 +23,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sunxi_timer.h>
-#include <linux/clk/sunxi.h>
+#include <linux/clk-provider.h>
#define TIMER_CTL_REG 0x00
#define TIMER_CTL_ENABLE (1 << 0)
@@ -74,7 +74,6 @@ static int sunxi_clkevt_next_event(unsigned long evt,
static struct clock_event_device sunxi_clockevent = {
.name = "sunxi_tick",
- .shift = 32,
.rating = 300,
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = sunxi_clkevt_mode,
@@ -104,7 +103,7 @@ static struct of_device_id sunxi_timer_dt_ids[] = {
{ }
};
-static void __init sunxi_timer_init(void)
+void __init sunxi_timer_init(void)
{
struct device_node *node;
unsigned long rate = 0;
@@ -124,7 +123,7 @@ static void __init sunxi_timer_init(void)
if (irq <= 0)
panic("Can't parse IRQ");
- sunxi_init_clocks();
+ of_clk_init(NULL);
clk = of_clk_get(node, 0);
if (IS_ERR(clk))
@@ -154,18 +153,8 @@ static void __init sunxi_timer_init(void)
val = readl(timer_base + TIMER_CTL_REG);
writel(val | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG);
- sunxi_clockevent.mult = div_sc(rate / TIMER_SCAL,
- NSEC_PER_SEC,
- sunxi_clockevent.shift);
- sunxi_clockevent.max_delta_ns = clockevent_delta2ns(0xff,
- &sunxi_clockevent);
- sunxi_clockevent.min_delta_ns = clockevent_delta2ns(0x1,
- &sunxi_clockevent);
sunxi_clockevent.cpumask = cpumask_of(0);
- clockevents_register_device(&sunxi_clockevent);
+ clockevents_config_and_register(&sunxi_clockevent, rate / TIMER_SCAL,
+ 0x1, 0xff);
}
-
-struct sys_timer sunxi_timer = {
- .init = sunxi_timer_init,
-};
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 32cb929b8eb6..8a6187225dd0 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -157,7 +157,6 @@ static struct tc_clkevt_device clkevt = {
.name = "tc_clkevt",
.features = CLOCK_EVT_FEAT_PERIODIC
| CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32,
/* Should be lower than at91rm9200's system timer */
.rating = 125,
.set_next_event = tc_next_event,
@@ -196,13 +195,9 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
timer_clock = clk32k_divisor_idx;
- clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
- clkevt.clkevt.max_delta_ns
- = clockevent_delta2ns(0xffff, &clkevt.clkevt);
- clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
clkevt.clkevt.cpumask = cpumask_of(0);
- clockevents_register_device(&clkevt.clkevt);
+ clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
setup_irq(irq, &tc_irqaction);
}
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
new file mode 100644
index 000000000000..0bde03feb095
--- /dev/null
+++ b/drivers/clocksource/tegra20_timer.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/time.h>
+#include <asm/smp_twd.h>
+#include <asm/sched_clock.h>
+
+#define RTC_SECONDS 0x08
+#define RTC_SHADOW_SECONDS 0x0c
+#define RTC_MILLISECONDS 0x10
+
+#define TIMERUS_CNTR_1US 0x10
+#define TIMERUS_USEC_CFG 0x14
+#define TIMERUS_CNTR_FREEZE 0x4c
+
+#define TIMER1_BASE 0x0
+#define TIMER2_BASE 0x8
+#define TIMER3_BASE 0x50
+#define TIMER4_BASE 0x58
+
+#define TIMER_PTV 0x0
+#define TIMER_PCR 0x4
+
+static void __iomem *timer_reg_base;
+static void __iomem *rtc_base;
+
+static struct timespec persistent_ts;
+static u64 persistent_ms, last_persistent_ms;
+
+#define timer_writel(value, reg) \
+ __raw_writel(value, timer_reg_base + (reg))
+#define timer_readl(reg) \
+ __raw_readl(timer_reg_base + (reg))
+
+static int tegra_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ u32 reg;
+
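+	/* bit 31 = enable; the low bits hold the countdown value minus one */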
+ reg = 0x80000000 | ((cycles > 1) ? (cycles-1) : 0);
+ timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+
+ return 0;
+}
+
+static void tegra_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ u32 reg;
+
+ timer_writel(0, TIMER3_BASE + TIMER_PTV);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
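+		/* bit 31 = enable, bit 30 = periodic reload; period is one jiffy in usec ticks */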
+ reg = 0xC0000000 | ((1000000/HZ)-1);
+ timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static struct clock_event_device tegra_clockevent = {
+ .name = "timer0",
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = tegra_timer_set_next_event,
+ .set_mode = tegra_timer_set_mode,
+};
+
+static u32 notrace tegra_read_sched_clock(void)
+{
+ return timer_readl(TIMERUS_CNTR_1US);
+}
+
+/*
+ * tegra_rtc_read - Reads the Tegra RTC registers
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing to avoid race conditions
+ * on the RTC shadow register
+ */
+static u64 tegra_rtc_read_ms(void)
+{
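+	/* a read of the milliseconds register latches the seconds value
+	 * into the shadow register, so milliseconds must be read first
+	 */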
+ u32 ms = readl(rtc_base + RTC_MILLISECONDS);
+ u32 s = readl(rtc_base + RTC_SHADOW_SECONDS);
+ return (u64)s * MSEC_PER_SEC + ms;
+}
+
+/*
+ * tegra_read_persistent_clock - Return time from a persistent clock.
+ *
+ * Reads the time from a source which isn't disabled during PM, the
+ * 32k sync timer. Converts the cycles elapsed since the last read into
+ * nsecs and adds them to a monotonically increasing timespec.
+ * Care must be taken that this function is not called while the
+ * tegra_rtc driver could be executing to avoid race conditions
+ * on the RTC shadow register
+ */
+static void tegra_read_persistent_clock(struct timespec *ts)
+{
+ u64 delta;
+ struct timespec *tsp = &persistent_ts;
+
+ last_persistent_ms = persistent_ms;
+ persistent_ms = tegra_rtc_read_ms();
+ delta = persistent_ms - last_persistent_ms;
+
+ timespec_add_ns(tsp, delta * NSEC_PER_MSEC);
+ *ts = *tsp;
+}
+
+static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+ timer_writel(1<<30, TIMER3_BASE + TIMER_PCR);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction tegra_timer_irq = {
+ .name = "timer0",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_HIGH,
+ .handler = tegra_timer_interrupt,
+ .dev_id = &tegra_clockevent,
+};
+
+static const struct of_device_id timer_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-timer" },
+ {}
+};
+
+static const struct of_device_id rtc_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-rtc" },
+ {}
+};
+
+static void __init tegra20_init_timer(void)
+{
+ struct device_node *np;
+ struct clk *clk;
+ unsigned long rate;
+ int ret;
+
+ np = of_find_matching_node(NULL, timer_match);
+ if (!np) {
+ pr_err("Failed to find timer DT node\n");
+ BUG();
+ }
+
+ timer_reg_base = of_iomap(np, 0);
+ if (!timer_reg_base) {
+ pr_err("Can't map timer registers\n");
+ BUG();
+ }
+
+ tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
+ if (tegra_timer_irq.irq <= 0) {
+ pr_err("Failed to map timer IRQ\n");
+ BUG();
+ }
+
+ clk = clk_get_sys("timer", NULL);
+ if (IS_ERR(clk)) {
+ pr_warn("Unable to get timer clock. Assuming 12Mhz input clock.\n");
+ rate = 12000000;
+ } else {
+ clk_prepare_enable(clk);
+ rate = clk_get_rate(clk);
+ }
+
+ of_node_put(np);
+
+ np = of_find_matching_node(NULL, rtc_match);
+ if (!np) {
+ pr_err("Failed to find RTC DT node\n");
+ BUG();
+ }
+
+ rtc_base = of_iomap(np, 0);
+ if (!rtc_base) {
+ pr_err("Can't map RTC registers");
+ BUG();
+ }
+
+ /*
+ * rtc registers are used by read_persistent_clock, keep the rtc clock
+ * enabled
+ */
+ clk = clk_get_sys("rtc-tegra", NULL);
+ if (IS_ERR(clk))
+ pr_warn("Unable to get rtc-tegra clock\n");
+ else
+ clk_prepare_enable(clk);
+
+ of_node_put(np);
+
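+	/* USEC_CFG holds (dividend << 8) | divisor; the input clock is scaled
+	 * by (dividend + 1) / (divisor + 1) to derive the 1 MHz reference
+	 */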
+ switch (rate) {
+ case 12000000:
+ timer_writel(0x000b, TIMERUS_USEC_CFG);
+ break;
+ case 13000000:
+ timer_writel(0x000c, TIMERUS_USEC_CFG);
+ break;
+ case 19200000:
+ timer_writel(0x045f, TIMERUS_USEC_CFG);
+ break;
+ case 26000000:
+ timer_writel(0x0019, TIMERUS_USEC_CFG);
+ break;
+ default:
+ WARN(1, "Unknown clock rate");
+ }
+
+ setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
+
+ if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
+ "timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
+ pr_err("Failed to register clocksource\n");
+ BUG();
+ }
+
+ ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
+ if (ret) {
+ pr_err("Failed to register timer IRQ: %d\n", ret);
+ BUG();
+ }
+
+ tegra_clockevent.cpumask = cpu_all_mask;
+ tegra_clockevent.irq = tegra_timer_irq.irq;
+ clockevents_config_and_register(&tegra_clockevent, 1000000,
+ 0x1, 0x1fffffff);
+#ifdef CONFIG_HAVE_ARM_TWD
+ twd_local_timer_of_register();
+#endif
+ register_persistent_clock(NULL, tegra_read_persistent_clock);
+}
+CLOCKSOURCE_OF_DECLARE(tegra20, "nvidia,tegra20-timer", tegra20_init_timer);
+
+#ifdef CONFIG_PM
+static u32 usec_config;
+
+void tegra_timer_suspend(void)
+{
+ usec_config = timer_readl(TIMERUS_USEC_CFG);
+}
+
+void tegra_timer_resume(void)
+{
+ timer_writel(usec_config, TIMERUS_USEC_CFG);
+}
+#endif
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
new file mode 100644
index 000000000000..8efc86b5b5dd
--- /dev/null
+++ b/drivers/clocksource/vt8500_timer.c
@@ -0,0 +1,180 @@
+/*
+ * arch/arm/mach-vt8500/timer.c
+ *
+ * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This file is copied and modified from the original timer.c provided by
+ * Alexey Charkov. Minor changes have been made for Device Tree Support.
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <asm/mach/time.h>
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define VT8500_TIMER_OFFSET 0x0100
+#define VT8500_TIMER_HZ 3000000
+#define TIMER_MATCH_VAL 0x0000
+#define TIMER_COUNT_VAL 0x0010
+#define TIMER_STATUS_VAL 0x0014
+#define TIMER_IER_VAL 0x001c /* interrupt enable */
+#define TIMER_CTRL_VAL 0x0020
+#define TIMER_AS_VAL 0x0024 /* access status */
+#define TIMER_COUNT_R_ACTIVE (1 << 5) /* not ready for read */
+#define TIMER_COUNT_W_ACTIVE (1 << 4) /* not ready for write */
+#define TIMER_MATCH_W_ACTIVE (1 << 0) /* not ready for write */
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+static void __iomem *regbase;
+
+static cycle_t vt8500_timer_read(struct clocksource *cs)
+{
+ int loops = msecs_to_loops(10);
+ writel(3, regbase + TIMER_CTRL_VAL);
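+	/* wait for the count register to become readable
+	 * (TIMER_COUNT_R_ACTIVE clears) before reading it
+	 */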
+ while ((readl((regbase + TIMER_AS_VAL)) & TIMER_COUNT_R_ACTIVE)
+ && --loops)
+ cpu_relax();
+ return readl(regbase + TIMER_COUNT_VAL);
+}
+
+static struct clocksource clocksource = {
+ .name = "vt8500_timer",
+ .rating = 200,
+ .read = vt8500_timer_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int vt8500_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ int loops = msecs_to_loops(10);
+ cycle_t alarm = clocksource.read(&clocksource) + cycles;
+ while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
+ && --loops)
+ cpu_relax();
+ writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+
+ if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
+ return -ETIME;
+
+ writel(1, regbase + TIMER_IER_VAL);
+
+ return 0;
+}
+
+static void vt8500_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_PERIODIC:
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ writel(readl(regbase + TIMER_CTRL_VAL) | 1,
+ regbase + TIMER_CTRL_VAL);
+ writel(0, regbase + TIMER_IER_VAL);
+ break;
+ }
+}
+
+static struct clock_event_device clockevent = {
+ .name = "vt8500_timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = vt8500_timer_set_next_event,
+ .set_mode = vt8500_timer_set_mode,
+};
+
+static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ writel(0xf, regbase + TIMER_STATUS_VAL);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq = {
+ .name = "vt8500_timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = vt8500_timer_interrupt,
+ .dev_id = &clockevent,
+};
+
+static struct of_device_id vt8500_timer_ids[] = {
+ { .compatible = "via,vt8500-timer" },
+ { }
+};
+
+static void __init vt8500_timer_init(void)
+{
+ struct device_node *np;
+ int timer_irq;
+
+ np = of_find_matching_node(NULL, vt8500_timer_ids);
+ if (!np) {
+ pr_err("%s: Timer description missing from Device Tree\n",
+ __func__);
+ return;
+ }
+ regbase = of_iomap(np, 0);
+ if (!regbase) {
+ pr_err("%s: Missing iobase description in Device Tree\n",
+ __func__);
+ of_node_put(np);
+ return;
+ }
+ timer_irq = irq_of_parse_and_map(np, 0);
+ if (!timer_irq) {
+ pr_err("%s: Missing irq description in Device Tree\n",
+ __func__);
+ of_node_put(np);
+ return;
+ }
+
+ writel(1, regbase + TIMER_CTRL_VAL);
+ writel(0xf, regbase + TIMER_STATUS_VAL);
+ writel(~0, regbase + TIMER_MATCH_VAL);
+
+ if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
+ pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
+ __func__, clocksource.name);
+
+ clockevent.cpumask = cpumask_of(0);
+
+ if (setup_irq(timer_irq, &irq))
+ pr_err("%s: setup_irq failed for %s\n", __func__,
+ clockevent.name);
+ clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
+ 4, 0xf0000000);
+}
+
+CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init)
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 7b695913cb30..f1b7e244bfc1 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -276,7 +276,7 @@ static int cn_init(void)
cn_already_initialized = 1;
- proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
+ proc_create("connector", S_IRUGO, init_net.proc_net, &cn_file_ops);
return 0;
}
@@ -287,7 +287,7 @@ static void cn_fini(void)
cn_already_initialized = 0;
- proc_net_remove(&init_net, "connector");
+ remove_proc_entry("connector", init_net.proc_net);
cn_queue_free_dev(dev->cbdev);
netlink_kernel_release(dev->nls);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e0a899f25e37..cbcb21e32771 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -185,7 +185,7 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
config GENERIC_CPUFREQ_CPU0
- bool "Generic CPU0 cpufreq driver"
+ tristate "Generic CPU0 cpufreq driver"
depends on HAVE_CLK && REGULATOR && PM_OPP && OF
select CPU_FREQ_TABLE
help
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a0b3661d90b0..030ddf6dd3f1 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -21,8 +21,8 @@ config ARM_S3C2416_CPUFREQ
If in doubt, say N.
config ARM_S3C2416_CPUFREQ_VCORESCALE
- bool "Allow voltage scaling for S3C2416 arm core (EXPERIMENTAL)"
- depends on ARM_S3C2416_CPUFREQ && REGULATOR && EXPERIMENTAL
+ bool "Allow voltage scaling for S3C2416 arm core"
+ depends on ARM_S3C2416_CPUFREQ && REGULATOR
help
Enable CPU voltage scaling when entering the dvs mode.
It uses information gathered through existing hardware and
@@ -77,9 +77,39 @@ config ARM_EXYNOS5250_CPUFREQ
This adds the CPUFreq driver for Samsung EXYNOS5250
SoC.
+config ARM_KIRKWOOD_CPUFREQ
+ def_bool ARCH_KIRKWOOD && OF
+ help
+ This adds the CPUFreq driver for Marvell Kirkwood
+ SoCs.
+
+config ARM_IMX6Q_CPUFREQ
+ tristate "Freescale i.MX6Q cpufreq support"
+ depends on SOC_IMX6Q
+ depends on REGULATOR_ANATOP
+ help
+ This adds cpufreq driver support for Freescale i.MX6Q SOC.
+
+ If in doubt, say N.
+
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
+
+config ARM_HIGHBANK_CPUFREQ
+ tristate "Calxeda Highbank-based"
+ depends on ARCH_HIGHBANK
+ select CPU_FREQ_TABLE
+ select GENERIC_CPUFREQ_CPU0
+ select PM_OPP
+ select REGULATOR
+
+ default m
+ help
+ This adds the CPUFreq driver for Calxeda Highbank SoC
+ based boards.
+
+ If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 934854ae5eb4..d7dc0ed6adb0 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -2,6 +2,19 @@
# x86 CPU Frequency scaling drivers
#
+config X86_INTEL_PSTATE
+ bool "Intel P state control"
+ depends on X86
+ help
+ This driver provides P-state control for Intel Core processors.
+ The driver implements an internal governor and will become
+ the scaling driver and governor for Sandy Bridge processors.
+
+ When this driver is enabled it will become the preferred
+ scaling driver for Sandy Bridge processors.
+
+ If in doubt, say N.
+
config X86_PCC_CPUFREQ
tristate "Processor Clocking Control interface driver"
depends on ACPI && ACPI_PROCESSOR
@@ -106,7 +119,7 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
select CPU_FREQ_TABLE
- depends on ACPI && ACPI_PROCESSOR
+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
help
This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
Support for K10 and newer processors is now in acpi-cpufreq.
@@ -174,7 +187,7 @@ config X86_SPEEDSTEP_ICH
config X86_SPEEDSTEP_SMI
tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
select CPU_FREQ_TABLE
- depends on X86_32 && EXPERIMENTAL
+ depends on X86_32
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
(Coppermine), all mobile Intel Pentium III-M (Tualatin)
@@ -206,7 +219,7 @@ config X86_P4_CLOCKMOD
config X86_CPUFREQ_NFORCE2
tristate "nVidia nForce2 FSB changing"
- depends on X86_32 && EXPERIMENTAL
+ depends on X86_32
help
This adds the CPUFreq driver for FSB changing on nVidia nForce2
platforms.
@@ -242,7 +255,7 @@ config X86_LONGHAUL
config X86_E_POWERSAVER
tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
select CPU_FREQ_TABLE
- depends on X86_32 && EXPERIMENTAL
+ depends on X86_32
help
This adds the CPUFreq driver for VIA C7 processors. However, this driver
does not have any safeguards to prevent operating the CPU out of spec
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index fadc4d496e2f..863fd1865d45 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -19,11 +19,12 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
##################################################################################
# x86 drivers.
# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
-# K8 systems. ACPI is preferred to all other hardware-specific drivers.
+# K8 systems. This is still the case but acpi-cpufreq errors out so that
+# powernow-k8 can then load. ACPI is preferred to all other hardware-specific drivers.
# speedstep-* is preferred over p4-clockmod.
-obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
+obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
@@ -39,10 +40,11 @@ obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
+obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
@@ -50,8 +52,11 @@ obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
-obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
+obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
+obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0d048f6a2b23..937bc286591f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -734,7 +734,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
#ifdef CONFIG_SMP
dmi_check_system(sw_any_bug_dmi_table);
- if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
+ if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_copy(policy->cpus, cpu_core_mask(cpu));
}
@@ -762,6 +762,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 0xf) {
+ pr_debug("AMD K8 systems must use native drivers.\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
pr_debug("SYSTEM IO addr space\n");
data->cpu_feature = SYSTEM_IO_CAPABLE;
break;
@@ -1030,4 +1036,11 @@ MODULE_PARM_DESC(acpi_pstate_strict,
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
+static const struct x86_cpu_id acpi_cpufreq_ids[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_ACPI),
+ X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
+
MODULE_ALIAS("acpi");
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 52bf36d599f5..4e5b7fb8927c 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -12,12 +12,12 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
-#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/opp.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -71,12 +71,15 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
}
if (cpu_reg) {
+ rcu_read_lock();
opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
+ rcu_read_unlock();
pr_err("failed to find OPP for %ld\n", freq_Hz);
return PTR_ERR(opp);
}
volt = opp_get_voltage(opp);
+ rcu_read_unlock();
tol = volt * voltage_tolerance / 100;
volt_old = regulator_get_voltage(cpu_reg);
}
@@ -143,7 +146,6 @@ static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
* share the clock and voltage and clock. Use cpufreq affected_cpus
* interface to have all CPUs scaled together.
*/
- policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
@@ -174,34 +176,32 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
.attr = cpu0_cpufreq_attr,
};
-static int cpu0_cpufreq_driver_init(void)
+static int cpu0_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
int ret;
- np = of_find_node_by_path("/cpus/cpu@0");
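+	/* use the first CPU node that provides an operating-points table */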
+ for_each_child_of_node(of_find_node_by_path("/cpus"), np) {
+ if (of_get_property(np, "operating-points", NULL))
+ break;
+ }
+
if (!np) {
pr_err("failed to find cpu0 node\n");
return -ENOENT;
}
- cpu_dev = get_cpu_device(0);
- if (!cpu_dev) {
- pr_err("failed to get cpu0 device\n");
- ret = -ENODEV;
- goto out_put_node;
- }
-
+ cpu_dev = &pdev->dev;
cpu_dev->of_node = np;
- cpu_clk = clk_get(cpu_dev, NULL);
+ cpu_clk = devm_clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
pr_err("failed to get cpu0 clock: %d\n", ret);
goto out_put_node;
}
- cpu_reg = regulator_get(cpu_dev, "cpu0");
+ cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
if (IS_ERR(cpu_reg)) {
pr_warn("failed to get cpu0 regulator\n");
cpu_reg = NULL;
@@ -236,12 +236,14 @@ static int cpu0_cpufreq_driver_init(void)
*/
for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
;
+ rcu_read_lock();
opp = opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
min_uV = opp_get_voltage(opp);
opp = opp_find_freq_exact(cpu_dev,
freq_table[i-1].frequency * 1000, true);
max_uV = opp_get_voltage(opp);
+ rcu_read_unlock();
ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
if (ret > 0)
transition_latency += ret * 1000;
@@ -262,7 +264,24 @@ out_put_node:
of_node_put(np);
return ret;
}
-late_initcall(cpu0_cpufreq_driver_init);
+
+static int cpu0_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&cpu0_cpufreq_driver);
+ opp_free_cpufreq_table(cpu_dev, &freq_table);
+
+ return 0;
+}
+
+static struct platform_driver cpu0_cpufreq_platdrv = {
+ .driver = {
+ .name = "cpufreq-cpu0",
+ .owner = THIS_MODULE,
+ },
+ .probe = cpu0_cpufreq_probe,
+ .remove = cpu0_cpufreq_remove,
+};
+module_platform_driver(cpu0_cpufreq_platdrv);
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1f93dbd72355..b02824d092e7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -59,8 +59,6 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
* mode before doing so.
*
* Additional rules:
- * - All holders of the lock should check to make sure that the CPU they
- * are concerned with are online after they get the lock.
* - Governor routines that can be called in cpufreq hotplug path should not
* take this sem as top level hotplug notifier handler takes this.
* - Lock should not be held across
@@ -70,38 +68,28 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu) \
-static int lock_policy_rwsem_##mode \
-(int cpu) \
+static int lock_policy_rwsem_##mode(int cpu) \
{ \
int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
BUG_ON(policy_cpu == -1); \
down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
- if (unlikely(!cpu_online(cpu))) { \
- up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
- return -1; \
- } \
\
return 0; \
}
lock_policy_rwsem(read, cpu);
-
lock_policy_rwsem(write, cpu);
-static void unlock_policy_rwsem_read(int cpu)
-{
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
- BUG_ON(policy_cpu == -1);
- up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
-}
-
-static void unlock_policy_rwsem_write(int cpu)
-{
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
- BUG_ON(policy_cpu == -1);
- up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
+#define unlock_policy_rwsem(mode, cpu) \
+static void unlock_policy_rwsem_##mode(int cpu) \
+{ \
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
+ BUG_ON(policy_cpu == -1); \
+ up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
}
+unlock_policy_rwsem(read, cpu);
+unlock_policy_rwsem(write, cpu);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
@@ -180,6 +168,9 @@ err_out:
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
+ if (cpufreq_disabled())
+ return NULL;
+
return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -198,6 +189,9 @@ static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
+ if (cpufreq_disabled())
+ return;
+
__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
@@ -261,14 +255,21 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
struct cpufreq_policy *policy;
+ unsigned long flags;
BUG_ON(irqs_disabled());
+ if (cpufreq_disabled())
+ return;
+
freqs->flags = cpufreq_driver->flags;
pr_debug("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
switch (state) {
case CPUFREQ_PRECHANGE:
@@ -294,7 +295,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
(unsigned long)freqs->cpu);
- trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
@@ -543,8 +543,6 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
*/
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
- if (cpumask_empty(policy->related_cpus))
- return show_cpus(policy->cpus, buf);
return show_cpus(policy->related_cpus, buf);
}
@@ -700,87 +698,6 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
-/*
- * Returns:
- * Negative: Failure
- * 0: Success
- * Positive: When we have a managed CPU and the sysfs got symlinked
- */
-static int cpufreq_add_dev_policy(unsigned int cpu,
- struct cpufreq_policy *policy,
- struct device *dev)
-{
- int ret = 0;
-#ifdef CONFIG_SMP
- unsigned long flags;
- unsigned int j;
-#ifdef CONFIG_HOTPLUG_CPU
- struct cpufreq_governor *gov;
-
- gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
- if (gov) {
- policy->governor = gov;
- pr_debug("Restoring governor %s for cpu %d\n",
- policy->governor->name, cpu);
- }
-#endif
-
- for_each_cpu(j, policy->cpus) {
- struct cpufreq_policy *managed_policy;
-
- if (cpu == j)
- continue;
-
- /* Check for existing affected CPUs.
- * They may not be aware of it due to CPU Hotplug.
- * cpufreq_cpu_put is called when the device is removed
- * in __cpufreq_remove_dev()
- */
- managed_policy = cpufreq_cpu_get(j);
- if (unlikely(managed_policy)) {
-
- /* Set proper policy_cpu */
- unlock_policy_rwsem_write(cpu);
- per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
-
- if (lock_policy_rwsem_write(cpu) < 0) {
- /* Should not go through policy unlock path */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
- cpufreq_cpu_put(managed_policy);
- return -EBUSY;
- }
-
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpumask_copy(managed_policy->cpus, policy->cpus);
- per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- pr_debug("CPU already managed, adding link\n");
- ret = sysfs_create_link(&dev->kobj,
- &managed_policy->kobj,
- "cpufreq");
- if (ret)
- cpufreq_cpu_put(managed_policy);
- /*
- * Success. We only needed to be added to the mask.
- * Call driver->exit() because only the cpu parent of
- * the kobj needed to call init().
- */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
- if (!ret)
- return 1;
- else
- return ret;
- }
- }
-#endif
- return ret;
-}
-
-
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
struct cpufreq_policy *policy)
@@ -794,8 +711,6 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
if (j == cpu)
continue;
- if (!cpu_online(j))
- continue;
pr_debug("CPU %u already managed, adding link\n", j);
managed_policy = cpufreq_cpu_get(cpu);
@@ -852,8 +767,6 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus) {
- if (!cpu_online(j))
- continue;
per_cpu(cpufreq_cpu_data, j) = policy;
per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
}
@@ -885,6 +798,42 @@ err_out_kobj_put:
return ret;
}
+#ifdef CONFIG_HOTPLUG_CPU
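+/*
+ * Attach a hot-added CPU to the policy that already manages one of its
+ * siblings: stop the governor, extend policy->cpus, restart the governor
+ * with refreshed limits, and create the sysfs link for the new CPU.
+ */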
+static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
+ struct device *dev)
+{
+ struct cpufreq_policy *policy;
+ int ret = 0;
+ unsigned long flags;
+
+ policy = cpufreq_cpu_get(sibling);
+ WARN_ON(!policy);
+
+ __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+
+ lock_policy_rwsem_write(sibling);
+
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ cpumask_set_cpu(cpu, policy->cpus);
+ per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
+ per_cpu(cpufreq_cpu_data, cpu) = policy;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ unlock_policy_rwsem_write(sibling);
+
+ __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
/**
* cpufreq_add_dev - add a CPU device
@@ -897,12 +846,12 @@ err_out_kobj_put:
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = dev->id;
- int ret = 0, found = 0;
+ unsigned int j, cpu = dev->id;
+ int ret = -ENOMEM;
struct cpufreq_policy *policy;
unsigned long flags;
- unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
+ struct cpufreq_governor *gov;
int sibling;
#endif
@@ -919,6 +868,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
cpufreq_cpu_put(policy);
return 0;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Check if this cpu was hot-unplugged earlier and has siblings */
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_online_cpu(sibling) {
+ struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+ if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return cpufreq_add_policy_cpu(cpu, sibling, dev);
+ }
+ }
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+#endif
#endif
if (!try_module_get(cpufreq_driver->owner)) {
@@ -926,7 +888,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto module_out;
}
- ret = -ENOMEM;
policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
if (!policy)
goto nomem_out;
@@ -938,66 +899,58 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto err_free_cpumask;
policy->cpu = cpu;
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
cpumask_copy(policy->cpus, cpumask_of(cpu));
/* Initially set CPU itself as the policy_cpu */
per_cpu(cpufreq_policy_cpu, cpu) = cpu;
- ret = (lock_policy_rwsem_write(cpu) < 0);
- WARN_ON(ret);
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
- /* Set governor before ->init, so that driver could check it */
-#ifdef CONFIG_HOTPLUG_CPU
- for_each_online_cpu(sibling) {
- struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
- if (cp && cp->governor &&
- (cpumask_test_cpu(cpu, cp->related_cpus))) {
- policy->governor = cp->governor;
- found = 1;
- break;
- }
- }
-#endif
- if (!found)
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
*/
ret = cpufreq_driver->init(policy);
if (ret) {
pr_debug("initialization failed\n");
- goto err_unlock_policy;
+ goto err_set_policy_cpu;
}
+
+ /* related cpus should at least have policy->cpus */
+ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+ /*
+ * affected cpus must always be the ones that are online. We aren't
+ * managing offline cpus here.
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
- ret = cpufreq_add_dev_policy(cpu, policy, dev);
- if (ret) {
- if (ret > 0)
- /* This is a managed cpu, symlink created,
- exit with 0 */
- ret = 0;
- goto err_unlock_policy;
+#ifdef CONFIG_HOTPLUG_CPU
+ gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+ if (gov) {
+ policy->governor = gov;
+ pr_debug("Restoring governor %s for cpu %d\n",
+ policy->governor->name, cpu);
}
+#endif
ret = cpufreq_add_dev_interface(cpu, policy, dev);
if (ret)
goto err_out_unregister;
- unlock_policy_rwsem_write(cpu);
-
kobject_uevent(&policy->kobj, KOBJ_ADD);
module_put(cpufreq_driver->owner);
pr_debug("initialization complete\n");
return 0;
-
err_out_unregister:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
@@ -1007,8 +960,8 @@ err_out_unregister:
kobject_put(&policy->kobj);
wait_for_completion(&policy->kobj_unregister);
-err_unlock_policy:
- unlock_policy_rwsem_write(cpu);
+err_set_policy_cpu:
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
free_cpumask_var(policy->related_cpus);
err_free_cpumask:
free_cpumask_var(policy->cpus);
@@ -1020,6 +973,22 @@ module_out:
return ret;
}
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+ int j;
+
+ policy->last_cpu = policy->cpu;
+ policy->cpu = cpu;
+
+ for_each_cpu(j, policy->cpus)
+ per_cpu(cpufreq_policy_cpu, j) = cpu;
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+ cpufreq_frequency_table_update_policy_cpu(policy);
+#endif
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_UPDATE_POLICY_CPU, policy);
+}
/**
* __cpufreq_remove_dev - remove a CPU device
@@ -1030,129 +999,103 @@ module_out:
*/
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = dev->id;
+ unsigned int cpu = dev->id, ret, cpus;
unsigned long flags;
struct cpufreq_policy *data;
struct kobject *kobj;
struct completion *cmp;
-#ifdef CONFIG_SMP
struct device *cpu_dev;
- unsigned int j;
-#endif
- pr_debug("unregistering CPU %u\n", cpu);
+ pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
data = per_cpu(cpufreq_cpu_data, cpu);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (!data) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- unlock_policy_rwsem_write(cpu);
+ pr_debug("%s: No cpu_data found\n", __func__);
return -EINVAL;
}
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ if (cpufreq_driver->target)
+ __cpufreq_governor(data, CPUFREQ_GOV_STOP);
-#ifdef CONFIG_SMP
- /* if this isn't the CPU which is the parent of the kobj, we
- * only need to unlink, put and exit
- */
- if (unlikely(cpu != data->cpu)) {
- pr_debug("removing link\n");
- cpumask_clear_cpu(cpu, data->cpus);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobj = &dev->kobj;
- cpufreq_cpu_put(data);
- unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- return 0;
- }
+#ifdef CONFIG_HOTPLUG_CPU
+ if (!cpufreq_driver->setpolicy)
+ strncpy(per_cpu(cpufreq_cpu_governor, cpu),
+ data->governor->name, CPUFREQ_NAME_LEN);
#endif
-#ifdef CONFIG_SMP
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ cpus = cpumask_weight(data->cpus);
+ cpumask_clear_cpu(cpu, data->cpus);
+ unlock_policy_rwsem_write(cpu);
-#ifdef CONFIG_HOTPLUG_CPU
- strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
- CPUFREQ_NAME_LEN);
-#endif
+ if (cpu != data->cpu) {
+ sysfs_remove_link(&dev->kobj, "cpufreq");
+ } else if (cpus > 1) {
+ /* first sibling now owns the new sysfs dir */
+ cpu_dev = get_cpu_device(cpumask_first(data->cpus));
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ ret = kobject_move(&data->kobj, &cpu_dev->kobj);
+ if (ret) {
+ pr_err("%s: Failed to move kobj: %d", __func__, ret);
- /* if we have other CPUs still registered, we need to unlink them,
- * or else wait_for_completion below will lock up. Clean the
- * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
- * the sysfs links afterwards.
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- per_cpu(cpufreq_cpu_data, j) = NULL;
- }
- }
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ cpumask_set_cpu(cpu, data->cpus);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ per_cpu(cpufreq_cpu_data, cpu) = data;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- pr_debug("removing link for cpu %u\n", j);
-#ifdef CONFIG_HOTPLUG_CPU
- strncpy(per_cpu(cpufreq_cpu_governor, j),
- data->governor->name, CPUFREQ_NAME_LEN);
-#endif
- cpu_dev = get_cpu_device(j);
- kobj = &cpu_dev->kobj;
unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- lock_policy_rwsem_write(cpu);
- cpufreq_cpu_put(data);
- }
- }
-#else
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
- if (cpufreq_driver->target)
- __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+ ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
+ "cpufreq");
+ return -EINVAL;
+ }
- kobj = &data->kobj;
- cmp = &data->kobj_unregister;
- unlock_policy_rwsem_write(cpu);
- kobject_put(kobj);
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ update_policy_cpu(data, cpu_dev->id);
+ unlock_policy_rwsem_write(cpu);
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, cpu_dev->id, cpu);
+ }
- /* we need to make sure that the underlying kobj is actually
- * not referenced anymore by anybody before we proceed with
- * unloading.
- */
- pr_debug("waiting for dropping of refcount\n");
- wait_for_completion(cmp);
- pr_debug("wait complete\n");
+ pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+ cpufreq_cpu_put(data);
- lock_policy_rwsem_write(cpu);
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(data);
- unlock_policy_rwsem_write(cpu);
+ /* If cpu is last user of policy, free policy */
+ if (cpus == 1) {
+ lock_policy_rwsem_read(cpu);
+ kobj = &data->kobj;
+ cmp = &data->kobj_unregister;
+ unlock_policy_rwsem_read(cpu);
+ kobject_put(kobj);
+
+ /* we need to make sure that the underlying kobj is actually
+ * not referenced anymore by anybody before we proceed with
+ * unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
-#ifdef CONFIG_HOTPLUG_CPU
- /* when the CPU which is the parent of the kobj is hotplugged
- * offline, check for siblings, and create cpufreq sysfs interface
- * and symlinks
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- /* first sibling now owns the new sysfs dir */
- cpumask_clear_cpu(cpu, data->cpus);
- cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(data);
- /* finally remove our own symlink */
- lock_policy_rwsem_write(cpu);
- __cpufreq_remove_dev(dev, sif);
+ free_cpumask_var(data->related_cpus);
+ free_cpumask_var(data->cpus);
+ kfree(data);
+ } else if (cpufreq_driver->target) {
+ __cpufreq_governor(data, CPUFREQ_GOV_START);
+ __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
}
-#endif
-
- free_cpumask_var(data->related_cpus);
- free_cpumask_var(data->cpus);
- kfree(data);
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
return 0;
}
@@ -1165,9 +1108,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (cpu_is_offline(cpu))
return 0;
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
-
retval = __cpufreq_remove_dev(dev, sif);
return retval;
}
@@ -1216,9 +1156,13 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy;
unsigned int ret_freq = 0;
+ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+ return cpufreq_driver->get(cpu);
+
+ policy = cpufreq_cpu_get(cpu);
if (policy) {
ret_freq = policy->cur;
cpufreq_cpu_put(policy);
@@ -1386,6 +1330,20 @@ static struct syscore_ops cpufreq_syscore_ops = {
.resume = cpufreq_bp_resume,
};
+/**
+ * cpufreq_get_current_driver - return current driver's name
+ *
+ * Return the name string of the currently loaded cpufreq driver
+ * or NULL, if none.
+ */
+const char *cpufreq_get_current_driver(void)
+{
+ if (cpufreq_driver)
+ return cpufreq_driver->name;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
/*********************************************************************
* NOTIFIER LISTS INTERFACE *
@@ -1408,6 +1366,9 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
+ if (cpufreq_disabled())
+ return -EINVAL;
+
WARN_ON(!init_cpufreq_transition_notifier_list_called);
switch (list) {
@@ -1442,6 +1403,9 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
+ if (cpufreq_disabled())
+ return -EINVAL;
+
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
ret = srcu_notifier_chain_unregister(
@@ -1487,7 +1451,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (target_freq == policy->cur)
return 0;
- if (cpu_online(policy->cpu) && cpufreq_driver->target)
+ if (cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
return retval;
@@ -1522,7 +1486,10 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
int ret = 0;
- if (!(cpu_online(cpu) && cpufreq_driver->getavg))
+ if (cpufreq_disabled())
+ return ret;
+
+ if (!cpufreq_driver->getavg)
return 0;
policy = cpufreq_cpu_get(policy->cpu);
@@ -1577,6 +1544,11 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
policy->cpu, event);
ret = policy->governor->governor(policy, event);
+ if (event == CPUFREQ_GOV_START)
+ policy->governor->initialized++;
+ else if (event == CPUFREQ_GOV_STOP)
+ policy->governor->initialized--;
+
/* we keep one module reference alive for
each CPU governed by this CPU */
if ((event != CPUFREQ_GOV_START) || ret)
@@ -1600,6 +1572,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
mutex_lock(&cpufreq_governor_mutex);
+ governor->initialized = 0;
err = -EBUSY;
if (__find_governor(governor->name) == NULL) {
err = 0;
@@ -1797,7 +1770,7 @@ int cpufreq_update_policy(unsigned int cpu)
pr_debug("Driver did not initialize current freq");
data->cur = policy.cur;
} else {
- if (data->cur != policy.cur)
+ if (data->cur != policy.cur && cpufreq_driver->target)
cpufreq_out_of_sync(cpu, data->cur,
policy.cur);
}
@@ -1829,9 +1802,6 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
-
__cpufreq_remove_dev(dev, NULL);
break;
case CPU_DOWN_FAILED:
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 64ef737e7e72..4fd0006b1291 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -25,7 +25,7 @@
#include "cpufreq_governor.h"
-/* Conservative governor macors */
+/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -113,17 +113,20 @@ static void cs_check_cpu(int cpu, unsigned int load)
static void cs_dbs_timer(struct work_struct *work)
{
+ struct delayed_work *dw = to_delayed_work(work);
struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
struct cs_cpu_dbs_info_s, cdbs.work.work);
- unsigned int cpu = dbs_info->cdbs.cpu;
+ unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+ struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
+ cpu);
int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
- mutex_lock(&dbs_info->cdbs.timer_mutex);
+ mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+ if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
+ dbs_check_cpu(&cs_dbs_data, cpu);
- dbs_check_cpu(&cs_dbs_data, cpu);
-
- schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
- mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ schedule_delayed_work_on(smp_processor_id(), dw, delay);
+ mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -141,7 +144,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
/*
* we only care if our internally tracked freq moves outside the 'valid'
- * ranges of freqency available to us otherwise we do not change it
+ * ranges of frequency available to us otherwise we do not change it
*/
if (dbs_info->requested_freq > policy->max
|| dbs_info->requested_freq < policy->min)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 6c5f1d383cdc..5a76086ff09b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -161,25 +161,48 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);
-static inline void dbs_timer_init(struct dbs_data *dbs_data,
- struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate)
+static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu,
+ unsigned int sampling_rate)
{
int delay = delay_for_sampling_rate(sampling_rate);
+ struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
- INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer);
- schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay);
+ schedule_delayed_work_on(cpu, &cdbs->work, delay);
}
-static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs)
+static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
{
+ struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+
cancel_delayed_work_sync(&cdbs->work);
}
+/* Return true if the CPU load needs to be evaluated again */
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+ unsigned int sampling_rate)
+{
+ if (policy_is_shared(cdbs->cur_policy)) {
+ ktime_t time_now = ktime_get();
+ s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+
+ /* Do nothing if we have sampled recently */
+ if (delta_us < (s64)(sampling_rate / 2))
+ return false;
+ else
+ cdbs->time_stamp = time_now;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(need_load_eval);
+
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
struct cpufreq_policy *policy, unsigned int event)
{
struct od_cpu_dbs_info_s *od_dbs_info = NULL;
struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
+ struct cs_ops *cs_ops = NULL;
+ struct od_ops *od_ops = NULL;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
struct cpu_dbs_common_info *cpu_cdbs;
@@ -192,109 +215,111 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
sampling_rate = &cs_tuners->sampling_rate;
ignore_nice = cs_tuners->ignore_nice;
+ cs_ops = dbs_data->gov_ops;
} else {
od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
sampling_rate = &od_tuners->sampling_rate;
ignore_nice = od_tuners->ignore_nice;
+ od_ops = dbs_data->gov_ops;
}
switch (event) {
case CPUFREQ_GOV_START:
- if ((!cpu_online(cpu)) || (!policy->cur))
+ if (!policy->cur)
return -EINVAL;
mutex_lock(&dbs_data->mutex);
- dbs_data->enable++;
- cpu_cdbs->cpu = cpu;
for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_common_info *j_cdbs;
- j_cdbs = dbs_data->get_cpu_cdbs(j);
+ struct cpu_dbs_common_info *j_cdbs =
+ dbs_data->get_cpu_cdbs(j);
+ j_cdbs->cpu = j;
j_cdbs->cur_policy = policy;
j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
&j_cdbs->prev_cpu_wall);
if (ignore_nice)
j_cdbs->prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- }
- /*
- * Start the timerschedule work, when this governor is used for
- * first time
- */
- if (dbs_data->enable != 1)
- goto second_time;
-
- rc = sysfs_create_group(cpufreq_global_kobject,
- dbs_data->attr_group);
- if (rc) {
- mutex_unlock(&dbs_data->mutex);
- return rc;
+ mutex_init(&j_cdbs->timer_mutex);
+ INIT_DEFERRABLE_WORK(&j_cdbs->work,
+ dbs_data->gov_dbs_timer);
}
- /* policy latency is in nS. Convert it to uS first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
+ if (!policy->governor->initialized) {
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ dbs_data->attr_group);
+ if (rc) {
+ mutex_unlock(&dbs_data->mutex);
+ return rc;
+ }
+ }
/*
* conservative does not implement micro like ondemand
* governor, thus we are bound to jiffes/HZ
*/
if (dbs_data->governor == GOV_CONSERVATIVE) {
- struct cs_ops *ops = dbs_data->gov_ops;
+ cs_dbs_info->down_skip = 0;
+ cs_dbs_info->enable = 1;
+ cs_dbs_info->requested_freq = policy->cur;
- cpufreq_register_notifier(ops->notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
+ if (!policy->governor->initialized) {
+ cpufreq_register_notifier(cs_ops->notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
- dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
- jiffies_to_usecs(10);
+ dbs_data->min_sampling_rate =
+ MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
+ }
} else {
- struct od_ops *ops = dbs_data->gov_ops;
+ od_dbs_info->rate_mult = 1;
+ od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ od_ops->powersave_bias_init_cpu(cpu);
- od_tuners->io_is_busy = ops->io_busy();
+ if (!policy->governor->initialized)
+ od_tuners->io_is_busy = od_ops->io_busy();
}
+ if (policy->governor->initialized)
+ goto unlock;
+
+ /* policy latency is in nS. Convert it to uS first */
+ latency = policy->cpuinfo.transition_latency / 1000;
+ if (latency == 0)
+ latency = 1;
+
/* Bring kernel and HW constraints together */
dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
MIN_LATENCY_MULTIPLIER * latency);
*sampling_rate = max(dbs_data->min_sampling_rate, latency *
LATENCY_MULTIPLIER);
-
-second_time:
- if (dbs_data->governor == GOV_CONSERVATIVE) {
- cs_dbs_info->down_skip = 0;
- cs_dbs_info->enable = 1;
- cs_dbs_info->requested_freq = policy->cur;
- } else {
- struct od_ops *ops = dbs_data->gov_ops;
- od_dbs_info->rate_mult = 1;
- od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
- ops->powersave_bias_init_cpu(cpu);
- }
+unlock:
mutex_unlock(&dbs_data->mutex);
- mutex_init(&cpu_cdbs->timer_mutex);
- dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate);
+ /* Initiate timer time stamp */
+ cpu_cdbs->time_stamp = ktime_get();
+
+ for_each_cpu(j, policy->cpus)
+ dbs_timer_init(dbs_data, j, *sampling_rate);
break;
case CPUFREQ_GOV_STOP:
if (dbs_data->governor == GOV_CONSERVATIVE)
cs_dbs_info->enable = 0;
- dbs_timer_exit(cpu_cdbs);
+ for_each_cpu(j, policy->cpus)
+ dbs_timer_exit(dbs_data, j);
mutex_lock(&dbs_data->mutex);
mutex_destroy(&cpu_cdbs->timer_mutex);
- dbs_data->enable--;
- if (!dbs_data->enable) {
- struct cs_ops *ops = dbs_data->gov_ops;
+ if (policy->governor->initialized == 1) {
sysfs_remove_group(cpufreq_global_kobject,
dbs_data->attr_group);
if (dbs_data->governor == GOV_CONSERVATIVE)
- cpufreq_unregister_notifier(ops->notifier_block,
+ cpufreq_unregister_notifier(cs_ops->notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
mutex_unlock(&dbs_data->mutex);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index f6616540c53d..d2ac91150600 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -82,6 +82,7 @@ struct cpu_dbs_common_info {
* the governor or limits.
*/
struct mutex timer_mutex;
+ ktime_t time_stamp;
};
struct od_cpu_dbs_info_s {
@@ -108,7 +109,7 @@ struct od_dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
- unsigned int down_differential;
+ unsigned int adj_up_threshold;
unsigned int powersave_bias;
unsigned int io_is_busy;
};
@@ -129,7 +130,6 @@ struct dbs_data {
#define GOV_CONSERVATIVE 1
int governor;
unsigned int min_sampling_rate;
- unsigned int enable; /* number of CPUs using this policy */
struct attribute_group *attr_group;
void *tuners;
@@ -171,6 +171,8 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+ unsigned int sampling_rate);
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
struct cpufreq_policy *policy, unsigned int event);
#endif /* _CPUFREQ_GOVERNER_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 7731f7c7e79a..f3eb26cd848f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -26,7 +26,7 @@
#include "cpufreq_governor.h"
-/* On-demand governor macors */
+/* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -47,7 +47,8 @@ static struct cpufreq_governor cpufreq_gov_ondemand;
static struct od_dbs_tuners od_tuners = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
- .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
+ .adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
+ DEF_FREQUENCY_DOWN_DIFFERENTIAL,
.ignore_nice = 0,
.powersave_bias = 0,
};
@@ -65,7 +66,7 @@ static void ondemand_powersave_bias_init_cpu(int cpu)
* efficient idling at a higher frequency/voltage is.
* Pavel Machek says this is not so for various generations of AMD and old
* Intel systems.
- * Mike Chan (androidlcom) calis this is also not true for ARM.
+ * Mike Chan (android.com) claims this is also not true for ARM.
* Because of this, whitelist specific known (series) of CPUs by default, and
* leave all others up to the user.
*/
@@ -73,7 +74,7 @@ static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
/*
- * For Intel, Core 2 (model 15) andl later have an efficient idle.
+ * For Intel, Core 2 (model 15) and later have an efficient idle.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6 &&
@@ -158,8 +159,8 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
/*
* Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency Every sampling_rate, we look for
- * a the lowest frequency which can sustain the load while keeping idle time
+ * (default), then we try to increase frequency. Every sampling_rate, we look
+ * for the lowest frequency which can sustain the load while keeping idle time
* over 30%. If such a frequency exist, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency. Frequency reduction
@@ -192,11 +193,9 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
* support the current CPU usage without triggering the up policy. To be
* safe, we focus 10 points under the threshold.
*/
- if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) *
- policy->cur) {
+ if (load_freq < od_tuners.adj_up_threshold * policy->cur) {
unsigned int freq_next;
- freq_next = load_freq / (od_tuners.up_threshold -
- od_tuners.down_differential);
+ freq_next = load_freq / od_tuners.adj_up_threshold;
/* No longer fully busy, reset rate_mult */
dbs_info->rate_mult = 1;
@@ -218,33 +217,42 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
static void od_dbs_timer(struct work_struct *work)
{
+ struct delayed_work *dw = to_delayed_work(work);
struct od_cpu_dbs_info_s *dbs_info =
container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
- unsigned int cpu = dbs_info->cdbs.cpu;
- int delay, sample_type = dbs_info->sample_type;
+ unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+ struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+ cpu);
+ int delay, sample_type = core_dbs_info->sample_type;
+ bool eval_load;
- mutex_lock(&dbs_info->cdbs.timer_mutex);
+ mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+ eval_load = need_load_eval(&core_dbs_info->cdbs,
+ od_tuners.sampling_rate);
/* Common NORMAL_SAMPLE setup */
- dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
if (sample_type == OD_SUB_SAMPLE) {
- delay = dbs_info->freq_lo_jiffies;
- __cpufreq_driver_target(dbs_info->cdbs.cur_policy,
- dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ delay = core_dbs_info->freq_lo_jiffies;
+ if (eval_load)
+ __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
+ core_dbs_info->freq_lo,
+ CPUFREQ_RELATION_H);
} else {
- dbs_check_cpu(&od_dbs_data, cpu);
- if (dbs_info->freq_lo) {
+ if (eval_load)
+ dbs_check_cpu(&od_dbs_data, cpu);
+ if (core_dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
- dbs_info->sample_type = OD_SUB_SAMPLE;
- delay = dbs_info->freq_hi_jiffies;
+ core_dbs_info->sample_type = OD_SUB_SAMPLE;
+ delay = core_dbs_info->freq_hi_jiffies;
} else {
delay = delay_for_sampling_rate(od_tuners.sampling_rate
- * dbs_info->rate_mult);
+ * core_dbs_info->rate_mult);
}
}
- schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
- mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ schedule_delayed_work_on(smp_processor_id(), dw, delay);
+ mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
/************************** sysfs interface ************************/
@@ -259,7 +267,7 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
* update_sampling_rate - update sampling rate effective immediately if needed.
* @new_rate: new sampling rate
*
- * If new rate is smaller than the old, simply updaing
+ * If new rate is smaller than the old, simply updating
* dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
* original sampling_rate was 1 second and the requested new sampling rate is 10
* ms because the user needs immediate reaction from ondemand governor, but not
@@ -287,7 +295,7 @@ static void update_sampling_rate(unsigned int new_rate)
cpufreq_cpu_put(policy);
continue;
}
- dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
+ dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
cpufreq_cpu_put(policy);
mutex_lock(&dbs_info->cdbs.timer_mutex);
@@ -306,8 +314,7 @@ static void update_sampling_rate(unsigned int new_rate)
cancel_delayed_work_sync(&dbs_info->cdbs.work);
mutex_lock(&dbs_info->cdbs.timer_mutex);
- schedule_delayed_work_on(dbs_info->cdbs.cpu,
- &dbs_info->cdbs.work,
+ schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
usecs_to_jiffies(new_rate));
}
@@ -351,6 +358,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
+ /* Calculate the new adj_up_threshold */
+ od_tuners.adj_up_threshold += input;
+ od_tuners.adj_up_threshold -= od_tuners.up_threshold;
+
od_tuners.up_threshold = input;
return count;
}
@@ -507,7 +518,8 @@ static int __init cpufreq_gov_dbs_init(void)
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
- od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+ od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
+ MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
/*
* In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 9d7732b81044..2fd779eb1ed1 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -24,12 +24,6 @@
static spinlock_t cpufreq_stats_lock;
-#define CPUFREQ_STATDEVICE_ATTR(_name, _mode, _show) \
-static struct freq_attr _attr_##_name = {\
- .attr = {.name = __stringify(_name), .mode = _mode, }, \
- .show = _show,\
-};
-
struct cpufreq_stats {
unsigned int cpu;
unsigned int total_trans;
@@ -136,17 +130,17 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
return PAGE_SIZE;
return len;
}
-CPUFREQ_STATDEVICE_ATTR(trans_table, 0444, show_trans_table);
+cpufreq_freq_attr_ro(trans_table);
#endif
-CPUFREQ_STATDEVICE_ATTR(total_trans, 0444, show_total_trans);
-CPUFREQ_STATDEVICE_ATTR(time_in_state, 0444, show_time_in_state);
+cpufreq_freq_attr_ro(total_trans);
+cpufreq_freq_attr_ro(time_in_state);
static struct attribute *default_attrs[] = {
- &_attr_total_trans.attr,
- &_attr_time_in_state.attr,
+ &total_trans.attr,
+ &time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
- &_attr_trans_table.attr,
+ &trans_table.attr,
#endif
NULL
};
@@ -170,11 +164,13 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
static void cpufreq_stats_free_table(unsigned int cpu)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+
if (stat) {
+ pr_debug("%s: Free stat table\n", __func__);
kfree(stat->time_in_state);
kfree(stat);
+ per_cpu(cpufreq_stats_table, cpu) = NULL;
}
- per_cpu(cpufreq_stats_table, cpu) = NULL;
}
/* must be called early in the CPU removal sequence (before
@@ -183,8 +179,14 @@ static void cpufreq_stats_free_table(unsigned int cpu)
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- if (policy && policy->cpu == cpu)
+
+ if (!cpufreq_frequency_get_table(cpu))
+ return;
+
+ if (policy && !policy_is_shared(policy)) {
+ pr_debug("%s: Free sysfs stat\n", __func__);
sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ }
if (policy)
cpufreq_cpu_put(policy);
}
@@ -262,6 +264,19 @@ error_get_fail:
return ret;
}
+static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
+{
+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
+ policy->last_cpu);
+
+ pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
+ policy->cpu, policy->last_cpu);
+ per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
+ policy->last_cpu);
+ per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
+ stat->cpu = policy->cpu;
+}
+
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
@@ -269,6 +284,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
struct cpufreq_frequency_table *table;
unsigned int cpu = policy->cpu;
+
+ if (val == CPUFREQ_UPDATE_POLICY_CPU) {
+ cpufreq_stats_update_policy_cpu(policy);
+ return 0;
+ }
+
if (val != CPUFREQ_NOTIFY)
return 0;
table = cpufreq_frequency_get_table(cpu);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index c8c3d293cc57..bbeb9c0720a6 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -118,8 +118,6 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
switch (event) {
case CPUFREQ_GOV_START:
- if (!cpu_online(cpu))
- return -EINVAL;
BUG_ON(!policy->cur);
mutex_lock(&userspace_mutex);
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 4f154bc0ebe4..72f0c3efa76e 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -1,13 +1,13 @@
/*
* Copyright (C) STMicroelectronics 2009
- * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) ST-Ericsson SA 2010-2012
*
* License Terms: GNU General Public License v2
* Author: Sundar Iyer <sundar.iyer@stericsson.com>
* Author: Martin Persson <martin.persson@stericsson.com>
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
*/
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpufreq.h>
@@ -15,27 +15,27 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <mach/id.h>
static struct cpufreq_frequency_table *freq_table;
static struct clk *armss_clk;
-static struct freq_attr *db8500_cpufreq_attr[] = {
+static struct freq_attr *dbx500_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
-static int db8500_cpufreq_verify_speed(struct cpufreq_policy *policy)
+static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, freq_table);
}
-static int db8500_cpufreq_target(struct cpufreq_policy *policy,
+static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cpufreq_freqs freqs;
unsigned int idx;
+ int ret;
/* scale the target frequency to one of the extremes supported */
if (target_freq < policy->cpuinfo.min_freq)
@@ -44,10 +44,9 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
target_freq = policy->cpuinfo.max_freq;
/* Lookup the next frequency */
- if (cpufreq_frequency_table_target
- (policy, freq_table, target_freq, relation, &idx)) {
+ if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &idx))
return -EINVAL;
- }
freqs.old = policy->cur;
freqs.new = freq_table[idx].frequency;
@@ -60,9 +59,12 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* update armss clk frequency */
- if (clk_set_rate(armss_clk, freq_table[idx].frequency * 1000)) {
- pr_err("db8500-cpufreq: Failed to update armss clk\n");
- return -EINVAL;
+ ret = clk_set_rate(armss_clk, freqs.new * 1000);
+
+ if (ret) {
+ pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n",
+ freqs.new * 1000, ret);
+ return ret;
}
/* post change notification */
@@ -72,7 +74,7 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
return 0;
}
-static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
+static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
{
int i = 0;
unsigned long freq = clk_get_rate(armss_clk) / 1000;
@@ -84,40 +86,26 @@ static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
}
/* We could not find a corresponding frequency. */
- pr_err("db8500-cpufreq: Failed to find cpufreq speed\n");
+ pr_err("dbx500-cpufreq: Failed to find cpufreq speed\n");
return 0;
}
-static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
+static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy)
{
- int i = 0;
int res;
- armss_clk = clk_get(NULL, "armss");
- if (IS_ERR(armss_clk)) {
- pr_err("db8500-cpufreq : Failed to get armss clk\n");
- return PTR_ERR(armss_clk);
- }
-
- pr_info("db8500-cpufreq : Available frequencies:\n");
- while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
- pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
- i++;
- }
-
/* get policy fields based on the table */
res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (!res)
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
else {
- pr_err("db8500-cpufreq : Failed to read policy table\n");
- clk_put(armss_clk);
+ pr_err("dbx500-cpufreq: Failed to read policy table\n");
return res;
}
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
- policy->cur = db8500_cpufreq_getspeed(policy->cpu);
+ policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/*
@@ -128,52 +116,59 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
/* policy sharing between dual CPUs */
- cpumask_copy(policy->cpus, cpu_present_mask);
-
- policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ cpumask_setall(policy->cpus);
return 0;
}
-static struct cpufreq_driver db8500_cpufreq_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = db8500_cpufreq_verify_speed,
- .target = db8500_cpufreq_target,
- .get = db8500_cpufreq_getspeed,
- .init = db8500_cpufreq_init,
- .name = "DB8500",
- .attr = db8500_cpufreq_attr,
+static struct cpufreq_driver dbx500_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
+ .verify = dbx500_cpufreq_verify_speed,
+ .target = dbx500_cpufreq_target,
+ .get = dbx500_cpufreq_getspeed,
+ .init = dbx500_cpufreq_init,
+ .name = "DBX500",
+ .attr = dbx500_cpufreq_attr,
};
-static int db8500_cpufreq_probe(struct platform_device *pdev)
+static int dbx500_cpufreq_probe(struct platform_device *pdev)
{
- freq_table = dev_get_platdata(&pdev->dev);
+ int i = 0;
+ freq_table = dev_get_platdata(&pdev->dev);
if (!freq_table) {
- pr_err("db8500-cpufreq: Failed to fetch cpufreq table\n");
+ pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n");
return -ENODEV;
}
- return cpufreq_register_driver(&db8500_cpufreq_driver);
+ armss_clk = clk_get(&pdev->dev, "armss");
+ if (IS_ERR(armss_clk)) {
+ pr_err("dbx500-cpufreq: Failed to get armss clk\n");
+ return PTR_ERR(armss_clk);
+ }
+
+ pr_info("dbx500-cpufreq: Available frequencies:\n");
+ while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
+ pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
+ i++;
+ }
+
+ return cpufreq_register_driver(&dbx500_cpufreq_driver);
}
-static struct platform_driver db8500_cpufreq_plat_driver = {
+static struct platform_driver dbx500_cpufreq_plat_driver = {
.driver = {
- .name = "cpufreq-u8500",
+ .name = "cpufreq-ux500",
.owner = THIS_MODULE,
},
- .probe = db8500_cpufreq_probe,
+ .probe = dbx500_cpufreq_probe,
};
-static int __init db8500_cpufreq_register(void)
+static int __init dbx500_cpufreq_register(void)
{
- if (!cpu_is_u8500_family())
- return -ENODEV;
-
- pr_info("cpufreq for DB8500 started\n");
- return platform_driver_register(&db8500_cpufreq_plat_driver);
+ return platform_driver_register(&dbx500_cpufreq_plat_driver);
}
-device_initcall(db8500_cpufreq_register);
+device_initcall(dbx500_cpufreq_register);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("cpufreq driver for DB8500");
+MODULE_DESCRIPTION("cpufreq driver for DBX500");
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 7012ea8bf1e7..78057a357ddb 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -18,10 +18,10 @@
#include <linux/cpufreq.h>
#include <linux/suspend.h>
-#include <mach/cpufreq.h>
-
#include <plat/cpu.h>
+#include "exynos-cpufreq.h"
+
static struct exynos_dvfs_info *exynos_info;
static struct regulator *arm_regulator;
@@ -42,51 +42,56 @@ static unsigned int exynos_getspeed(unsigned int cpu)
return clk_get_rate(exynos_info->cpu_clk) / 1000;
}
-static int exynos_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int exynos_cpufreq_get_index(unsigned int freq)
+{
+ struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
+ int index;
+
+ for (index = 0;
+ freq_table[index].frequency != CPUFREQ_TABLE_END; index++)
+ if (freq_table[index].frequency == freq)
+ break;
+
+ if (freq_table[index].frequency == CPUFREQ_TABLE_END)
+ return -EINVAL;
+
+ return index;
+}
+
+static int exynos_cpufreq_scale(unsigned int target_freq)
{
- unsigned int index, old_index;
- unsigned int arm_volt, safe_arm_volt = 0;
- int ret = 0;
struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
unsigned int *volt_table = exynos_info->volt_table;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0);
+ unsigned int arm_volt, safe_arm_volt = 0;
unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
-
- mutex_lock(&cpufreq_lock);
+ int index, old_index;
+ int ret = 0;
freqs.old = policy->cur;
+ freqs.new = target_freq;
+ freqs.cpu = policy->cpu;
- if (frequency_locked && target_freq != locking_frequency) {
- ret = -EAGAIN;
+ if (freqs.new == freqs.old)
goto out;
- }
/*
 * The policy max may have been changed so that we cannot get the proper
 * old_index with cpufreq_frequency_table_target(). Thus, ignore the
 * policy and get the index from the raw frequency table.
*/
- for (old_index = 0;
- freq_table[old_index].frequency != CPUFREQ_TABLE_END;
- old_index++)
- if (freq_table[old_index].frequency == freqs.old)
- break;
-
- if (freq_table[old_index].frequency == CPUFREQ_TABLE_END) {
- ret = -EINVAL;
+ old_index = exynos_cpufreq_get_index(freqs.old);
+ if (old_index < 0) {
+ ret = old_index;
goto out;
}
- if (cpufreq_frequency_table_target(policy, freq_table,
- target_freq, relation, &index)) {
- ret = -EINVAL;
+ index = exynos_cpufreq_get_index(target_freq);
+ if (index < 0) {
+ ret = index;
goto out;
}
- freqs.new = freq_table[index].frequency;
- freqs.cpu = policy->cpu;
-
/*
* ARM clock source will be changed APLL to MPLL temporary
* To support this level, need to control regulator for
@@ -106,15 +111,25 @@ static int exynos_target(struct cpufreq_policy *policy,
/* When the new frequency is higher than current frequency */
if ((freqs.new > freqs.old) && !safe_arm_volt) {
/* Firstly, voltage up to increase frequency */
- regulator_set_voltage(arm_regulator, arm_volt,
- arm_volt);
+ ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
+ if (ret) {
+ pr_err("%s: failed to set cpu voltage to %d\n",
+ __func__, arm_volt);
+ goto out;
+ }
}
- if (safe_arm_volt)
- regulator_set_voltage(arm_regulator, safe_arm_volt,
+ if (safe_arm_volt) {
+ ret = regulator_set_voltage(arm_regulator, safe_arm_volt,
safe_arm_volt);
- if (freqs.new != freqs.old)
- exynos_info->set_freq(old_index, index);
+ if (ret) {
+ pr_err("%s: failed to set cpu voltage to %d\n",
+ __func__, safe_arm_volt);
+ goto out;
+ }
+ }
+
+ exynos_info->set_freq(old_index, index);
for_each_cpu(freqs.cpu, policy->cpus)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
@@ -125,8 +140,44 @@ static int exynos_target(struct cpufreq_policy *policy,
/* down the voltage after frequency change */
regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
+ if (ret) {
+ pr_err("%s: failed to set cpu voltage to %d\n",
+ __func__, arm_volt);
+ goto out;
+ }
+ }
+
+out:
+
+ cpufreq_cpu_put(policy);
+
+ return ret;
+}
+
+static int exynos_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
+ unsigned int index;
+ unsigned int new_freq;
+ int ret = 0;
+
+ mutex_lock(&cpufreq_lock);
+
+ if (frequency_locked)
+ goto out;
+
+ if (cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index)) {
+ ret = -EINVAL;
+ goto out;
}
+ new_freq = freq_table[index].frequency;
+
+ ret = exynos_cpufreq_scale(new_freq);
+
out:
mutex_unlock(&cpufreq_lock);
@@ -163,51 +214,26 @@ static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *v)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
- static unsigned int saved_frequency;
- unsigned int temp;
+ int ret;
- mutex_lock(&cpufreq_lock);
switch (pm_event) {
case PM_SUSPEND_PREPARE:
- if (frequency_locked)
- goto out;
-
+ mutex_lock(&cpufreq_lock);
frequency_locked = true;
+ mutex_unlock(&cpufreq_lock);
- if (locking_frequency) {
- saved_frequency = exynos_getspeed(0);
+ ret = exynos_cpufreq_scale(locking_frequency);
+ if (ret < 0)
+ return NOTIFY_BAD;
- mutex_unlock(&cpufreq_lock);
- exynos_target(policy, locking_frequency,
- CPUFREQ_RELATION_H);
- mutex_lock(&cpufreq_lock);
- }
break;
case PM_POST_SUSPEND:
- if (saved_frequency) {
- /*
- * While frequency_locked, only locking_frequency
- * is valid for target(). In order to use
- * saved_frequency while keeping frequency_locked,
- * we temporarly overwrite locking_frequency.
- */
- temp = locking_frequency;
- locking_frequency = saved_frequency;
-
- mutex_unlock(&cpufreq_lock);
- exynos_target(policy, locking_frequency,
- CPUFREQ_RELATION_H);
- mutex_lock(&cpufreq_lock);
-
- locking_frequency = temp;
- }
+ mutex_lock(&cpufreq_lock);
frequency_locked = false;
+ mutex_unlock(&cpufreq_lock);
break;
}
-out:
- mutex_unlock(&cpufreq_lock);
return NOTIFY_OK;
}
@@ -222,35 +248,34 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
- locking_frequency = exynos_getspeed(0);
-
/* set the transition latency value */
policy->cpuinfo.transition_latency = 100000;
- /*
- * EXYNOS4 multi-core processors has 2 cores
- * that the frequency cannot be set independently.
- * Each cpu is bound to the same speed.
- * So the affected cpu is all of the cpus.
- */
- if (num_online_cpus() == 1) {
- cpumask_copy(policy->related_cpus, cpu_possible_mask);
- cpumask_copy(policy->cpus, cpu_online_mask);
- } else {
- policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
- cpumask_setall(policy->cpus);
- }
+ cpumask_setall(policy->cpus);
return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
}
+static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *exynos_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
static struct cpufreq_driver exynos_driver = {
.flags = CPUFREQ_STICKY,
.verify = exynos_verify_speed,
.target = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
+ .exit = exynos_cpufreq_cpu_exit,
.name = "exynos_cpufreq",
+ .attr = exynos_cpufreq_attr,
#ifdef CONFIG_PM
.suspend = exynos_cpufreq_suspend,
.resume = exynos_cpufreq_resume,
@@ -288,6 +313,8 @@ static int __init exynos_cpufreq_init(void)
goto err_vdd_arm;
}
+ locking_frequency = exynos_getspeed(0);
+
register_pm_notifier(&exynos_cpufreq_nb);
if (cpufreq_register_driver(&exynos_driver)) {
@@ -299,8 +326,7 @@ static int __init exynos_cpufreq_init(void)
err_cpufreq:
unregister_pm_notifier(&exynos_cpufreq_nb);
- if (!IS_ERR(arm_regulator))
- regulator_put(arm_regulator);
+ regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
pr_debug("%s: failed initialization\n", __func__);
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
new file mode 100644
index 000000000000..92b852ee5ddc
--- /dev/null
+++ b/drivers/cpufreq/exynos-cpufreq.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - CPUFreq support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+enum cpufreq_level_index {
+ L0, L1, L2, L3, L4,
+ L5, L6, L7, L8, L9,
+ L10, L11, L12, L13, L14,
+ L15, L16, L17, L18, L19,
+ L20,
+};
+
+#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
+ { \
+ .freq = (f) * 1000, \
+ .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
+ (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
+ .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
+ .mps = ((m) << 16 | (p) << 8 | (s)), \
+ }
+
+struct apll_freq {
+ unsigned int freq;
+ u32 clk_div_cpu0;
+ u32 clk_div_cpu1;
+ u32 mps;
+};
+
+struct exynos_dvfs_info {
+ unsigned long mpll_freq_khz;
+ unsigned int pll_safe_idx;
+ struct clk *cpu_clk;
+ unsigned int *volt_table;
+ struct cpufreq_frequency_table *freq_table;
+ void (*set_freq)(unsigned int, unsigned int);
+ bool (*need_apll_change)(unsigned int, unsigned int);
+};
+
+extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
+extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
+extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index fb148fa27678..add7fbec4fc9 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -18,99 +18,40 @@
#include <linux/cpufreq.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
-#define CPUFREQ_LEVEL_END L5
-
-static int max_support_idx = L0;
-static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
+#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
-struct cpufreq_clkdiv {
- unsigned int index;
- unsigned int clkdiv;
-};
-
-static unsigned int exynos4210_volt_table[CPUFREQ_LEVEL_END] = {
+static unsigned int exynos4210_volt_table[] = {
1250000, 1150000, 1050000, 975000, 950000,
};
-
-static struct cpufreq_clkdiv exynos4210_clkdiv_table[CPUFREQ_LEVEL_END];
-
static struct cpufreq_frequency_table exynos4210_freq_table[] = {
- {L0, 1200*1000},
- {L1, 1000*1000},
- {L2, 800*1000},
- {L3, 500*1000},
- {L4, 200*1000},
+ {L0, 1200 * 1000},
+ {L1, 1000 * 1000},
+ {L2, 800 * 1000},
+ {L3, 500 * 1000},
+ {L4, 200 * 1000},
{0, CPUFREQ_TABLE_END},
};
-static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
+static struct apll_freq apll_freq_4210[] = {
/*
- * Clock divider value for following
- * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
- * DIVATB, DIVPCLK_DBG, DIVAPLL }
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, RESERVED
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
*/
-
- /* ARM L0: 1200MHz */
- { 0, 3, 7, 3, 4, 1, 7 },
-
- /* ARM L1: 1000MHz */
- { 0, 3, 7, 3, 4, 1, 7 },
-
- /* ARM L2: 800MHz */
- { 0, 3, 7, 3, 3, 1, 7 },
-
- /* ARM L3: 500MHz */
- { 0, 3, 7, 3, 3, 1, 7 },
-
- /* ARM L4: 200MHz */
- { 0, 1, 3, 1, 3, 1, 0 },
-};
-
-static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
- /*
- * Clock divider value for following
- * { DIVCOPY, DIVHPM }
- */
-
- /* ARM L0: 1200MHz */
- { 5, 0 },
-
- /* ARM L1: 1000MHz */
- { 4, 0 },
-
- /* ARM L2: 800MHz */
- { 3, 0 },
-
- /* ARM L3: 500MHz */
- { 3, 0 },
-
- /* ARM L4: 200MHz */
- { 3, 0 },
-};
-
-static unsigned int exynos4210_apll_pms_table[CPUFREQ_LEVEL_END] = {
- /* APLL FOUT L0: 1200MHz */
- ((150 << 16) | (3 << 8) | 1),
-
- /* APLL FOUT L1: 1000MHz */
- ((250 << 16) | (6 << 8) | 1),
-
- /* APLL FOUT L2: 800MHz */
- ((200 << 16) | (6 << 8) | 1),
-
- /* APLL FOUT L3: 500MHz */
- ((250 << 16) | (6 << 8) | 2),
-
- /* APLL FOUT L4: 200MHz */
- ((200 << 16) | (6 << 8) | 3),
+ APLL_FREQ(1200, 0, 3, 7, 3, 4, 1, 7, 0, 5, 0, 0, 150, 3, 1),
+ APLL_FREQ(1000, 0, 3, 7, 3, 4, 1, 7, 0, 4, 0, 0, 250, 6, 1),
+ APLL_FREQ(800, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 200, 6, 1),
+ APLL_FREQ(500, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 250, 6, 2),
+ APLL_FREQ(200, 0, 1, 3, 1, 3, 1, 0, 0, 3, 0, 0, 200, 6, 3),
};
static void exynos4210_set_clkdiv(unsigned int div_index)
@@ -119,7 +60,7 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
/* Change Divider - CPU0 */
- tmp = exynos4210_clkdiv_table[div_index].clkdiv;
+ tmp = apll_freq_4210[div_index].clk_div_cpu0;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
@@ -129,12 +70,7 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
/* Change Divider - CPU1 */
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU1);
-
- tmp &= ~((0x7 << 4) | 0x7);
-
- tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
- (clkdiv_cpu1[div_index][1] << 0));
+ tmp = apll_freq_4210[div_index].clk_div_cpu1;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
@@ -162,7 +98,7 @@ static void exynos4210_set_apll(unsigned int index)
/* 3. Change PLL PMS values */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos4210_apll_pms_table[index];
+ tmp |= apll_freq_4210[index].mps;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 4. wait_lock_time */
@@ -179,10 +115,10 @@ static void exynos4210_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
+static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
{
- unsigned int old_pm = (exynos4210_apll_pms_table[old_index] >> 8);
- unsigned int new_pm = (exynos4210_apll_pms_table[new_index] >> 8);
+ unsigned int old_pm = apll_freq_4210[old_index].mps >> 8;
+ unsigned int new_pm = apll_freq_4210[new_index].mps >> 8;
return (old_pm == new_pm) ? 0 : 1;
}
@@ -200,7 +136,7 @@ static void exynos4210_set_frequency(unsigned int old_index,
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4210[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
} else {
/* Clock Configuration Procedure */
@@ -214,7 +150,7 @@ static void exynos4210_set_frequency(unsigned int old_index,
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4210[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 2. Change the system clock divider values */
@@ -231,8 +167,6 @@ static void exynos4210_set_frequency(unsigned int old_index,
int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
{
- int i;
- unsigned int tmp;
unsigned long rate;
cpu_clk = clk_get(NULL, "armclk");
@@ -253,33 +187,9 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
if (IS_ERR(mout_apll))
goto err_mout_apll;
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU);
-
- for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
- tmp &= ~(EXYNOS4_CLKDIV_CPU0_CORE_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM0_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM1_MASK |
- EXYNOS4_CLKDIV_CPU0_PERIPH_MASK |
- EXYNOS4_CLKDIV_CPU0_ATB_MASK |
- EXYNOS4_CLKDIV_CPU0_PCLKDBG_MASK |
- EXYNOS4_CLKDIV_CPU0_APLL_MASK);
-
- tmp |= ((clkdiv_cpu0[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT));
-
- exynos4210_clkdiv_table[i].clkdiv = tmp;
- }
-
info->mpll_freq_khz = rate;
- info->pm_lock_idx = L2;
+ /* 800Mhz */
info->pll_safe_idx = L2;
- info->max_support_idx = max_support_idx;
- info->min_support_idx = min_support_idx;
info->cpu_clk = cpu_clk;
info->volt_table = exynos4210_volt_table;
info->freq_table = exynos4210_freq_table;
@@ -289,14 +199,11 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
return 0;
err_mout_apll:
- if (!IS_ERR(mout_mpll))
- clk_put(mout_mpll);
+ clk_put(mout_mpll);
err_mout_mpll:
- if (!IS_ERR(moutcore))
- clk_put(moutcore);
+ clk_put(moutcore);
err_moutcore:
- if (!IS_ERR(cpu_clk))
- clk_put(cpu_clk);
+ clk_put(cpu_clk);
pr_debug("%s: failed initialization\n", __func__);
return -EINVAL;
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 8c5a7afa5b0b..08b7477b0aa2 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -18,28 +18,21 @@
#include <linux/cpufreq.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
-#define CPUFREQ_LEVEL_END (L13 + 1)
-
-static int max_support_idx;
-static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
+#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
-struct cpufreq_clkdiv {
- unsigned int index;
- unsigned int clkdiv;
- unsigned int clkdiv1;
+static unsigned int exynos4x12_volt_table[] = {
+ 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
+ 1000000, 987500, 975000, 950000, 925000, 900000, 900000
};
-static unsigned int exynos4x12_volt_table[CPUFREQ_LEVEL_END];
-
static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
- {L0, 1500 * 1000},
+ {L0, CPUFREQ_ENTRY_INVALID},
{L1, 1400 * 1000},
{L2, 1300 * 1000},
{L3, 1200 * 1000},
@@ -56,247 +49,54 @@ static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct cpufreq_clkdiv exynos4x12_clkdiv_table[CPUFREQ_LEVEL_END];
+static struct apll_freq *apll_freq_4x12;
-static unsigned int clkdiv_cpu0_4212[CPUFREQ_LEVEL_END][8] = {
+static struct apll_freq apll_freq_4212[] = {
/*
- * Clock divider value for following
- * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
- * DIVATB, DIVPCLK_DBG, DIVAPLL, DIVCORE2 }
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
*/
- /* ARM L0: 1500 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L1: 1400 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L2: 1300 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L3: 1200 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L4: 1100 MHz */
- { 0, 3, 6, 0, 4, 1, 2, 0 },
-
- /* ARM L5: 1000 MHz */
- { 0, 2, 5, 0, 4, 1, 1, 0 },
-
- /* ARM L6: 900 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L7: 800 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L8: 700 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L9: 600 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L10: 500 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L11: 400 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L12: 300 MHz */
- { 0, 2, 4, 0, 2, 1, 1, 0 },
-
- /* ARM L13: 200 MHz */
- { 0, 1, 3, 0, 1, 1, 1, 0 },
+ APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 250, 4, 0),
+ APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 175, 3, 0),
+ APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 325, 6, 0),
+ APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 2, 0, 275, 6, 0),
+ APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 2, 0, 125, 3, 0),
+ APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 150, 4, 0),
+ APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 0),
+ APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 175, 3, 1),
+ APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 200, 4, 1),
+ APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 125, 3, 1),
+ APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 1),
+ APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 2, 0, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 2, 0, 100, 3, 2),
};
-static unsigned int clkdiv_cpu0_4412[CPUFREQ_LEVEL_END][8] = {
+static struct apll_freq apll_freq_4412[] = {
/*
- * Clock divider value for following
- * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
- * DIVATB, DIVPCLK_DBG, DIVAPLL, DIVCORE2 }
- */
- /* ARM L0: 1500 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L1: 1400 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L2: 1300 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L3: 1200 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L4: 1100 MHz */
- { 0, 3, 6, 0, 4, 1, 2, 0 },
-
- /* ARM L5: 1000 MHz */
- { 0, 2, 5, 0, 4, 1, 1, 0 },
-
- /* ARM L6: 900 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L7: 800 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L8: 700 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L9: 600 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L10: 500 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L11: 400 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L12: 300 MHz */
- { 0, 2, 4, 0, 2, 1, 1, 0 },
-
- /* ARM L13: 200 MHz */
- { 0, 1, 3, 0, 1, 1, 1, 0 },
-};
-
-static unsigned int clkdiv_cpu1_4212[CPUFREQ_LEVEL_END][2] = {
- /* Clock divider value for following
- * { DIVCOPY, DIVHPM }
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
+ * clock divider for COPY, HPM, CORES
+ * PLL M, P, S
*/
- /* ARM L0: 1500 MHz */
- { 6, 0 },
-
- /* ARM L1: 1400 MHz */
- { 6, 0 },
-
- /* ARM L2: 1300 MHz */
- { 5, 0 },
-
- /* ARM L3: 1200 MHz */
- { 5, 0 },
-
- /* ARM L4: 1100 MHz */
- { 4, 0 },
-
- /* ARM L5: 1000 MHz */
- { 4, 0 },
-
- /* ARM L6: 900 MHz */
- { 3, 0 },
-
- /* ARM L7: 800 MHz */
- { 3, 0 },
-
- /* ARM L8: 700 MHz */
- { 3, 0 },
-
- /* ARM L9: 600 MHz */
- { 3, 0 },
-
- /* ARM L10: 500 MHz */
- { 3, 0 },
-
- /* ARM L11: 400 MHz */
- { 3, 0 },
-
- /* ARM L12: 300 MHz */
- { 3, 0 },
-
- /* ARM L13: 200 MHz */
- { 3, 0 },
-};
-
-static unsigned int clkdiv_cpu1_4412[CPUFREQ_LEVEL_END][3] = {
- /* Clock divider value for following
- * { DIVCOPY, DIVHPM, DIVCORES }
- */
- /* ARM L0: 1500 MHz */
- { 6, 0, 7 },
-
- /* ARM L1: 1400 MHz */
- { 6, 0, 6 },
-
- /* ARM L2: 1300 MHz */
- { 5, 0, 6 },
-
- /* ARM L3: 1200 MHz */
- { 5, 0, 5 },
-
- /* ARM L4: 1100 MHz */
- { 4, 0, 5 },
-
- /* ARM L5: 1000 MHz */
- { 4, 0, 4 },
-
- /* ARM L6: 900 MHz */
- { 3, 0, 4 },
-
- /* ARM L7: 800 MHz */
- { 3, 0, 3 },
-
- /* ARM L8: 700 MHz */
- { 3, 0, 3 },
-
- /* ARM L9: 600 MHz */
- { 3, 0, 2 },
-
- /* ARM L10: 500 MHz */
- { 3, 0, 2 },
-
- /* ARM L11: 400 MHz */
- { 3, 0, 1 },
-
- /* ARM L12: 300 MHz */
- { 3, 0, 1 },
-
- /* ARM L13: 200 MHz */
- { 3, 0, 0 },
-};
-
-static unsigned int exynos4x12_apll_pms_table[CPUFREQ_LEVEL_END] = {
- /* APLL FOUT L0: 1500 MHz */
- ((250 << 16) | (4 << 8) | (0x0)),
-
- /* APLL FOUT L1: 1400 MHz */
- ((175 << 16) | (3 << 8) | (0x0)),
-
- /* APLL FOUT L2: 1300 MHz */
- ((325 << 16) | (6 << 8) | (0x0)),
-
- /* APLL FOUT L3: 1200 MHz */
- ((200 << 16) | (4 << 8) | (0x0)),
-
- /* APLL FOUT L4: 1100 MHz */
- ((275 << 16) | (6 << 8) | (0x0)),
-
- /* APLL FOUT L5: 1000 MHz */
- ((125 << 16) | (3 << 8) | (0x0)),
-
- /* APLL FOUT L6: 900 MHz */
- ((150 << 16) | (4 << 8) | (0x0)),
-
- /* APLL FOUT L7: 800 MHz */
- ((100 << 16) | (3 << 8) | (0x0)),
-
- /* APLL FOUT L8: 700 MHz */
- ((175 << 16) | (3 << 8) | (0x1)),
-
- /* APLL FOUT L9: 600 MHz */
- ((200 << 16) | (4 << 8) | (0x1)),
-
- /* APLL FOUT L10: 500 MHz */
- ((125 << 16) | (3 << 8) | (0x1)),
-
- /* APLL FOUT L11 400 MHz */
- ((100 << 16) | (3 << 8) | (0x1)),
-
- /* APLL FOUT L12: 300 MHz */
- ((200 << 16) | (4 << 8) | (0x2)),
-
- /* APLL FOUT L13: 200 MHz */
- ((100 << 16) | (3 << 8) | (0x2)),
-};
-
-static const unsigned int asv_voltage_4x12[CPUFREQ_LEVEL_END] = {
- 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
- 1000000, 987500, 975000, 950000, 925000, 900000, 900000
+ APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 7, 250, 4, 0),
+ APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 6, 175, 3, 0),
+ APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 6, 325, 6, 0),
+ APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 5, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 0, 5, 275, 6, 0),
+ APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 0, 4, 125, 3, 0),
+ APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 4, 150, 4, 0),
+ APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 3, 100, 3, 0),
+ APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 3, 175, 3, 1),
+ APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 200, 4, 1),
+ APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 125, 3, 1),
+ APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 1, 100, 3, 1),
+ APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 0, 1, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 0, 0, 100, 3, 2),
};
static void exynos4x12_set_clkdiv(unsigned int div_index)
@@ -306,7 +106,7 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
/* Change Divider - CPU0 */
- tmp = exynos4x12_clkdiv_table[div_index].clkdiv;
+ tmp = apll_freq_4x12[div_index].clk_div_cpu0;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
@@ -314,7 +114,7 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
cpu_relax();
/* Change Divider - CPU1 */
- tmp = exynos4x12_clkdiv_table[div_index].clkdiv1;
+ tmp = apll_freq_4x12[div_index].clk_div_cpu1;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
if (soc_is_exynos4212())
@@ -341,14 +141,14 @@ static void exynos4x12_set_apll(unsigned int index)
} while (tmp != 0x2);
/* 2. Set APLL Lock time */
- pdiv = ((exynos4x12_apll_pms_table[index] >> 8) & 0x3f);
+ pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f);
__raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK);
/* 3. Change PLL PMS values */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos4x12_apll_pms_table[index];
+ tmp |= apll_freq_4x12[index].mps;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 4. wait_lock_time */
@@ -367,10 +167,10 @@ static void exynos4x12_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
+static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
{
- unsigned int old_pm = exynos4x12_apll_pms_table[old_index] >> 8;
- unsigned int new_pm = exynos4x12_apll_pms_table[new_index] >> 8;
+ unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8;
+ unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8;
return (old_pm == new_pm) ? 0 : 1;
}
@@ -387,7 +187,7 @@ static void exynos4x12_set_frequency(unsigned int old_index,
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4x12_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4x12[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
} else {
@@ -402,7 +202,7 @@ static void exynos4x12_set_frequency(unsigned int old_index,
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4x12_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4x12[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 2. Change the system clock divider values */
exynos4x12_set_clkdiv(new_index);
@@ -416,27 +216,10 @@ static void exynos4x12_set_frequency(unsigned int old_index,
}
}
-static void __init set_volt_table(void)
-{
- unsigned int i;
-
- max_support_idx = L1;
-
- /* Not supported */
- exynos4x12_freq_table[L0].frequency = CPUFREQ_ENTRY_INVALID;
-
- for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++)
- exynos4x12_volt_table[i] = asv_voltage_4x12[i];
-}
-
int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
{
- int i;
- unsigned int tmp;
unsigned long rate;
- set_volt_table();
-
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -455,66 +238,14 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
if (IS_ERR(mout_apll))
goto err_mout_apll;
- for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
-
- exynos4x12_clkdiv_table[i].index = i;
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU);
-
- tmp &= ~(EXYNOS4_CLKDIV_CPU0_CORE_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM0_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM1_MASK |
- EXYNOS4_CLKDIV_CPU0_PERIPH_MASK |
- EXYNOS4_CLKDIV_CPU0_ATB_MASK |
- EXYNOS4_CLKDIV_CPU0_PCLKDBG_MASK |
- EXYNOS4_CLKDIV_CPU0_APLL_MASK);
-
- if (soc_is_exynos4212()) {
- tmp |= ((clkdiv_cpu0_4212[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0_4212[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0_4212[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0_4212[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0_4212[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0_4212[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0_4212[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT));
- } else {
- tmp &= ~EXYNOS4_CLKDIV_CPU0_CORE2_MASK;
-
- tmp |= ((clkdiv_cpu0_4412[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0_4412[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0_4412[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0_4412[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0_4412[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0_4412[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0_4412[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT) |
- (clkdiv_cpu0_4412[i][7] << EXYNOS4_CLKDIV_CPU0_CORE2_SHIFT));
- }
-
- exynos4x12_clkdiv_table[i].clkdiv = tmp;
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU1);
-
- if (soc_is_exynos4212()) {
- tmp &= ~(EXYNOS4_CLKDIV_CPU1_COPY_MASK |
- EXYNOS4_CLKDIV_CPU1_HPM_MASK);
- tmp |= ((clkdiv_cpu1_4212[i][0] << EXYNOS4_CLKDIV_CPU1_COPY_SHIFT) |
- (clkdiv_cpu1_4212[i][1] << EXYNOS4_CLKDIV_CPU1_HPM_SHIFT));
- } else {
- tmp &= ~(EXYNOS4_CLKDIV_CPU1_COPY_MASK |
- EXYNOS4_CLKDIV_CPU1_HPM_MASK |
- EXYNOS4_CLKDIV_CPU1_CORES_MASK);
- tmp |= ((clkdiv_cpu1_4412[i][0] << EXYNOS4_CLKDIV_CPU1_COPY_SHIFT) |
- (clkdiv_cpu1_4412[i][1] << EXYNOS4_CLKDIV_CPU1_HPM_SHIFT) |
- (clkdiv_cpu1_4412[i][2] << EXYNOS4_CLKDIV_CPU1_CORES_SHIFT));
- }
- exynos4x12_clkdiv_table[i].clkdiv1 = tmp;
- }
+ if (soc_is_exynos4212())
+ apll_freq_4x12 = apll_freq_4212;
+ else
+ apll_freq_4x12 = apll_freq_4412;
info->mpll_freq_khz = rate;
- info->pm_lock_idx = L5;
+ /* 800Mhz */
info->pll_safe_idx = L7;
- info->max_support_idx = max_support_idx;
- info->min_support_idx = min_support_idx;
info->cpu_clk = cpu_clk;
info->volt_table = exynos4x12_volt_table;
info->freq_table = exynos4x12_freq_table;
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index e64c253cb169..9fae466d7746 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -19,25 +19,21 @@
#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
-#define CPUFREQ_LEVEL_END (L15 + 1)
+#include "exynos-cpufreq.h"
-static int max_support_idx;
-static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
-struct cpufreq_clkdiv {
- unsigned int index;
- unsigned int clkdiv;
- unsigned int clkdiv1;
+static unsigned int exynos5250_volt_table[] = {
+ 1300000, 1250000, 1225000, 1200000, 1150000,
+ 1125000, 1100000, 1075000, 1050000, 1025000,
+ 1012500, 1000000, 975000, 950000, 937500,
+ 925000
};
-static unsigned int exynos5250_volt_table[CPUFREQ_LEVEL_END];
-
static struct cpufreq_frequency_table exynos5250_freq_table[] = {
{L0, 1700 * 1000},
{L1, 1600 * 1000},
@@ -47,8 +43,8 @@ static struct cpufreq_frequency_table exynos5250_freq_table[] = {
{L5, 1200 * 1000},
{L6, 1100 * 1000},
{L7, 1000 * 1000},
- {L8, 900 * 1000},
- {L9, 800 * 1000},
+ {L8, 900 * 1000},
+ {L9, 800 * 1000},
{L10, 700 * 1000},
{L11, 600 * 1000},
{L12, 500 * 1000},
@@ -58,78 +54,30 @@ static struct cpufreq_frequency_table exynos5250_freq_table[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct cpufreq_clkdiv exynos5250_clkdiv_table[CPUFREQ_LEVEL_END];
-
-static unsigned int clkdiv_cpu0_5250[CPUFREQ_LEVEL_END][8] = {
+static struct apll_freq apll_freq_5250[] = {
/*
- * Clock divider value for following
- * { ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2 }
- */
- { 0, 3, 7, 7, 7, 3, 5, 0 }, /* 1700 MHz */
- { 0, 3, 7, 7, 7, 1, 4, 0 }, /* 1600 MHz */
- { 0, 2, 7, 7, 7, 1, 4, 0 }, /* 1500 MHz */
- { 0, 2, 7, 7, 6, 1, 4, 0 }, /* 1400 MHz */
- { 0, 2, 7, 7, 6, 1, 3, 0 }, /* 1300 MHz */
- { 0, 2, 7, 7, 5, 1, 3, 0 }, /* 1200 MHz */
- { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1100 MHz */
- { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 1000 MHz */
- { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 900 MHz */
- { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 800 MHz */
- { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 700 MHz */
- { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 600 MHz */
- { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 500 MHz */
- { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 400 MHz */
- { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 300 MHz */
- { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 200 MHz */
-};
-
-static unsigned int clkdiv_cpu1_5250[CPUFREQ_LEVEL_END][2] = {
- /* Clock divider value for following
- * { COPY, HPM }
+ * values:
+ * freq
+ * clock divider for ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
*/
- { 0, 2 }, /* 1700 MHz */
- { 0, 2 }, /* 1600 MHz */
- { 0, 2 }, /* 1500 MHz */
- { 0, 2 }, /* 1400 MHz */
- { 0, 2 }, /* 1300 MHz */
- { 0, 2 }, /* 1200 MHz */
- { 0, 2 }, /* 1100 MHz */
- { 0, 2 }, /* 1000 MHz */
- { 0, 2 }, /* 900 MHz */
- { 0, 2 }, /* 800 MHz */
- { 0, 2 }, /* 700 MHz */
- { 0, 2 }, /* 600 MHz */
- { 0, 2 }, /* 500 MHz */
- { 0, 2 }, /* 400 MHz */
- { 0, 2 }, /* 300 MHz */
- { 0, 2 }, /* 200 MHz */
-};
-
-static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = {
- ((425 << 16) | (6 << 8) | 0), /* 1700 MHz */
- ((200 << 16) | (3 << 8) | 0), /* 1600 MHz */
- ((250 << 16) | (4 << 8) | 0), /* 1500 MHz */
- ((175 << 16) | (3 << 8) | 0), /* 1400 MHz */
- ((325 << 16) | (6 << 8) | 0), /* 1300 MHz */
- ((200 << 16) | (4 << 8) | 0), /* 1200 MHz */
- ((275 << 16) | (6 << 8) | 0), /* 1100 MHz */
- ((125 << 16) | (3 << 8) | 0), /* 1000 MHz */
- ((150 << 16) | (4 << 8) | 0), /* 900 MHz */
- ((100 << 16) | (3 << 8) | 0), /* 800 MHz */
- ((175 << 16) | (3 << 8) | 1), /* 700 MHz */
- ((200 << 16) | (4 << 8) | 1), /* 600 MHz */
- ((125 << 16) | (3 << 8) | 1), /* 500 MHz */
- ((100 << 16) | (3 << 8) | 1), /* 400 MHz */
- ((200 << 16) | (4 << 8) | 2), /* 300 MHz */
- ((100 << 16) | (3 << 8) | 2), /* 200 MHz */
-};
-
-/* ASV group voltage table */
-static const unsigned int asv_voltage_5250[CPUFREQ_LEVEL_END] = {
- 1300000, 1250000, 1225000, 1200000, 1150000,
- 1125000, 1100000, 1075000, 1050000, 1025000,
- 1012500, 1000000, 975000, 950000, 937500,
- 925000
+ APLL_FREQ(1700, 0, 3, 7, 7, 7, 3, 5, 0, 0, 2, 0, 425, 6, 0),
+ APLL_FREQ(1600, 0, 3, 7, 7, 7, 1, 4, 0, 0, 2, 0, 200, 3, 0),
+ APLL_FREQ(1500, 0, 2, 7, 7, 7, 1, 4, 0, 0, 2, 0, 250, 4, 0),
+ APLL_FREQ(1400, 0, 2, 7, 7, 6, 1, 4, 0, 0, 2, 0, 175, 3, 0),
+ APLL_FREQ(1300, 0, 2, 7, 7, 6, 1, 3, 0, 0, 2, 0, 325, 6, 0),
+ APLL_FREQ(1200, 0, 2, 7, 7, 5, 1, 3, 0, 0, 2, 0, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 7, 7, 5, 1, 3, 0, 0, 2, 0, 275, 6, 0),
+ APLL_FREQ(1000, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 125, 3, 0),
+ APLL_FREQ(900, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 150, 4, 0),
+ APLL_FREQ(800, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 100, 3, 0),
+ APLL_FREQ(700, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 175, 3, 1),
+ APLL_FREQ(600, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 200, 4, 1),
+ APLL_FREQ(500, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 125, 3, 1),
+ APLL_FREQ(400, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 100, 3, 1),
+ APLL_FREQ(300, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 100, 3, 2),
};
static void set_clkdiv(unsigned int div_index)
@@ -138,7 +86,7 @@ static void set_clkdiv(unsigned int div_index)
/* Change Divider - CPU0 */
- tmp = exynos5250_clkdiv_table[div_index].clkdiv;
+ tmp = apll_freq_5250[div_index].clk_div_cpu0;
__raw_writel(tmp, EXYNOS5_CLKDIV_CPU0);
@@ -146,7 +94,7 @@ static void set_clkdiv(unsigned int div_index)
cpu_relax();
/* Change Divider - CPU1 */
- tmp = exynos5250_clkdiv_table[div_index].clkdiv1;
+ tmp = apll_freq_5250[div_index].clk_div_cpu1;
__raw_writel(tmp, EXYNOS5_CLKDIV_CPU1);
@@ -169,14 +117,14 @@ static void set_apll(unsigned int new_index,
} while (tmp != 0x2);
/* 2. Set APLL Lock time */
- pdiv = ((exynos5_apll_pms_table[new_index] >> 8) & 0x3f);
+ pdiv = ((apll_freq_5250[new_index].mps >> 8) & 0x3f);
__raw_writel((pdiv * 250), EXYNOS5_APLL_LOCK);
/* 3. Change PLL PMS values */
tmp = __raw_readl(EXYNOS5_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos5_apll_pms_table[new_index];
+ tmp |= apll_freq_5250[new_index].mps;
__raw_writel(tmp, EXYNOS5_APLL_CON0);
/* 4. wait_lock_time */
@@ -196,10 +144,10 @@ static void set_apll(unsigned int new_index,
}
-bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index)
+static bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index)
{
- unsigned int old_pm = (exynos5_apll_pms_table[old_index] >> 8);
- unsigned int new_pm = (exynos5_apll_pms_table[new_index] >> 8);
+ unsigned int old_pm = apll_freq_5250[old_index].mps >> 8;
+ unsigned int new_pm = apll_freq_5250[new_index].mps >> 8;
return (old_pm == new_pm) ? 0 : 1;
}
@@ -216,7 +164,7 @@ static void exynos5250_set_frequency(unsigned int old_index,
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS5_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos5_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_5250[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS5_APLL_CON0);
} else {
@@ -231,7 +179,7 @@ static void exynos5250_set_frequency(unsigned int old_index,
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS5_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos5_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_5250[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS5_APLL_CON0);
/* 2. Change the system clock divider values */
set_clkdiv(new_index);
@@ -245,24 +193,10 @@ static void exynos5250_set_frequency(unsigned int old_index,
}
}
-static void __init set_volt_table(void)
-{
- unsigned int i;
-
- max_support_idx = L0;
-
- for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++)
- exynos5250_volt_table[i] = asv_voltage_5250[i];
-}
-
int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
{
- int i;
- unsigned int tmp;
unsigned long rate;
- set_volt_table();
-
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -281,44 +215,9 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
if (IS_ERR(mout_apll))
goto err_mout_apll;
- for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
-
- exynos5250_clkdiv_table[i].index = i;
-
- tmp = __raw_readl(EXYNOS5_CLKDIV_CPU0);
-
- tmp &= ~((0x7 << 0) | (0x7 << 4) | (0x7 << 8) |
- (0x7 << 12) | (0x7 << 16) | (0x7 << 20) |
- (0x7 << 24) | (0x7 << 28));
-
- tmp |= ((clkdiv_cpu0_5250[i][0] << 0) |
- (clkdiv_cpu0_5250[i][1] << 4) |
- (clkdiv_cpu0_5250[i][2] << 8) |
- (clkdiv_cpu0_5250[i][3] << 12) |
- (clkdiv_cpu0_5250[i][4] << 16) |
- (clkdiv_cpu0_5250[i][5] << 20) |
- (clkdiv_cpu0_5250[i][6] << 24) |
- (clkdiv_cpu0_5250[i][7] << 28));
-
- exynos5250_clkdiv_table[i].clkdiv = tmp;
-
- tmp = __raw_readl(EXYNOS5_CLKDIV_CPU1);
-
- tmp &= ~((0x7 << 0) | (0x7 << 4));
-
- tmp |= ((clkdiv_cpu1_5250[i][0] << 0) |
- (clkdiv_cpu1_5250[i][1] << 4));
-
- exynos5250_clkdiv_table[i].clkdiv1 = tmp;
- }
-
info->mpll_freq_khz = rate;
- /* 1000Mhz */
- info->pm_lock_idx = L7;
/* 800Mhz */
info->pll_safe_idx = L9;
- info->max_support_idx = max_support_idx;
- info->min_support_idx = min_support_idx;
info->cpu_clk = cpu_clk;
info->volt_table = exynos5250_volt_table;
info->freq_table = exynos5250_freq_table;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 49cda256efb2..d7a79662e24c 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -63,9 +63,6 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
- if (!cpu_online(policy->cpu))
- return -EINVAL;
-
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
@@ -121,9 +118,6 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
break;
}
- if (!cpu_online(policy->cpu))
- return -EINVAL;
-
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
unsigned int freq = table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
@@ -227,6 +221,15 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
+{
+ pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
+ policy->cpu, policy->last_cpu);
+ per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
+ policy->last_cpu);
+ per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
+}
+
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
return per_cpu(cpufreq_show_table, cpu);
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
new file mode 100644
index 000000000000..66e3a71b81a3
--- /dev/null
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver provides the clk notifier callbacks that are used when
+ * the cpufreq-cpu0 driver changes the frequency, to alert the highbank
+ * EnergyCore Management Engine (ECME) about the need to change
+ * voltage. The ECME interfaces with the actual voltage regulators.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/mailbox.h>
+#include <linux/platform_device.h>
+
+#define HB_CPUFREQ_CHANGE_NOTE 0x80000001
+#define HB_CPUFREQ_IPC_LEN 7
+#define HB_CPUFREQ_VOLT_RETRIES 15
+
+static int hb_voltage_change(unsigned int freq)
+{
+ int i;
+ u32 msg[HB_CPUFREQ_IPC_LEN];
+
+ msg[0] = HB_CPUFREQ_CHANGE_NOTE;
+ msg[1] = freq / 1000000;
+ for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
+ msg[i] = 0;
+
+ return pl320_ipc_transmit(msg);
+}
+
+static int hb_cpufreq_clk_notify(struct notifier_block *nb,
+ unsigned long action, void *hclk)
+{
+ struct clk_notifier_data *clk_data = hclk;
+ int i = 0;
+
+ if (action == PRE_RATE_CHANGE) {
+ if (clk_data->new_rate > clk_data->old_rate)
+ while (hb_voltage_change(clk_data->new_rate))
+ if (i++ > HB_CPUFREQ_VOLT_RETRIES)
+ return NOTIFY_BAD;
+ } else if (action == POST_RATE_CHANGE) {
+ if (clk_data->new_rate < clk_data->old_rate)
+ while (hb_voltage_change(clk_data->new_rate))
+ if (i++ > HB_CPUFREQ_VOLT_RETRIES)
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hb_cpufreq_clk_nb = {
+ .notifier_call = hb_cpufreq_clk_notify,
+};
+
+static int hb_cpufreq_driver_init(void)
+{
+ struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
+ struct device *cpu_dev;
+ struct clk *cpu_clk;
+ struct device_node *np;
+ int ret;
+
+ if (!of_machine_is_compatible("calxeda,highbank"))
+ return -ENODEV;
+
+ for_each_child_of_node(of_find_node_by_path("/cpus"), np)
+ if (of_get_property(np, "operating-points", NULL))
+ break;
+
+ if (!np) {
+ pr_err("failed to find highbank cpufreq node\n");
+ return -ENOENT;
+ }
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("failed to get highbank cpufreq device\n");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ cpu_dev->of_node = np;
+
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ ret = PTR_ERR(cpu_clk);
+ pr_err("failed to get cpu0 clock: %d\n", ret);
+ goto out_put_node;
+ }
+
+ ret = clk_notifier_register(cpu_clk, &hb_cpufreq_clk_nb);
+ if (ret) {
+ pr_err("failed to register clk notifier: %d\n", ret);
+ goto out_put_node;
+ }
+
+ /* Instantiate cpufreq-cpu0 */
+ platform_device_register_full(&devinfo);
+
+out_put_node:
+ of_node_put(np);
+ return ret;
+}
+module_init(hb_cpufreq_driver_init);
+
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
+MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
new file mode 100644
index 000000000000..d6b6ef350cb6
--- /dev/null
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define PU_SOC_VOLTAGE_NORMAL 1250000
+#define PU_SOC_VOLTAGE_HIGH 1275000
+#define FREQ_1P2_GHZ 1200000000
+
+static struct regulator *arm_reg;
+static struct regulator *pu_reg;
+static struct regulator *soc_reg;
+
+static struct clk *arm_clk;
+static struct clk *pll1_sys_clk;
+static struct clk *pll1_sw_clk;
+static struct clk *step_clk;
+static struct clk *pll2_pfd2_396m_clk;
+
+static struct device *cpu_dev;
+static struct cpufreq_frequency_table *freq_table;
+static unsigned int transition_latency;
+
+static int imx6q_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int imx6q_get_speed(unsigned int cpu)
+{
+ return clk_get_rate(arm_clk) / 1000;
+}
+
+static int imx6q_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ struct opp *opp;
+ unsigned long freq_hz, volt, volt_old;
+ unsigned int index, cpu;
+ int ret;
+
+ ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &index);
+ if (ret) {
+ dev_err(cpu_dev, "failed to match target frequency %d: %d\n",
+ target_freq, ret);
+ return ret;
+ }
+
+ freqs.new = freq_table[index].frequency;
+ freq_hz = freqs.new * 1000;
+ freqs.old = clk_get_rate(arm_clk) / 1000;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ for_each_online_cpu(cpu) {
+ freqs.cpu = cpu;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ }
+
+ rcu_read_lock();
+ opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
+ return PTR_ERR(opp);
+ }
+
+ volt = opp_get_voltage(opp);
+ rcu_read_unlock();
+ volt_old = regulator_get_voltage(arm_reg);
+
+ dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
+ freqs.old / 1000, volt_old / 1000,
+ freqs.new / 1000, volt / 1000);
+
+ /* scaling up? scale voltage before frequency */
+ if (freqs.new > freqs.old) {
+ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+ if (ret) {
+ dev_err(cpu_dev,
+ "failed to scale vddarm up: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Need to increase vddpu and vddsoc for safety
+ * if we are about to run at 1.2 GHz.
+ */
+ if (freqs.new == FREQ_1P2_GHZ / 1000) {
+ regulator_set_voltage_tol(pu_reg,
+ PU_SOC_VOLTAGE_HIGH, 0);
+ regulator_set_voltage_tol(soc_reg,
+ PU_SOC_VOLTAGE_HIGH, 0);
+ }
+ }
+
+ /*
+ * The setpoints are selected per PLL/PDF frequencies, so we need to
+ * reprogram PLL for frequency scaling. The procedure of reprogramming
+ * PLL1 is as below.
+ *
+ * - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
+ * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
+ * - Disable pll2_pfd2_396m_clk
+ */
+ clk_prepare_enable(pll2_pfd2_396m_clk);
+ clk_set_parent(step_clk, pll2_pfd2_396m_clk);
+ clk_set_parent(pll1_sw_clk, step_clk);
+ if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
+ clk_set_rate(pll1_sys_clk, freqs.new * 1000);
+ /*
+ * If we are leaving 396 MHz set-point, we need to enable
+ * pll1_sys_clk and disable pll2_pfd2_396m_clk to keep
+ * their use count correct.
+ */
+ if (freqs.old * 1000 <= clk_get_rate(pll2_pfd2_396m_clk)) {
+ clk_prepare_enable(pll1_sys_clk);
+ clk_disable_unprepare(pll2_pfd2_396m_clk);
+ }
+ clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+ clk_disable_unprepare(pll2_pfd2_396m_clk);
+ } else {
+ /*
+ * Disable pll1_sys_clk if pll2_pfd2_396m_clk is sufficient
+ * to provide the frequency.
+ */
+ clk_disable_unprepare(pll1_sys_clk);
+ }
+
+ /* Ensure the arm clock divider is what we expect */
+ ret = clk_set_rate(arm_clk, freqs.new * 1000);
+ if (ret) {
+ dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
+ regulator_set_voltage_tol(arm_reg, volt_old, 0);
+ return ret;
+ }
+
+ /* scaling down? scale voltage after frequency */
+ if (freqs.new < freqs.old) {
+ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+ if (ret)
+ dev_warn(cpu_dev,
+ "failed to scale vddarm down: %d\n", ret);
+
+ if (freqs.old == FREQ_1P2_GHZ / 1000) {
+ regulator_set_voltage_tol(pu_reg,
+ PU_SOC_VOLTAGE_NORMAL, 0);
+ regulator_set_voltage_tol(soc_reg,
+ PU_SOC_VOLTAGE_NORMAL, 0);
+ }
+ }
+
+ for_each_online_cpu(cpu) {
+ freqs.cpu = cpu;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+
+ return 0;
+}
+
+static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
+ return ret;
+ }
+
+ policy->cpuinfo.transition_latency = transition_latency;
+ policy->cur = clk_get_rate(arm_clk) / 1000;
+ cpumask_setall(policy->cpus);
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+ return 0;
+}
+
+static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *imx6q_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver imx6q_cpufreq_driver = {
+ .verify = imx6q_verify_speed,
+ .target = imx6q_set_target,
+ .get = imx6q_get_speed,
+ .init = imx6q_cpufreq_init,
+ .exit = imx6q_cpufreq_exit,
+ .name = "imx6q-cpufreq",
+ .attr = imx6q_cpufreq_attr,
+};
+
+static int imx6q_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct opp *opp;
+ unsigned long min_volt, max_volt;
+ int num, ret;
+
+ cpu_dev = &pdev->dev;
+
+ np = of_find_node_by_path("/cpus/cpu@0");
+ if (!np) {
+ dev_err(cpu_dev, "failed to find cpu0 node\n");
+ return -ENOENT;
+ }
+
+ cpu_dev->of_node = np;
+
+ arm_clk = devm_clk_get(cpu_dev, "arm");
+ pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
+ pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
+ step_clk = devm_clk_get(cpu_dev, "step");
+ pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
+ if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
+ IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
+ dev_err(cpu_dev, "failed to get clocks\n");
+ ret = -ENOENT;
+ goto put_node;
+ }
+
+ arm_reg = devm_regulator_get(cpu_dev, "arm");
+ pu_reg = devm_regulator_get(cpu_dev, "pu");
+ soc_reg = devm_regulator_get(cpu_dev, "soc");
+ if (!arm_reg || !pu_reg || !soc_reg) {
+ dev_err(cpu_dev, "failed to get regulators\n");
+ ret = -ENOENT;
+ goto put_node;
+ }
+
+ /* We expect an OPP table supplied by platform */
+ num = opp_get_opp_count(cpu_dev);
+ if (num < 0) {
+ ret = num;
+ dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
+ goto put_node;
+ }
+
+ ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto put_node;
+ }
+
+ if (of_property_read_u32(np, "clock-latency", &transition_latency))
+ transition_latency = CPUFREQ_ETERNAL;
+
+ /*
+ * OPP is maintained in order of increasing frequency, and
+ * freq_table initialised from OPP is therefore sorted in the
+ * same order.
+ */
+ rcu_read_lock();
+ opp = opp_find_freq_exact(cpu_dev,
+ freq_table[0].frequency * 1000, true);
+ min_volt = opp_get_voltage(opp);
+ opp = opp_find_freq_exact(cpu_dev,
+ freq_table[--num].frequency * 1000, true);
+ max_volt = opp_get_voltage(opp);
+ rcu_read_unlock();
+ ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+
+ /* Count vddpu and vddsoc latency in for 1.2 GHz support */
+ if (freq_table[num].frequency == FREQ_1P2_GHZ / 1000) {
+ ret = regulator_set_voltage_time(pu_reg, PU_SOC_VOLTAGE_NORMAL,
+ PU_SOC_VOLTAGE_HIGH);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ ret = regulator_set_voltage_time(soc_reg, PU_SOC_VOLTAGE_NORMAL,
+ PU_SOC_VOLTAGE_HIGH);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ }
+
+ ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
+ if (ret) {
+ dev_err(cpu_dev, "failed register driver: %d\n", ret);
+ goto free_freq_table;
+ }
+
+ of_node_put(np);
+ return 0;
+
+free_freq_table:
+ opp_free_cpufreq_table(cpu_dev, &freq_table);
+put_node:
+ of_node_put(np);
+ return ret;
+}
+
+static int imx6q_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&imx6q_cpufreq_driver);
+ opp_free_cpufreq_table(cpu_dev, &freq_table);
+
+ return 0;
+}
+
+static struct platform_driver imx6q_cpufreq_platdrv = {
+ .driver = {
+ .name = "imx6q-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = imx6q_cpufreq_probe,
+ .remove = imx6q_cpufreq_remove,
+};
+module_platform_driver(imx6q_cpufreq_platdrv);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
new file mode 100644
index 000000000000..096fde0ebcb5
--- /dev/null
+++ b/drivers/cpufreq/intel_pstate.c
@@ -0,0 +1,823 @@
+/*
+ * intel_pstate.c: Native P state management for Intel processors
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <trace/events/power.h>
+
+#include <asm/div64.h>
+#include <asm/msr.h>
+#include <asm/cpu_device_id.h>
+
+#define SAMPLE_COUNT 3
+
+#define FRAC_BITS 8
+#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
+#define fp_toint(X) ((X) >> FRAC_BITS)
+
+static inline int32_t mul_fp(int32_t x, int32_t y)
+{
+ return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
+}
+
+static inline int32_t div_fp(int32_t x, int32_t y)
+{
+ return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
+}
+
+struct sample {
+ ktime_t start_time;
+ ktime_t end_time;
+ int core_pct_busy;
+ int pstate_pct_busy;
+ u64 duration_us;
+ u64 idletime_us;
+ u64 aperf;
+ u64 mperf;
+ int freq;
+};
+
+struct pstate_data {
+ int current_pstate;
+ int min_pstate;
+ int max_pstate;
+ int turbo_pstate;
+};
+
+struct _pid {
+ int setpoint;
+ int32_t integral;
+ int32_t p_gain;
+ int32_t i_gain;
+ int32_t d_gain;
+ int deadband;
+ int last_err;
+};
+
+struct cpudata {
+ int cpu;
+
+ char name[64];
+
+ struct timer_list timer;
+
+ struct pstate_adjust_policy *pstate_policy;
+ struct pstate_data pstate;
+ struct _pid pid;
+ struct _pid idle_pid;
+
+ int min_pstate_count;
+ int idle_mode;
+
+ ktime_t prev_sample;
+ u64 prev_idle_time_us;
+ u64 prev_aperf;
+ u64 prev_mperf;
+ int sample_ptr;
+ struct sample samples[SAMPLE_COUNT];
+};
+
+static struct cpudata **all_cpu_data;
+struct pstate_adjust_policy {
+ int sample_rate_ms;
+ int deadband;
+ int setpoint;
+ int p_gain_pct;
+ int d_gain_pct;
+ int i_gain_pct;
+};
+
+static struct pstate_adjust_policy default_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 109,
+ .p_gain_pct = 17,
+ .d_gain_pct = 0,
+ .i_gain_pct = 4,
+};
+
+struct perf_limits {
+ int no_turbo;
+ int max_perf_pct;
+ int min_perf_pct;
+ int32_t max_perf;
+ int32_t min_perf;
+};
+
+static struct perf_limits limits = {
+ .no_turbo = 0,
+ .max_perf_pct = 100,
+ .max_perf = int_tofp(1),
+ .min_perf_pct = 0,
+ .min_perf = 0,
+};
+
+static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
+ int deadband, int integral) {
+ pid->setpoint = setpoint;
+ pid->deadband = deadband;
+ pid->integral = int_tofp(integral);
+ pid->last_err = setpoint - busy;
+}
+
+static inline void pid_p_gain_set(struct _pid *pid, int percent)
+{
+ pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static inline void pid_i_gain_set(struct _pid *pid, int percent)
+{
+ pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static inline void pid_d_gain_set(struct _pid *pid, int percent)
+{
+
+ pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static signed int pid_calc(struct _pid *pid, int busy)
+{
+ signed int err, result;
+ int32_t pterm, dterm, fp_error;
+ int32_t integral_limit;
+
+ err = pid->setpoint - busy;
+ fp_error = int_tofp(err);
+
+ if (abs(err) <= pid->deadband)
+ return 0;
+
+ pterm = mul_fp(pid->p_gain, fp_error);
+
+ pid->integral += fp_error;
+
+ /* limit the integral term */
+ integral_limit = int_tofp(30);
+ if (pid->integral > integral_limit)
+ pid->integral = integral_limit;
+ if (pid->integral < -integral_limit)
+ pid->integral = -integral_limit;
+
+ dterm = mul_fp(pid->d_gain, (err - pid->last_err));
+ pid->last_err = err;
+
+ result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
+
+ return (signed int)fp_toint(result);
+}
+
+static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
+{
+ pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct);
+ pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct);
+ pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct);
+
+ pid_reset(&cpu->pid,
+ cpu->pstate_policy->setpoint,
+ 100,
+ cpu->pstate_policy->deadband,
+ 0);
+}
+
+static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
+{
+ pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
+ pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
+ pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
+
+ pid_reset(&cpu->idle_pid,
+ 75,
+ 50,
+ cpu->pstate_policy->deadband,
+ 0);
+}
+
+static inline void intel_pstate_reset_all_pid(void)
+{
+ unsigned int cpu;
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu])
+ intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
+ }
+}
+
+/************************** debugfs begin ************************/
+static int pid_param_set(void *data, u64 val)
+{
+ *(u32 *)data = val;
+ intel_pstate_reset_all_pid();
+ return 0;
+}
+static int pid_param_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
+ pid_param_set, "%llu\n");
+
+struct pid_param {
+ char *name;
+ void *value;
+};
+
+static struct pid_param pid_files[] = {
+ {"sample_rate_ms", &default_policy.sample_rate_ms},
+ {"d_gain_pct", &default_policy.d_gain_pct},
+ {"i_gain_pct", &default_policy.i_gain_pct},
+ {"deadband", &default_policy.deadband},
+ {"setpoint", &default_policy.setpoint},
+ {"p_gain_pct", &default_policy.p_gain_pct},
+ {NULL, NULL}
+};
+
+static struct dentry *debugfs_parent;
+static void intel_pstate_debug_expose_params(void)
+{
+ int i = 0;
+
+ debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
+ if (IS_ERR_OR_NULL(debugfs_parent))
+ return;
+ while (pid_files[i].name) {
+ debugfs_create_file(pid_files[i].name, 0660,
+ debugfs_parent, pid_files[i].value,
+ &fops_pid_param);
+ i++;
+ }
+}
+
+/************************** debugfs end ************************/
+
+/************************** sysfs begin ************************/
+#define show_one(file_name, object) \
+ static ssize_t show_##file_name \
+ (struct kobject *kobj, struct attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%u\n", limits.object); \
+ }
+
+static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ limits.no_turbo = clamp_t(int, input, 0 , 1);
+
+ return count;
+}
+
+static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ limits.max_perf_pct = clamp_t(int, input, 0 , 100);
+ limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+ return count;
+}
+
+static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ limits.min_perf_pct = clamp_t(int, input, 0 , 100);
+ limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+
+ return count;
+}
+
+show_one(no_turbo, no_turbo);
+show_one(max_perf_pct, max_perf_pct);
+show_one(min_perf_pct, min_perf_pct);
+
+define_one_global_rw(no_turbo);
+define_one_global_rw(max_perf_pct);
+define_one_global_rw(min_perf_pct);
+
+static struct attribute *intel_pstate_attributes[] = {
+ &no_turbo.attr,
+ &max_perf_pct.attr,
+ &min_perf_pct.attr,
+ NULL
+};
+
+static struct attribute_group intel_pstate_attr_group = {
+ .attrs = intel_pstate_attributes,
+};
+static struct kobject *intel_pstate_kobject;
+
+static void intel_pstate_sysfs_expose_params(void)
+{
+ int rc;
+
+ intel_pstate_kobject = kobject_create_and_add("intel_pstate",
+ &cpu_subsys.dev_root->kobj);
+ BUG_ON(!intel_pstate_kobject);
+ rc = sysfs_create_group(intel_pstate_kobject,
+ &intel_pstate_attr_group);
+ BUG_ON(rc);
+}
+
+/************************** sysfs end ************************/
+
+static int intel_pstate_min_pstate(void)
+{
+ u64 value;
+ rdmsrl(0xCE, value);
+ return (value >> 40) & 0xFF;
+}
+
+static int intel_pstate_max_pstate(void)
+{
+ u64 value;
+ rdmsrl(0xCE, value);
+ return (value >> 8) & 0xFF;
+}
+
+static int intel_pstate_turbo_pstate(void)
+{
+ u64 value;
+ int nont, ret;
+ rdmsrl(0x1AD, value);
+ nont = intel_pstate_max_pstate();
+ ret = ((value) & 255);
+ if (ret <= nont)
+ ret = nont;
+ return ret;
+}
+
+static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
+{
+ int max_perf = cpu->pstate.turbo_pstate;
+ int min_perf;
+ if (limits.no_turbo)
+ max_perf = cpu->pstate.max_pstate;
+
+ max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+ *max = clamp_t(int, max_perf,
+ cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
+
+ min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
+ *min = clamp_t(int, min_perf,
+ cpu->pstate.min_pstate, max_perf);
+}
+
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+{
+ int max_perf, min_perf;
+
+ intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+
+ pstate = clamp_t(int, pstate, min_perf, max_perf);
+
+ if (pstate == cpu->pstate.current_pstate)
+ return;
+
+#ifndef MODULE
+ trace_cpu_frequency(pstate * 100000, cpu->cpu);
+#endif
+ cpu->pstate.current_pstate = pstate;
+ wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
+
+}
+
+static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
+{
+ int target;
+ target = cpu->pstate.current_pstate + steps;
+
+ intel_pstate_set_pstate(cpu, target);
+}
+
+static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
+{
+ int target;
+ target = cpu->pstate.current_pstate - steps;
+ intel_pstate_set_pstate(cpu, target);
+}
+
+static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+{
+ sprintf(cpu->name, "Intel 2nd generation core");
+
+ cpu->pstate.min_pstate = intel_pstate_min_pstate();
+ cpu->pstate.max_pstate = intel_pstate_max_pstate();
+ cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate();
+
+ /*
+ * go to max pstate so we don't slow down boot if we are built-in. If we
+ * are a module we will take care of it during normal operation
+ */
+ intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+}
+
+static inline void intel_pstate_calc_busy(struct cpudata *cpu,
+ struct sample *sample)
+{
+ u64 core_pct;
+ sample->pstate_pct_busy = 100 - div64_u64(
+ sample->idletime_us * 100,
+ sample->duration_us);
+ core_pct = div64_u64(sample->aperf * 100, sample->mperf);
+ sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000;
+
+ sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
+ 100);
+}
+
+static inline void intel_pstate_sample(struct cpudata *cpu)
+{
+ ktime_t now;
+ u64 idle_time_us;
+ u64 aperf, mperf;
+
+ now = ktime_get();
+ idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
+
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+ /* for the first sample, don't actually record a sample, just
+ * set the baseline */
+ if (cpu->prev_idle_time_us > 0) {
+ cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
+ cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
+ cpu->samples[cpu->sample_ptr].end_time = now;
+ cpu->samples[cpu->sample_ptr].duration_us =
+ ktime_us_delta(now, cpu->prev_sample);
+ cpu->samples[cpu->sample_ptr].idletime_us =
+ idle_time_us - cpu->prev_idle_time_us;
+
+ cpu->samples[cpu->sample_ptr].aperf = aperf;
+ cpu->samples[cpu->sample_ptr].mperf = mperf;
+ cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
+ cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+
+ intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
+ }
+
+ cpu->prev_sample = now;
+ cpu->prev_idle_time_us = idle_time_us;
+ cpu->prev_aperf = aperf;
+ cpu->prev_mperf = mperf;
+}
+
+static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
+{
+ int sample_time, delay;
+
+ sample_time = cpu->pstate_policy->sample_rate_ms;
+ delay = msecs_to_jiffies(sample_time);
+ delay -= jiffies % delay;
+ mod_timer_pinned(&cpu->timer, jiffies + delay);
+}
+
+static inline void intel_pstate_idle_mode(struct cpudata *cpu)
+{
+ cpu->idle_mode = 1;
+}
+
+static inline void intel_pstate_normal_mode(struct cpudata *cpu)
+{
+ cpu->idle_mode = 0;
+}
+
+static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+{
+ int32_t busy_scaled;
+ int32_t core_busy, turbo_pstate, current_pstate;
+
+ core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+ turbo_pstate = int_tofp(cpu->pstate.turbo_pstate);
+ current_pstate = int_tofp(cpu->pstate.current_pstate);
+ busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate));
+
+ return fp_toint(busy_scaled);
+}
+
+static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
+{
+ int busy_scaled;
+ struct _pid *pid;
+ signed int ctl = 0;
+ int steps;
+
+ pid = &cpu->pid;
+ busy_scaled = intel_pstate_get_scaled_busy(cpu);
+
+ ctl = pid_calc(pid, busy_scaled);
+
+ steps = abs(ctl);
+ if (ctl < 0)
+ intel_pstate_pstate_increase(cpu, steps);
+ else
+ intel_pstate_pstate_decrease(cpu, steps);
+}
+
+static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
+{
+ int busy_scaled;
+ struct _pid *pid;
+ int ctl = 0;
+ int steps;
+
+ pid = &cpu->idle_pid;
+
+ busy_scaled = intel_pstate_get_scaled_busy(cpu);
+
+ ctl = pid_calc(pid, 100 - busy_scaled);
+
+ steps = abs(ctl);
+ if (ctl < 0)
+ intel_pstate_pstate_decrease(cpu, steps);
+ else
+ intel_pstate_pstate_increase(cpu, steps);
+
+ if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
+ intel_pstate_normal_mode(cpu);
+}
+
+static void intel_pstate_timer_func(unsigned long __data)
+{
+ struct cpudata *cpu = (struct cpudata *) __data;
+
+ intel_pstate_sample(cpu);
+
+ if (!cpu->idle_mode)
+ intel_pstate_adjust_busy_pstate(cpu);
+ else
+ intel_pstate_adjust_idle_pstate(cpu);
+
+#if defined(XPERF_FIX)
+ if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
+ cpu->min_pstate_count++;
+ if (!(cpu->min_pstate_count % 5)) {
+ intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+ intel_pstate_idle_mode(cpu);
+ }
+ } else
+ cpu->min_pstate_count = 0;
+#endif
+ intel_pstate_set_sample_time(cpu);
+}
+
+#define ICPU(model, policy) \
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
+
+static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
+ ICPU(0x2a, default_policy),
+ ICPU(0x2d, default_policy),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+
+static int intel_pstate_init_cpu(unsigned int cpunum)
+{
+
+ const struct x86_cpu_id *id;
+ struct cpudata *cpu;
+
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
+ if (!all_cpu_data[cpunum])
+ return -ENOMEM;
+
+ cpu = all_cpu_data[cpunum];
+
+ intel_pstate_get_cpu_pstates(cpu);
+
+ cpu->cpu = cpunum;
+ cpu->pstate_policy =
+ (struct pstate_adjust_policy *)id->driver_data;
+ init_timer_deferrable(&cpu->timer);
+ cpu->timer.function = intel_pstate_timer_func;
+ cpu->timer.data =
+ (unsigned long)cpu;
+ cpu->timer.expires = jiffies + HZ/100;
+ intel_pstate_busy_pid_reset(cpu);
+ intel_pstate_idle_pid_reset(cpu);
+ intel_pstate_sample(cpu);
+ intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+
+ add_timer_on(&cpu->timer, cpunum);
+
+ pr_info("Intel pstate controlling: cpu %d\n", cpunum);
+
+ return 0;
+}
+
+static unsigned int intel_pstate_get(unsigned int cpu_num)
+{
+ struct sample *sample;
+ struct cpudata *cpu;
+
+ cpu = all_cpu_data[cpu_num];
+ if (!cpu)
+ return 0;
+ sample = &cpu->samples[cpu->sample_ptr];
+ return sample->freq;
+}
+
+static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+ int min, max;
+
+ cpu = all_cpu_data[policy->cpu];
+
+ intel_pstate_get_min_max(cpu, &min, &max);
+
+ limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+ limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
+ limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+
+ limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+ limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
+ limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ limits.min_perf_pct = 100;
+ limits.min_perf = int_tofp(1);
+ limits.max_perf_pct = 100;
+ limits.max_perf = int_tofp(1);
+ limits.no_turbo = 0;
+ }
+
+ return 0;
+}
+
+static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
+{
+ cpufreq_verify_within_limits(policy,
+ policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
+ (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __cpuinit intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+ int cpu = policy->cpu;
+
+ del_timer(&all_cpu_data[cpu]->timer);
+ kfree(all_cpu_data[cpu]);
+ all_cpu_data[cpu] = NULL;
+ return 0;
+}
+
+static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy)
+{
+ int rc, min_pstate, max_pstate;
+ struct cpudata *cpu;
+
+ rc = intel_pstate_init_cpu(policy->cpu);
+ if (rc)
+ return rc;
+
+ cpu = all_cpu_data[policy->cpu];
+
+ if (!limits.no_turbo &&
+ limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+ intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+ policy->min = min_pstate * 100000;
+ policy->max = max_pstate * 100000;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
+ policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ cpumask_set_cpu(policy->cpu, policy->cpus);
+
+ return 0;
+}
+
+static struct cpufreq_driver intel_pstate_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = intel_pstate_verify_policy,
+ .setpolicy = intel_pstate_set_policy,
+ .get = intel_pstate_get,
+ .init = intel_pstate_cpu_init,
+ .exit = intel_pstate_cpu_exit,
+ .name = "intel_pstate",
+ .owner = THIS_MODULE,
+};
+
+static void intel_pstate_exit(void)
+{
+ int cpu;
+
+ sysfs_remove_group(intel_pstate_kobject,
+ &intel_pstate_attr_group);
+ debugfs_remove_recursive(debugfs_parent);
+
+ cpufreq_unregister_driver(&intel_pstate_driver);
+
+ if (!all_cpu_data)
+ return;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu]) {
+ del_timer_sync(&all_cpu_data[cpu]->timer);
+ kfree(all_cpu_data[cpu]);
+ }
+ }
+
+ put_online_cpus();
+ vfree(all_cpu_data);
+}
+module_exit(intel_pstate_exit);
+
+static int __initdata no_load;
+
+static int __init intel_pstate_init(void)
+{
+ int rc = 0;
+ const struct x86_cpu_id *id;
+
+ if (no_load)
+ return -ENODEV;
+
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ pr_info("Intel P-state driver initializing.\n");
+
+ all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
+ if (!all_cpu_data)
+ return -ENOMEM;
+ memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
+
+ rc = cpufreq_register_driver(&intel_pstate_driver);
+ if (rc)
+ goto out;
+
+ intel_pstate_debug_expose_params();
+ intel_pstate_sysfs_expose_params();
+ return rc;
+out:
+ intel_pstate_exit();
+ return -ENODEV;
+}
+device_initcall(intel_pstate_init);
+
+static int __init intel_pstate_setup(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "disable"))
+ no_load = 1;
+ return 0;
+}
+early_param("intel_pstate", intel_pstate_setup);
+
+MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
+MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
new file mode 100644
index 000000000000..0e83e3c24f5b
--- /dev/null
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -0,0 +1,259 @@
+/*
+ * kirkwood-cpufreq.c: cpufreq driver for the Marvell Kirkwood
+ *
+ * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <asm/proc-fns.h>
+
+#define CPU_SW_INT_BLK BIT(28)
+
+static struct priv
+{
+ struct clk *cpu_clk;
+ struct clk *ddr_clk;
+ struct clk *powersave_clk;
+ struct device *dev;
+ void __iomem *base;
+} priv;
+
+#define STATE_CPU_FREQ 0x01
+#define STATE_DDR_FREQ 0x02
+
+/*
+ * Kirkwood can swap the clock to the CPU between two clocks:
+ *
+ * - cpu clk
+ * - ddr clk
+ *
+ * The frequencies are set at runtime before registering this
+ * table.
+ */
+static struct cpufreq_frequency_table kirkwood_freq_table[] = {
+ {STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */
+ {STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */
+ {0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
+{
+ if (__clk_is_enabled(priv.powersave_clk))
+ return kirkwood_freq_table[1].frequency;
+ return kirkwood_freq_table[0].frequency;
+}
+
+static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
+{
+ struct cpufreq_freqs freqs;
+ unsigned int state = kirkwood_freq_table[index].index;
+ unsigned long reg;
+
+ freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
+ freqs.new = kirkwood_freq_table[index].frequency;
+ freqs.cpu = 0; /* Kirkwood is UP */
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
+ kirkwood_freq_table[index].frequency);
+ dev_dbg(priv.dev, "old frequency was %i KHz\n",
+ kirkwood_cpufreq_get_cpu_frequency(0));
+
+ if (freqs.old != freqs.new) {
+ local_irq_disable();
+
+ /* Disable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg |= CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
+
+ switch (state) {
+ case STATE_CPU_FREQ:
+ clk_disable(priv.powersave_clk);
+ break;
+ case STATE_DDR_FREQ:
+ clk_enable(priv.powersave_clk);
+ break;
+ }
+
+ /* Wait-for-Interrupt, while the hardware changes frequency */
+ cpu_do_idle();
+
+ /* Enable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg &= ~CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
+
+ local_irq_enable();
+ }
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+}
+
+static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
+}
+
+static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int index = 0;
+
+ if (cpufreq_frequency_table_target(policy, kirkwood_freq_table,
+ target_freq, relation, &index))
+ return -EINVAL;
+
+ kirkwood_cpufreq_set_cpu_state(index);
+
+ return 0;
+}
+
+/* Module init and exit code */
+static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int result;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.transition_latency = 5000; /* 5uS */
+ policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
+
+ result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
+ if (result)
+ return result;
+
+ cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
+
+ return 0;
+}
+
+static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *kirkwood_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver kirkwood_cpufreq_driver = {
+ .get = kirkwood_cpufreq_get_cpu_frequency,
+ .verify = kirkwood_cpufreq_verify,
+ .target = kirkwood_cpufreq_target,
+ .init = kirkwood_cpufreq_cpu_init,
+ .exit = kirkwood_cpufreq_cpu_exit,
+ .name = "kirkwood-cpufreq",
+ .owner = THIS_MODULE,
+ .attr = kirkwood_cpufreq_attr,
+};
+
+static int kirkwood_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct resource *res;
+ int err;
+
+ priv.dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Cannot get memory resource\n");
+ return -ENODEV;
+ }
+ priv.base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv.base) {
+ dev_err(&pdev->dev, "Cannot ioremap\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ np = of_find_node_by_path("/cpus/cpu@0");
+ if (!np)
+ return -ENODEV;
+
+ priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
+ if (IS_ERR(priv.cpu_clk)) {
+ dev_err(priv.dev, "Unable to get cpuclk");
+ return PTR_ERR(priv.cpu_clk);
+ }
+
+ clk_prepare_enable(priv.cpu_clk);
+ kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
+
+ priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
+ if (IS_ERR(priv.ddr_clk)) {
+ dev_err(priv.dev, "Unable to get ddrclk");
+ err = PTR_ERR(priv.ddr_clk);
+ goto out_cpu;
+ }
+
+ clk_prepare_enable(priv.ddr_clk);
+ kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
+
+ priv.powersave_clk = of_clk_get_by_name(np, "powersave");
+ if (IS_ERR(priv.powersave_clk)) {
+ dev_err(priv.dev, "Unable to get powersave");
+ err = PTR_ERR(priv.powersave_clk);
+ goto out_ddr;
+ }
+ clk_prepare(priv.powersave_clk);
+
+ of_node_put(np);
+ np = NULL;
+
+ err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
+ if (!err)
+ return 0;
+
+ dev_err(priv.dev, "Failed to register cpufreq driver");
+
+ clk_disable_unprepare(priv.powersave_clk);
+out_ddr:
+ clk_disable_unprepare(priv.ddr_clk);
+out_cpu:
+ clk_disable_unprepare(priv.cpu_clk);
+ of_node_put(np);
+
+ return err;
+}
+
+static int kirkwood_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&kirkwood_cpufreq_driver);
+
+ clk_disable_unprepare(priv.powersave_clk);
+ clk_disable_unprepare(priv.ddr_clk);
+ clk_disable_unprepare(priv.cpu_clk);
+
+ return 0;
+}
+
+static struct platform_driver kirkwood_cpufreq_platform_driver = {
+ .probe = kirkwood_cpufreq_probe,
+ .remove = kirkwood_cpufreq_remove,
+ .driver = {
+ .name = "kirkwood-cpufreq",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(kirkwood_cpufreq_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch");
+MODULE_DESCRIPTION("cpufreq driver for Marvell's kirkwood CPU");
+MODULE_ALIAS("platform:kirkwood-cpufreq");
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index 89b178a3f849..d4c4989823dc 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -181,7 +181,7 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* secondary CPUs are tied to the primary one by the
* cpufreq core if in the secondary policy we tell it that
* it actually must be one policy together with all others. */
- cpumask_copy(policy->cpus, cpu_online_mask);
+ cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 1f3417a8322d..9128c07bafba 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -110,13 +110,16 @@ static int omap_target(struct cpufreq_policy *policy,
freq = ret;
if (mpu_reg) {
+ rcu_read_lock();
opp = opp_find_freq_ceil(mpu_dev, &freq);
if (IS_ERR(opp)) {
+ rcu_read_unlock();
dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
__func__, freqs.new);
return -EINVAL;
}
volt = opp_get_voltage(opp);
+ rcu_read_unlock();
tol = volt * OPP_TOLERANCE / 100;
volt_old = regulator_get_voltage(mpu_reg);
}
@@ -211,10 +214,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
* interface to handle this scenario. Additional is_smp() check
* is to keep SMP_ON_UP build working.
*/
- if (is_smp()) {
- policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+ if (is_smp())
cpumask_setall(policy->cpus);
- }
/* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000;
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 056faf6af1a9..d13a13678b5f 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1249,39 +1249,59 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.attr = powernow_k8_attr,
};
+static void __request_acpi_cpufreq(void)
+{
+ const char *cur_drv, *drv = "acpi-cpufreq";
+
+ cur_drv = cpufreq_get_current_driver();
+ if (!cur_drv)
+ goto request;
+
+ if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv))))
+ pr_warn(PFX "WTF driver: %s\n", cur_drv);
+
+ return;
+
+ request:
+ pr_warn(PFX "This CPU is not supported anymore, using acpi-cpufreq instead.\n");
+ request_module(drv);
+}
+
/* driver entry point for init */
static int __cpuinit powernowk8_init(void)
{
unsigned int i, supported_cpus = 0;
- int rv;
+ int ret;
if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
- pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
- request_module("acpi-cpufreq");
+ __request_acpi_cpufreq();
return -ENODEV;
}
if (!x86_match_cpu(powernow_k8_ids))
return -ENODEV;
+ get_online_cpus();
for_each_online_cpu(i) {
- int rc;
- smp_call_function_single(i, check_supported_cpu, &rc, 1);
- if (rc == 0)
+ smp_call_function_single(i, check_supported_cpu, &ret, 1);
+ if (!ret)
supported_cpus++;
}
- if (supported_cpus != num_online_cpus())
+ if (supported_cpus != num_online_cpus()) {
+ put_online_cpus();
return -ENODEV;
+ }
+ put_online_cpus();
- rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+ ret = cpufreq_register_driver(&cpufreq_amd64_driver);
+ if (ret)
+ return ret;
- if (!rv)
- pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
- num_online_nodes(), boot_cpu_data.x86_model_id,
- supported_cpus);
+ pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+ num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
- return rv;
+ return ret;
}
/* driver entry point for term */
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 4575cfe41755..7e4d77327957 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -30,7 +30,7 @@ static struct {
u32 cnt;
} spear_cpufreq;
-int spear_cpufreq_verify(struct cpufreq_policy *policy)
+static int spear_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
}
@@ -157,7 +157,9 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = newfreq / 1000;
freqs.new /= mult;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
if (mult == 2)
ret = spear1340_set_cpu_rate(srcclk, newfreq);
@@ -170,7 +172,8 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
}
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return ret;
}
@@ -188,8 +191,7 @@ static int spear_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
policy->cur = spear_cpufreq_get(0);
- cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
- cpumask_copy(policy->related_cpus, policy->cpus);
+ cpumask_setall(policy->cpus);
return 0;
}
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index c4cc27e5c8a5..071e2c3eec4f 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -39,4 +39,10 @@ config CPU_IDLE_CALXEDA
help
Select this to enable cpuidle on Calxeda processors.
+config CPU_IDLE_KIRKWOOD
+ bool "CPU Idle Driver for Kirkwood processors"
+ depends on ARCH_KIRKWOOD
+ help
+ Select this to enable cpuidle on Kirkwood processors.
+
endif
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 03ee87482c71..24c6e7d945ed 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -6,3 +6,4 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
+obj-$(CONFIG_CPU_IDLE_KIRKWOOD) += cpuidle-kirkwood.o
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
new file mode 100644
index 000000000000..670aa1e55cd6
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -0,0 +1,106 @@
+/*
+ * drivers/cpuidle/cpuidle-kirkwood.c
+ *
+ * CPU idle driver for Marvell Kirkwood SoCs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The CPU idle driver uses wait-for-interrupt and DDR self refresh in order
+ * to implement two idle states -
+ * #1 wait-for-interrupt
+ * #2 wait-for-interrupt and DDR self refresh
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/cpuidle.h>
+#include <linux/io.h>
+#include <linux/export.h>
+#include <asm/proc-fns.h>
+#include <asm/cpuidle.h>
+
+#define KIRKWOOD_MAX_STATES 2
+
+static void __iomem *ddr_operation_base;
+
+/* Actual code that puts the SoC in different idle states */
+static int kirkwood_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ writel(0x7, ddr_operation_base);
+ cpu_do_idle();
+
+ return index;
+}
+
+static struct cpuidle_driver kirkwood_idle_driver = {
+ .name = "kirkwood_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = kirkwood_enter_idle,
+ .exit_latency = 10,
+ .target_residency = 100000,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "DDR SR",
+ .desc = "WFI and DDR Self Refresh",
+ },
+ .state_count = KIRKWOOD_MAX_STATES,
+};
+static struct cpuidle_device *device;
+
+static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
+
+/* Initialize CPU idle by registering the idle states */
+static int kirkwood_cpuidle_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -EINVAL;
+
+ ddr_operation_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!ddr_operation_base)
+ return -EADDRNOTAVAIL;
+
+ device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
+ device->state_count = KIRKWOOD_MAX_STATES;
+
+ cpuidle_register_driver(&kirkwood_idle_driver);
+ if (cpuidle_register_device(device)) {
+ pr_err("kirkwood_init_cpuidle: Failed registering\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int kirkwood_cpuidle_remove(struct platform_device *pdev)
+{
+ cpuidle_unregister_device(device);
+ cpuidle_unregister_driver(&kirkwood_idle_driver);
+
+ return 0;
+}
+
+static struct platform_driver kirkwood_cpuidle_driver = {
+ .probe = kirkwood_cpuidle_probe,
+ .remove = kirkwood_cpuidle_remove,
+ .driver = {
+ .name = "kirkwood_cpuidle",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(kirkwood_cpuidle_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("Kirkwood cpu idle driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kirkwood-cpuidle");
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index fb4a7dd57f94..eba69290e074 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -69,24 +69,15 @@ int cpuidle_play_dead(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
- int i, dead_state = -1;
- int power_usage = INT_MAX;
+ int i;
if (!drv)
return -ENODEV;
/* Find lowest-power state that supports long-term idle */
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
- struct cpuidle_state *s = &drv->states[i];
-
- if (s->power_usage < power_usage && s->enter_dead) {
- power_usage = s->power_usage;
- dead_state = i;
- }
- }
-
- if (dead_state != -1)
- return drv->states[dead_state].enter_dead(dev, dead_state);
+ for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
+ if (drv->states[i].enter_dead)
+ return drv->states[i].enter_dead(dev, i);
return -ENODEV;
}
@@ -153,7 +144,6 @@ int cpuidle_idle_call(void)
return 0;
}
- trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
trace_cpu_idle_rcuidle(next_state, dev->cpu);
if (cpuidle_state_is_coupled(dev, drv, next_state))
@@ -162,7 +152,6 @@ int cpuidle_idle_call(void)
else
entered_state = cpuidle_enter_state(dev, drv, next_state);
- trace_power_end_rcuidle(dev->cpu);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* give the governor an opportunity to reflect on the outcome */
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index c2b281afe0ed..422c7b69ba7c 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -19,34 +19,9 @@ DEFINE_SPINLOCK(cpuidle_driver_lock);
static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
-static void set_power_states(struct cpuidle_driver *drv)
-{
- int i;
-
- /*
- * cpuidle driver should set the drv->power_specified bit
- * before registering if the driver provides
- * power_usage numbers.
- *
- * If power_specified is not set,
- * we fill in power_usage with decreasing values as the
- * cpuidle code has an implicit assumption that state Cn
- * uses less power than C(n-1).
- *
- * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
- * an power value of -1. So we use -2, -3, etc, for other
- * c-states.
- */
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
- drv->states[i].power_usage = -1 - i;
-}
-
static void __cpuidle_driver_init(struct cpuidle_driver *drv)
{
drv->refcnt = 0;
-
- if (!drv->power_specified)
- set_power_states(drv);
}
static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 20ea33afdda1..fe343a06b7da 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -312,7 +312,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
- int power_usage = INT_MAX;
int i;
int multiplier;
struct timespec t;
@@ -383,11 +382,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (s->exit_latency * multiplier > data->predicted_us)
continue;
- if (s->power_usage < power_usage) {
- power_usage = s->power_usage;
- data->last_state_idx = i;
- data->exit_us = s->exit_latency;
- }
+ data->last_state_idx = i;
+ data->exit_us = s->exit_latency;
}
/* not deepest C-state chosen for low predicted residency */
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 340942946106..428754af6236 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -374,7 +374,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
/* state statistics */
- for (i = 0; i < drv->state_count; i++) {
+ for (i = 0; i < device->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
if (!kobj)
goto error_state;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 90d34adc2a66..9e6947bc296f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -38,7 +38,10 @@
#include <crypto/internal/hash.h>
#include <linux/omap-dma.h>
+
+#ifdef CONFIG_ARCH_OMAP1
#include <mach/irqs.h>
+#endif
#define SHA_REG_DIGEST(x) (0x00 + ((x) * 0x04))
#define SHA_REG_DIN(x) (0x1C + ((x) * 0x04))
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 53766f39aadd..3b367973a802 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -994,6 +994,11 @@ module_exit(devfreq_exit);
* @freq: The frequency given to target function
* @flags: Flags handed from devfreq framework.
*
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
*/
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
u32 flags)
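A minimal caller sketch for the locking rule documented above (dev, freq and flags are assumed to be set up by the caller); the exynos4_bus changes further down follow the same pattern:

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, &freq, flags);
	if (!IS_ERR(opp)) {
		rate = opp_get_freq(opp);
		volt = opp_get_voltage(opp);
	}
	rcu_read_unlock();	/* opp must not be dereferenced after this point */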
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
index 80c745e83082..3f37f3b3f268 100644
--- a/drivers/devfreq/exynos4_bus.c
+++ b/drivers/devfreq/exynos4_bus.c
@@ -73,6 +73,16 @@ enum busclk_level_idx {
#define EX4210_LV_NUM (LV_2 + 1)
#define EX4x12_LV_NUM (LV_4 + 1)
+/**
+ * struct busfreq_opp_info - opp information for bus
+ * @rate: Frequency in hertz
+ * @volt: Voltage in microvolts corresponding to this OPP
+ */
+struct busfreq_opp_info {
+ unsigned long rate;
+ unsigned long volt;
+};
+
struct busfreq_data {
enum exynos4_busf_type type;
struct device *dev;
@@ -80,7 +90,7 @@ struct busfreq_data {
bool disabled;
struct regulator *vdd_int;
struct regulator *vdd_mif; /* Exynos4412/4212 only */
- struct opp *curr_opp;
+ struct busfreq_opp_info curr_oppinfo;
struct exynos4_ppmu dmc[2];
struct notifier_block pm_notifier;
@@ -296,13 +306,14 @@ static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
};
-static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4210_set_busclk(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi)
{
unsigned int index;
unsigned int tmp;
for (index = LV_0; index < EX4210_LV_NUM; index++)
- if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+ if (oppi->rate == exynos4210_busclk_table[index].clk)
break;
if (index == EX4210_LV_NUM)
@@ -361,13 +372,14 @@ static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
return 0;
}
-static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4x12_set_busclk(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi)
{
unsigned int index;
unsigned int tmp;
for (index = LV_0; index < EX4x12_LV_NUM; index++)
- if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+ if (oppi->rate == exynos4x12_mifclk_table[index].clk)
break;
if (index == EX4x12_LV_NUM)
@@ -576,11 +588,12 @@ static int exynos4x12_get_intspec(unsigned long mifclk)
return -EINVAL;
}
-static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
- struct opp *oldopp)
+static int exynos4_bus_setvolt(struct busfreq_data *data,
+ struct busfreq_opp_info *oppi,
+ struct busfreq_opp_info *oldoppi)
{
int err = 0, tmp;
- unsigned long volt = opp_get_voltage(opp);
+ unsigned long volt = oppi->volt;
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
@@ -595,11 +608,11 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
if (err)
break;
- tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+ tmp = exynos4x12_get_intspec(oppi->rate);
if (tmp < 0) {
err = tmp;
regulator_set_voltage(data->vdd_mif,
- opp_get_voltage(oldopp),
+ oldoppi->volt,
MAX_SAFEVOLT);
break;
}
@@ -609,7 +622,7 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
/* Try to recover */
if (err)
regulator_set_voltage(data->vdd_mif,
- opp_get_voltage(oldopp),
+ oldoppi->volt,
MAX_SAFEVOLT);
break;
default:
@@ -626,17 +639,26 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
struct platform_device *pdev = container_of(dev, struct platform_device,
dev);
struct busfreq_data *data = platform_get_drvdata(pdev);
- struct opp *opp = devfreq_recommended_opp(dev, _freq, flags);
- unsigned long freq = opp_get_freq(opp);
- unsigned long old_freq = opp_get_freq(data->curr_opp);
+ struct opp *opp;
+ unsigned long freq;
+ unsigned long old_freq = data->curr_oppinfo.rate;
+ struct busfreq_opp_info new_oppinfo;
- if (IS_ERR(opp))
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(dev, _freq, flags);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
return PTR_ERR(opp);
+ }
+ new_oppinfo.rate = opp_get_freq(opp);
+ new_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
+ freq = new_oppinfo.rate;
if (old_freq == freq)
return 0;
- dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+ dev_dbg(dev, "targeting %lukHz %luuV\n", freq, new_oppinfo.volt);
mutex_lock(&data->lock);
@@ -644,17 +666,18 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
goto out;
if (old_freq < freq)
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto out;
if (old_freq != freq) {
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, opp);
+ err = exynos4210_set_busclk(data, &new_oppinfo);
break;
case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, opp);
+ err = exynos4x12_set_busclk(data, &new_oppinfo);
break;
default:
err = -EINVAL;
@@ -664,11 +687,12 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
goto out;
if (old_freq > freq)
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto out;
- data->curr_opp = opp;
+ data->curr_oppinfo = new_oppinfo;
out:
mutex_unlock(&data->lock);
return err;
@@ -702,7 +726,7 @@ static int exynos4_bus_get_dev_status(struct device *dev,
exynos4_read_ppmu(data);
busier_dmc = exynos4_get_busier_dmc(data);
- stat->current_frequency = opp_get_freq(data->curr_opp);
+ stat->current_frequency = data->curr_oppinfo.rate;
if (busier_dmc)
addr = S5P_VA_DMC1;
@@ -933,6 +957,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
struct busfreq_data *data = container_of(this, struct busfreq_data,
pm_notifier);
struct opp *opp;
+ struct busfreq_opp_info new_oppinfo;
unsigned long maxfreq = ULONG_MAX;
int err = 0;
@@ -943,18 +968,29 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
data->disabled = true;
+ rcu_read_lock();
opp = opp_find_freq_floor(data->dev, &maxfreq);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(data->dev, "%s: unable to find a min freq\n",
+ __func__);
+ return PTR_ERR(opp);
+ }
+ new_oppinfo.rate = opp_get_freq(opp);
+ new_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
- err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ err = exynos4_bus_setvolt(data, &new_oppinfo,
+ &data->curr_oppinfo);
if (err)
goto unlock;
switch (data->type) {
case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, opp);
+ err = exynos4210_set_busclk(data, &new_oppinfo);
break;
case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, opp);
+ err = exynos4x12_set_busclk(data, &new_oppinfo);
break;
default:
err = -EINVAL;
@@ -962,7 +998,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
if (err)
goto unlock;
- data->curr_opp = opp;
+ data->curr_oppinfo = new_oppinfo;
unlock:
mutex_unlock(&data->lock);
if (err)
@@ -1027,13 +1063,17 @@ static int exynos4_busfreq_probe(struct platform_device *pdev)
}
}
+ rcu_read_lock();
opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
if (IS_ERR(opp)) {
+ rcu_read_unlock();
dev_err(dev, "Invalid initial frequency %lu kHz.\n",
exynos4_devfreq_profile.initial_freq);
return PTR_ERR(opp);
}
- data->curr_opp = opp;
+ data->curr_oppinfo.rate = opp_get_freq(opp);
+ data->curr_oppinfo.volt = opp_get_voltage(opp);
+ rcu_read_unlock();
platform_set_drvdata(pdev, data);
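The whole exynos4 busfreq conversion above follows a single pattern: OPP pointers returned by the OPP library are only valid inside an RCU read-side section, so the driver now takes rcu_read_lock(), copies the frequency and voltage into a plain structure, drops the lock, and then works with the copies. A minimal sketch of that pattern, using only the calls that appear in the hunks above; the helper name is illustrative and busfreq_opp_info is re-declared here to mirror the fields the patch uses (rate, volt):

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

/* Mirrors the busfreq_opp_info structure introduced by the patch above. */
struct busfreq_opp_info {
        unsigned long rate;
        unsigned long volt;
};

static int snapshot_recommended_opp(struct device *dev, unsigned long *freq,
                                    u32 flags, struct busfreq_opp_info *info)
{
        struct opp *opp;

        rcu_read_lock();
        opp = devfreq_recommended_opp(dev, freq, flags);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                return PTR_ERR(opp);
        }
        /* The opp pointer is only valid while the RCU read lock is held,
         * so copy out what we need before unlocking. */
        info->rate = opp_get_freq(opp);
        info->volt = opp_get_voltage(opp);
        rcu_read_unlock();

        return 0;
}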
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d4c12180c654..40179e749f08 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -125,6 +125,8 @@ config MPC512X_DMA
---help---
Enable support for the Freescale MPC512x built-in DMA engine.
+source "drivers/dma/bestcomm/Kconfig"
+
config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7428feaa8705..642d96736cf5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
diff --git a/drivers/dma/bestcomm/Kconfig b/drivers/dma/bestcomm/Kconfig
new file mode 100644
index 000000000000..29e427085efb
--- /dev/null
+++ b/drivers/dma/bestcomm/Kconfig
@@ -0,0 +1,36 @@
+#
+# Kconfig options for Bestcomm
+#
+
+config PPC_BESTCOMM
+ tristate "Bestcomm DMA engine support"
+ depends on PPC_MPC52xx
+ default n
+ select PPC_LIB_RHEAP
+ help
+ BestComm is the name of the communication coprocessor found
+ on the Freescale MPC5200 family of processors. Its usage is
+ optional for some drivers (like ATA), but required for
+ others (like FEC).
+
+ If you want to use drivers that require DMA operations,
+ answer Y or M. Otherwise say N.
+
+config PPC_BESTCOMM_ATA
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables the support for the ATA task.
+
+config PPC_BESTCOMM_FEC
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables the support for the FEC tasks.
+
+config PPC_BESTCOMM_GEN_BD
+ tristate
+ depends on PPC_BESTCOMM
+ help
+ This option enables the support for the GenBD tasks.
+
diff --git a/drivers/dma/bestcomm/Makefile b/drivers/dma/bestcomm/Makefile
new file mode 100644
index 000000000000..aed2df2a6580
--- /dev/null
+++ b/drivers/dma/bestcomm/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for BestComm & co
+#
+
+bestcomm-core-objs := bestcomm.o sram.o
+bestcomm-ata-objs := ata.o bcom_ata_task.o
+bestcomm-fec-objs := fec.o bcom_fec_rx_task.o bcom_fec_tx_task.o
+bestcomm-gen-bd-objs := gen_bd.o bcom_gen_bd_rx_task.o bcom_gen_bd_tx_task.o
+
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm-core.o
+obj-$(CONFIG_PPC_BESTCOMM_ATA) += bestcomm-ata.o
+obj-$(CONFIG_PPC_BESTCOMM_FEC) += bestcomm-fec.o
+obj-$(CONFIG_PPC_BESTCOMM_GEN_BD) += bestcomm-gen-bd.o
+
diff --git a/drivers/dma/bestcomm/ata.c b/drivers/dma/bestcomm/ata.c
new file mode 100644
index 000000000000..2fd87f83cf90
--- /dev/null
+++ b/drivers/dma/bestcomm/ata.c
@@ -0,0 +1,157 @@
+/*
+ * Bestcomm ATA task driver
+ *
+ *
+ * Patterned after bestcomm/fec.c by Dale Farnsworth <dfarnsworth@mvista.com>
+ * 2003-2004 (c) MontaVista, Software, Inc.
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 Freescale - John Rigby
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/ata.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* ata task image */
+extern u32 bcom_ata_task[];
+
+/* ata task vars that need to be set before enabling the task */
+struct bcom_ata_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* ata task incs that need to be set before enabling the task */
+struct bcom_ata_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+ u16 pad2;
+ s16 incr_src;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_ata_init(int queue_len, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_ata_var *var;
+ struct bcom_ata_inc *inc;
+
+ /* Prefetch breaks ATA DMA. Turn it off for ATA DMA */
+ bcom_disable_prefetch();
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0);
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ bcom_ata_reset_bd(tsk);
+
+ var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_ata_task)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = maxbufsize;
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_ATA_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_RX], BCOM_IPR_ATA_RX);
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_TX], BCOM_IPR_ATA_TX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_ata_init);
+
+void bcom_ata_rx_prepare(struct bcom_task *tsk)
+{
+ struct bcom_ata_inc *inc;
+
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = 0;
+ inc->incr_dst = sizeof(u32);
+
+ bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_RX);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_rx_prepare);
+
+void bcom_ata_tx_prepare(struct bcom_task *tsk)
+{
+ struct bcom_ata_inc *inc;
+
+ inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = sizeof(u32);
+ inc->incr_dst = 0;
+
+ bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_TX);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_tx_prepare);
+
+void bcom_ata_reset_bd(struct bcom_task *tsk)
+{
+ struct bcom_ata_var *var;
+
+ /* Reset all BD */
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
+ var->bd_start = var->bd_base;
+}
+EXPORT_SYMBOL_GPL(bcom_ata_reset_bd);
+
+void bcom_ata_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the ATA tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_ata_release);
+
+
+MODULE_DESCRIPTION("BestComm ATA task driver");
+MODULE_AUTHOR("John Rigby");
+MODULE_LICENSE("GPL v2");
+
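ata.c above exposes a small init/prepare/enable/release interface around the ATA task. A hedged sketch of how an ATA host driver might drive it; the ring length, buffer size and function names are illustrative, not taken from a real consumer:

#include <linux/errno.h>
#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/ata.h>

#define ATA_DMA_QUEUE_LEN 64            /* illustrative BD ring size */
#define ATA_DMA_MAX_BUF   0x10000       /* illustrative maximum transfer size */

static struct bcom_task *ata_dma_task;

static int ata_dma_probe(void)
{
        /* Allocates the BD ring, loads the microcode and clears the ring */
        ata_dma_task = bcom_ata_init(ATA_DMA_QUEUE_LEN, ATA_DMA_MAX_BUF);
        return ata_dma_task ? 0 : -ENOMEM;
}

static void ata_dma_start_read(void)
{
        bcom_ata_rx_prepare(ata_dma_task);      /* device -> memory increments */
        bcom_enable(ata_dma_task);
}

static void ata_dma_start_write(void)
{
        bcom_ata_tx_prepare(ata_dma_task);      /* memory -> device increments */
        bcom_enable(ata_dma_task);
}

static void ata_dma_remove(void)
{
        bcom_disable(ata_dma_task);
        bcom_ata_release(ata_dma_task);
}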
diff --git a/drivers/dma/bestcomm/bcom_ata_task.c b/drivers/dma/bestcomm/bcom_ata_task.c
new file mode 100644
index 000000000000..cc6049a4e469
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_ata_task.c
@@ -0,0 +1,67 @@
+/*
+ * Bestcomm ATA task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Created based on bestcom/code_dma/image_rtos1/dma_image.hex
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_ata_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0e060709,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x8198009b, /* LCD: idx0 = var3; idx0 <= var2; idx0 += inc3 */
+ 0x13e00c08, /* DRD1A: var3 = var1; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb8000264, /* LCD: idx1 = *idx0, idx2 = var0; idx1 < var9; idx1 += inc4, idx2 += inc4 */
+ 0x10000f00, /* DRD1A: var3 = idx0; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0c8cfc8a, /* DRD2B1: *idx2 = EU3(); EU3(*idx2,var10) */
+ 0xd8988240, /* LCDEXT: idx1 = idx1; idx1 > var9; idx1 += inc0 */
+ 0xf845e011, /* LCDEXT: idx2 = *(idx0 + var00000015); ; idx2 += inc2 */
+ 0xb845e00a, /* LCD: idx3 = *(idx0 + var00000019); ; idx3 += inc1 */
+ 0x0bfecf90, /* DRD1A: *idx3 = *idx2; FN=0 TFD init=31 WS=3 RS=3 */
+ 0x9898802d, /* LCD: idx1 = idx1; idx1 once var0; idx1 += inc5 */
+ 0x64000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 INT EXT init=0 WS=0 RS=0 */
+ 0x0c0cf849, /* DRD2B1: *idx0 = EU3(); EU3(idx1,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[14] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa000000c,
+ 0x20000000,
+ 0x00000000,
+ 0x00000000,
+};
+
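The four words at the top of bcom_ata_task[] are the image header described in the file's comment: 0x4243544b is the magic ("BCTK" in ASCII), and 0x0e060709 packs desc_size = 14, var_size = 6, inc_size = 7 and first_var = 9, which matches the 14 descriptors, 6 VAR words and 7 INC words that follow. A sketch of the structure bcom_load_image() reads this through, assuming bestcomm_priv.h lays it out exactly as the comment describes:

#include <linux/types.h>

#define BCOM_TASK_MAGIC 0x4243544B      /* "BCTK" */

/* Sketch of the image header consumed by bcom_load_image(); the real
 * definition lives in bestcomm_priv.h. */
struct bcom_task_header {
        u32     magic;
        u8      desc_size;      /* number of descriptor words */
        u8      var_size;       /* number of variable words */
        u8      inc_size;       /* number of increment words */
        u8      first_var;      /* index of the first variable set by the image */
        u8      reserved[8];
};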
diff --git a/drivers/dma/bestcomm/bcom_fec_rx_task.c b/drivers/dma/bestcomm/bcom_fec_rx_task.c
new file mode 100644
index 000000000000..a1ad6a02fcef
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_fec_rx_task.c
@@ -0,0 +1,78 @@
+/*
+ * Bestcomm FEC RX task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 22 11:19:38 2005 GMT
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_fec_rx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x18060709,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x808220e3, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
+ 0x10601010, /* DRD1A: var4 = var2; FN=0 MORE init=3 WS=0 RS=0 */
+ 0xb8800264, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc4, idx3 += inc4 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0xb8c58029, /* LCD: idx3 = *(idx1 + var00000015); idx3 once var0; idx3 += inc5 */
+ 0x60000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=0 RS=0 */
+ 0x088cf8cc, /* DRD2B1: idx2 = EU3(); EU3(idx3,var12) */
+ 0x991982f2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var11; idx2 += inc6, idx3 += inc2 */
+ 0x006acf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=1 RS=1 */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x034cfc4e, /* DRD2B1: var13 = EU3(); EU3(*idx1,var14) */
+ 0x00008868, /* DRD1A: idx2 = var13; FN=0 init=0 WS=0 RS=0 */
+ 0x99198341, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var13; idx2 += inc0, idx3 += inc1 */
+ 0x007ecf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=3 RS=3 */
+ 0x99198272, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc6, idx3 += inc2 */
+ 0x046acf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=3 WS=1 RS=1 */
+ 0x9819002d, /* LCD: idx2 = idx0; idx2 once var0; idx2 += inc5 */
+ 0x0060c790, /* DRD1A: *idx1 = *idx2; FN=0 init=3 WS=0 RS=0 */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[14] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000003,
+ 0x40000008,
+ 0x43ffffff,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x00000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_fec_tx_task.c b/drivers/dma/bestcomm/bcom_fec_tx_task.c
new file mode 100644
index 000000000000..b1c495c3a65a
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_fec_tx_task.c
@@ -0,0 +1,91 @@
+/*
+ * Bestcomm FEC TX task microcode
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 22 11:19:29 2005 GMT
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_fec_tx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x2407070d,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x8018001b, /* LCD: idx0 = var0; idx0 <= var0; idx0 += inc3 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x01ccfc0d, /* DRD2B1: var7 = EU3(); EU3(*idx0,var13) */
+ 0x8082a123, /* LCD: idx0 = var1, idx1 = var5; idx1 <= var4; idx0 += inc4, idx1 += inc3 */
+ 0x10801418, /* DRD1A: var5 = var3; FN=0 MORE init=4 WS=0 RS=0 */
+ 0xf88103a4, /* LCDEXT: idx2 = *idx1, idx3 = var2; idx2 < var14; idx2 += inc4, idx3 += inc4 */
+ 0x801a6024, /* LCD: idx4 = var0; ; idx4 += inc4 */
+ 0x10001708, /* DRD1A: var5 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfccf, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var15) */
+ 0x991a002c, /* LCD: idx2 = idx2, idx3 = idx4; idx2 once var0; idx2 += inc5, idx3 += inc4 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x024cfc4d, /* DRD2B1: var9 = EU3(); EU3(*idx1,var13) */
+ 0x60000003, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=3 EXT init=0 WS=0 RS=0 */
+ 0x0cccf247, /* DRD2B1: *idx3 = EU3(); EU3(var9,var7) */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0xb8c80029, /* LCD: idx3 = *(idx1 + var0000001a); idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x088cf8d1, /* DRD2B1: idx2 = EU3(); EU3(idx3,var17) */
+ 0x00002f10, /* DRD1A: var11 = idx2; FN=0 init=0 WS=0 RS=0 */
+ 0x99198432, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var16; idx2 += inc6, idx3 += inc2 */
+ 0x008ac398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=1 RS=1 */
+ 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
+ 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
+ 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
+ 0x048cfc53, /* DRD2B1: var18 = EU3(); EU3(*idx1,var19) */
+ 0x60000008, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=8 EXT init=0 WS=0 RS=0 */
+ 0x088cf48b, /* DRD2B1: idx2 = EU3(); EU3(var18,var11) */
+ 0x99198481, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var18; idx2 += inc0, idx3 += inc1 */
+ 0x009ec398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=3 RS=3 */
+ 0x991983b2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var14; idx2 += inc6, idx3 += inc2 */
+ 0x088ac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD init=4 WS=1 RS=1 */
+ 0x9919002d, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc5 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf88e, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var14) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[13]-VAR[19] */
+ 0x0c000000,
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x00000003,
+ 0x40000004,
+ 0x43ffffff,
+
+ /* INC[0]-INC[6] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x00000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c b/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c
new file mode 100644
index 000000000000..efee022b0256
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_gen_bd_rx_task.c
@@ -0,0 +1,63 @@
+/*
+ * Bestcomm GenBD RX task microcode
+ *
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 4 10:14:12 2006 GMT
+ *
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_gen_bd_rx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0d020409,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x808220da, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc3, idx1 += inc2 */
+ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb880025b, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc3, idx3 += inc3 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0xd9190240, /* LCDEXT: idx2 = idx2; idx2 > var9; idx2 += inc0 */
+ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
+ 0x07fecf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=31 WS=3 RS=3 */
+ 0x99190024, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc4 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[10] */
+ 0x40000000,
+ 0x7fff7fff,
+
+ /* INC[0]-INC[3] */
+ 0x40000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+};
+
diff --git a/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c b/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c
new file mode 100644
index 000000000000..c605aa42ecbb
--- /dev/null
+++ b/drivers/dma/bestcomm/bcom_gen_bd_tx_task.c
@@ -0,0 +1,69 @@
+/*
+ * Bestcomm GenBD TX task microcode
+ *
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
+ * on Tue Mar 4 10:14:12 2006 GMT
+ *
+ */
+
+#include <asm/types.h>
+
+/*
+ * The header consists of the following fields:
+ * u32 magic;
+ * u8 desc_size;
+ * u8 var_size;
+ * u8 inc_size;
+ * u8 first_var;
+ * u8 reserved[8];
+ *
+ * The size fields contain the number of 32-bit words.
+ */
+
+u32 bcom_gen_bd_tx_task[] = {
+ /* header */
+ 0x4243544b,
+ 0x0f040609,
+ 0x00000000,
+ 0x00000000,
+
+ /* Task descriptors */
+ 0x800220e3, /* LCD: idx0 = var0, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
+ 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */
+ 0xb8808264, /* LCD: idx2 = *idx1, idx3 = var1; idx2 < var9; idx2 += inc4, idx3 += inc4 */
+ 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
+ 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
+ 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
+ 0xd9190300, /* LCDEXT: idx2 = idx2; idx2 > var12; idx2 += inc0 */
+ 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */
+ 0x03fec398, /* DRD1A: *idx0 = *idx3; FN=0 init=31 WS=3 RS=3 */
+ 0x9919826a, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc5, idx3 += inc2 */
+ 0x0feac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD INT init=31 WS=1 RS=1 */
+ 0x99190036, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc6 */
+ 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */
+ 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */
+ 0x000001f8, /* NOP */
+
+ /* VAR[9]-VAR[12] */
+ 0x40000000,
+ 0x7fff7fff,
+ 0x00000000,
+ 0x40000004,
+
+ /* INC[0]-INC[5] */
+ 0x40000000,
+ 0xe0000000,
+ 0xe0000000,
+ 0xa0000008,
+ 0x20000000,
+ 0x4000ffff,
+};
+
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
new file mode 100644
index 000000000000..a8c2e2994d2e
--- /dev/null
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -0,0 +1,531 @@
+/*
+ * Driver for MPC52xx processor BestComm peripheral controller
+ *
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2005 Varma Electronics Oy,
+ * ( by Andrey Volkov <avolkov@varma-el.com> )
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mpc52xx.h>
+
+#include <linux/fsl/bestcomm/sram.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include "linux/fsl/bestcomm/bestcomm.h"
+
+#define DRIVER_NAME "bestcomm-core"
+
+/* MPC5200 device tree match tables */
+static struct of_device_id mpc52xx_sram_ids[] = {
+ { .compatible = "fsl,mpc5200-sram", },
+ { .compatible = "mpc5200-sram", },
+ {}
+};
+
+
+struct bcom_engine *bcom_eng = NULL;
+EXPORT_SYMBOL_GPL(bcom_eng); /* needed for inline functions */
+
+/* ======================================================================== */
+/* Public and private API */
+/* ======================================================================== */
+
+/* Private API */
+
+struct bcom_task *
+bcom_task_alloc(int bd_count, int bd_size, int priv_size)
+{
+ int i, tasknum = -1;
+ struct bcom_task *tsk;
+
+ /* Don't try to do anything if bestcomm init failed */
+ if (!bcom_eng)
+ return NULL;
+
+ /* Get and reserve a task num */
+ spin_lock(&bcom_eng->lock);
+
+ for (i=0; i<BCOM_MAX_TASKS; i++)
+ if (!bcom_eng->tdt[i].stop) { /* we use stop as a marker */
+ bcom_eng->tdt[i].stop = 0xfffffffful; /* dummy addr */
+ tasknum = i;
+ break;
+ }
+
+ spin_unlock(&bcom_eng->lock);
+
+ if (tasknum < 0)
+ return NULL;
+
+ /* Allocate our structure */
+ tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL);
+ if (!tsk)
+ goto error;
+
+ tsk->tasknum = tasknum;
+ if (priv_size)
+ tsk->priv = (void*)tsk + sizeof(struct bcom_task);
+
+ /* Get IRQ of that task */
+ tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
+ if (tsk->irq == NO_IRQ)
+ goto error;
+
+ /* Init the BDs, if needed */
+ if (bd_count) {
+ tsk->cookie = kmalloc(sizeof(void*) * bd_count, GFP_KERNEL);
+ if (!tsk->cookie)
+ goto error;
+
+ tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa);
+ if (!tsk->bd)
+ goto error;
+ memset(tsk->bd, 0x00, bd_count * bd_size);
+
+ tsk->num_bd = bd_count;
+ tsk->bd_size = bd_size;
+ }
+
+ return tsk;
+
+error:
+ if (tsk) {
+ if (tsk->irq != NO_IRQ)
+ irq_dispose_mapping(tsk->irq);
+ bcom_sram_free(tsk->bd);
+ kfree(tsk->cookie);
+ kfree(tsk);
+ }
+
+ bcom_eng->tdt[tasknum].stop = 0;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(bcom_task_alloc);
+
+void
+bcom_task_free(struct bcom_task *tsk)
+{
+ /* Stop the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Clear TDT */
+ bcom_eng->tdt[tsk->tasknum].start = 0;
+ bcom_eng->tdt[tsk->tasknum].stop = 0;
+
+ /* Free everything */
+ irq_dispose_mapping(tsk->irq);
+ bcom_sram_free(tsk->bd);
+ kfree(tsk->cookie);
+ kfree(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_task_free);
+
+int
+bcom_load_image(int task, u32 *task_image)
+{
+ struct bcom_task_header *hdr = (struct bcom_task_header *)task_image;
+ struct bcom_tdt *tdt;
+ u32 *desc, *var, *inc;
+ u32 *desc_src, *var_src, *inc_src;
+
+ /* Safety checks */
+ if (hdr->magic != BCOM_TASK_MAGIC) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to load invalid microcode\n");
+ return -EINVAL;
+ }
+
+ if ((task < 0) || (task >= BCOM_MAX_TASKS)) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to load invalid task %d\n", task);
+ return -EINVAL;
+ }
+
+ /* Initial load or reload */
+ tdt = &bcom_eng->tdt[task];
+
+ if (tdt->start) {
+ desc = bcom_task_desc(task);
+ if (hdr->desc_size != bcom_task_num_descs(task)) {
+ printk(KERN_ERR DRIVER_NAME
+ ": Trying to reload wrong task image "
+ "(%d size %d/%d)!\n",
+ task,
+ hdr->desc_size,
+ bcom_task_num_descs(task));
+ return -EINVAL;
+ }
+ } else {
+ phys_addr_t start_pa;
+
+ desc = bcom_sram_alloc(hdr->desc_size * sizeof(u32), 4, &start_pa);
+ if (!desc)
+ return -ENOMEM;
+
+ tdt->start = start_pa;
+ tdt->stop = start_pa + ((hdr->desc_size-1) * sizeof(u32));
+ }
+
+ var = bcom_task_var(task);
+ inc = bcom_task_inc(task);
+
+ /* Clear & copy */
+ memset(var, 0x00, BCOM_VAR_SIZE);
+ memset(inc, 0x00, BCOM_INC_SIZE);
+
+ desc_src = (u32 *)(hdr + 1);
+ var_src = desc_src + hdr->desc_size;
+ inc_src = var_src + hdr->var_size;
+
+ memcpy(desc, desc_src, hdr->desc_size * sizeof(u32));
+ memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
+ memcpy(inc, inc_src, hdr->inc_size * sizeof(u32));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_load_image);
+
+void
+bcom_set_initiator(int task, int initiator)
+{
+ int i;
+ int num_descs;
+ u32 *desc;
+ int next_drd_has_initiator;
+
+ bcom_set_tcr_initiator(task, initiator);
+
+ /* Just setting tcr is apparently not enough due to some problem */
+ /* with it. So we just go thru all the microcode and replace in */
+ /* the DRD directly */
+
+ desc = bcom_task_desc(task);
+ next_drd_has_initiator = 1;
+ num_descs = bcom_task_num_descs(task);
+
+ for (i=0; i<num_descs; i++, desc++) {
+ if (!bcom_desc_is_drd(*desc))
+ continue;
+ if (next_drd_has_initiator)
+ if (bcom_desc_initiator(*desc) != BCOM_INITIATOR_ALWAYS)
+ bcom_set_desc_initiator(desc, initiator);
+ next_drd_has_initiator = !bcom_drd_is_extended(*desc);
+ }
+}
+EXPORT_SYMBOL_GPL(bcom_set_initiator);
+
+
+/* Public API */
+
+void
+bcom_enable(struct bcom_task *tsk)
+{
+ bcom_enable_task(tsk->tasknum);
+}
+EXPORT_SYMBOL_GPL(bcom_enable);
+
+void
+bcom_disable(struct bcom_task *tsk)
+{
+ bcom_disable_task(tsk->tasknum);
+}
+EXPORT_SYMBOL_GPL(bcom_disable);
+
+
+/* ======================================================================== */
+/* Engine init/cleanup */
+/* ======================================================================== */
+
+/* Function Descriptor table */
+/* this will need to be updated if Freescale changes their task code FDT */
+static u32 fdt_ops[] = {
+ 0xa0045670, /* FDT[48] - load_acc() */
+ 0x80045670, /* FDT[49] - unload_acc() */
+ 0x21800000, /* FDT[50] - and() */
+ 0x21e00000, /* FDT[51] - or() */
+ 0x21500000, /* FDT[52] - xor() */
+ 0x21400000, /* FDT[53] - andn() */
+ 0x21500000, /* FDT[54] - not() */
+ 0x20400000, /* FDT[55] - add() */
+ 0x20500000, /* FDT[56] - sub() */
+ 0x20800000, /* FDT[57] - lsh() */
+ 0x20a00000, /* FDT[58] - rsh() */
+ 0xc0170000, /* FDT[59] - crc8() */
+ 0xc0145670, /* FDT[60] - crc16() */
+ 0xc0345670, /* FDT[61] - crc32() */
+ 0xa0076540, /* FDT[62] - endian32() */
+ 0xa0000760, /* FDT[63] - endian16() */
+};
+
+
+static int bcom_engine_init(void)
+{
+ int task;
+ phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
+ unsigned int tdt_size, ctx_size, var_size, fdt_size;
+
+ /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
+ tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
+ ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE;
+ var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE);
+ fdt_size = BCOM_FDT_SIZE;
+
+ bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa);
+ bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa);
+ bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa);
+ bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa);
+
+ if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) {
+ printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n");
+
+ bcom_sram_free(bcom_eng->tdt);
+ bcom_sram_free(bcom_eng->ctx);
+ bcom_sram_free(bcom_eng->var);
+ bcom_sram_free(bcom_eng->fdt);
+
+ return -ENOMEM;
+ }
+
+ memset(bcom_eng->tdt, 0x00, tdt_size);
+ memset(bcom_eng->ctx, 0x00, ctx_size);
+ memset(bcom_eng->var, 0x00, var_size);
+ memset(bcom_eng->fdt, 0x00, fdt_size);
+
+ /* Copy the FDT for the EU#3 */
+ memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));
+
+ /* Initialize Task base structure */
+ for (task=0; task<BCOM_MAX_TASKS; task++)
+ {
+ out_be16(&bcom_eng->regs->tcr[task], 0);
+ out_8(&bcom_eng->regs->ipr[task], 0);
+
+ bcom_eng->tdt[task].context = ctx_pa;
+ bcom_eng->tdt[task].var = var_pa;
+ bcom_eng->tdt[task].fdt = fdt_pa;
+
+ var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE;
+ ctx_pa += BCOM_CTX_SIZE;
+ }
+
+ out_be32(&bcom_eng->regs->taskBar, tdt_pa);
+
+ /* Init 'always' initiator */
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);
+
+ /* Disable COMM Bus Prefetch on the original 5200; it's broken */
+ if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
+ bcom_disable_prefetch();
+
+ /* Init lock */
+ spin_lock_init(&bcom_eng->lock);
+
+ return 0;
+}
+
+static void
+bcom_engine_cleanup(void)
+{
+ int task;
+
+ /* Stop all tasks */
+ for (task=0; task<BCOM_MAX_TASKS; task++)
+ {
+ out_be16(&bcom_eng->regs->tcr[task], 0);
+ out_8(&bcom_eng->regs->ipr[task], 0);
+ }
+
+ out_be32(&bcom_eng->regs->taskBar, 0ul);
+
+ /* Release the SRAM zones */
+ bcom_sram_free(bcom_eng->tdt);
+ bcom_sram_free(bcom_eng->ctx);
+ bcom_sram_free(bcom_eng->var);
+ bcom_sram_free(bcom_eng->fdt);
+}
+
+
+/* ======================================================================== */
+/* OF platform driver */
+/* ======================================================================== */
+
+static int mpc52xx_bcom_probe(struct platform_device *op)
+{
+ struct device_node *ofn_sram;
+ struct resource res_bcom;
+
+ int rv;
+
+ /* Inform user we're ok so far */
+ printk(KERN_INFO "DMA: MPC52xx BestComm driver\n");
+
+ /* Get the bestcomm node */
+ of_node_get(op->dev.of_node);
+
+ /* Prepare SRAM */
+ ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids);
+ if (!ofn_sram) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "No SRAM found in device tree\n");
+ rv = -ENODEV;
+ goto error_ofput;
+ }
+ rv = bcom_sram_init(ofn_sram, DRIVER_NAME);
+ of_node_put(ofn_sram);
+
+ if (rv) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Error in SRAM init\n");
+ goto error_ofput;
+ }
+
+ /* Get a clean struct */
+ bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
+ if (!bcom_eng) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't allocate state structure\n");
+ rv = -ENOMEM;
+ goto error_sramclean;
+ }
+
+ /* Save the node */
+ bcom_eng->ofnode = op->dev.of_node;
+
+ /* Get, reserve & map io */
+ if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't get resource\n");
+ rv = -EINVAL;
+ goto error_sramclean;
+ }
+
+ if (!request_mem_region(res_bcom.start, resource_size(&res_bcom),
+ DRIVER_NAME)) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't request registers region\n");
+ rv = -EBUSY;
+ goto error_sramclean;
+ }
+
+ bcom_eng->regs_base = res_bcom.start;
+ bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma));
+ if (!bcom_eng->regs) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Can't map registers\n");
+ rv = -ENOMEM;
+ goto error_release;
+ }
+
+ /* Now, do the real init */
+ rv = bcom_engine_init();
+ if (rv)
+ goto error_unmap;
+
+ /* Done ! */
+ printk(KERN_INFO "DMA: MPC52xx BestComm engine @%08lx ok !\n",
+ (long)bcom_eng->regs_base);
+
+ return 0;
+
+ /* Error path */
+error_unmap:
+ iounmap(bcom_eng->regs);
+error_release:
+ release_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma));
+error_sramclean:
+ kfree(bcom_eng);
+ bcom_sram_cleanup();
+error_ofput:
+ of_node_put(op->dev.of_node);
+
+ printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n");
+
+ return rv;
+}
+
+
+static int mpc52xx_bcom_remove(struct platform_device *op)
+{
+ /* Clean up the engine */
+ bcom_engine_cleanup();
+
+ /* Cleanup SRAM */
+ bcom_sram_cleanup();
+
+ /* Release regs */
+ iounmap(bcom_eng->regs);
+ release_mem_region(bcom_eng->regs_base, sizeof(struct mpc52xx_sdma));
+
+ /* Release the node */
+ of_node_put(bcom_eng->ofnode);
+
+ /* Release memory */
+ kfree(bcom_eng);
+ bcom_eng = NULL;
+
+ return 0;
+}
+
+static struct of_device_id mpc52xx_bcom_of_match[] = {
+ { .compatible = "fsl,mpc5200-bestcomm", },
+ { .compatible = "mpc5200-bestcomm", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
+
+
+static struct platform_driver mpc52xx_bcom_of_platform_driver = {
+ .probe = mpc52xx_bcom_probe,
+ .remove = mpc52xx_bcom_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_bcom_of_match,
+ },
+};
+
+
+/* ======================================================================== */
+/* Module */
+/* ======================================================================== */
+
+static int __init
+mpc52xx_bcom_init(void)
+{
+ return platform_driver_register(&mpc52xx_bcom_of_platform_driver);
+}
+
+static void __exit
+mpc52xx_bcom_exit(void)
+{
+ platform_driver_unregister(&mpc52xx_bcom_of_platform_driver);
+}
+
+/* If we're not a module, we must make sure everything is set up before */
+/* anyone tries to use us ... that's why we use subsys_initcall instead */
+/* of module_init. */
+subsys_initcall(mpc52xx_bcom_init);
+module_exit(mpc52xx_bcom_exit);
+
+MODULE_DESCRIPTION("Freescale MPC52xx BestComm DMA");
+MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
+MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
+MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
+MODULE_LICENSE("GPL v2");
+
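bcom_task_alloc() above maps one interrupt per task from the bestcomm device-tree node and stores it in tsk->irq; the core never requests that interrupt itself, it is left to the client driver. A hedged sketch of the usual hookup; the handler name, flags and device name are illustrative:

#include <linux/interrupt.h>
#include <linux/fsl/bestcomm/bestcomm.h>

static irqreturn_t my_bcom_irq(int irq, void *dev_id)
{
        struct bcom_task *tsk = dev_id;

        /* Acknowledge and process the completed buffer descriptors here */
        (void)tsk;
        return IRQ_HANDLED;
}

static int my_bcom_start(struct bcom_task *tsk)
{
        int err;

        /* tsk->irq was mapped in bcom_task_alloc() above */
        err = request_irq(tsk->irq, my_bcom_irq, 0, "my-bcom-task", tsk);
        if (err)
                return err;

        bcom_enable(tsk);
        return 0;
}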
diff --git a/drivers/dma/bestcomm/fec.c b/drivers/dma/bestcomm/fec.c
new file mode 100644
index 000000000000..7f1fb1c999e4
--- /dev/null
+++ b/drivers/dma/bestcomm/fec.c
@@ -0,0 +1,270 @@
+/*
+ * Bestcomm FEC tasks driver
+ *
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/fec.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* fec tasks images */
+extern u32 bcom_fec_rx_task[];
+extern u32 bcom_fec_tx_task[];
+
+/* rx task vars that need to be set before enabling the task */
+struct bcom_fec_rx_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 fifo; /* (u32*) address of fec's fifo */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* rx task incs that need to be set before enabling the task */
+struct bcom_fec_rx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+ u16 pad2;
+ s16 incr_dst_ma;
+};
+
+/* tx task vars that need to be set before enabling the task */
+struct bcom_fec_tx_var {
+ u32 DRD; /* (u32*) address of self-modified DRD */
+ u32 fifo; /* (u32*) address of fec's fifo */
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* set by uCode for each packet */
+};
+
+/* tx task incs that need to be set before enabling the task */
+struct bcom_fec_tx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_src;
+ u16 pad2;
+ s16 incr_src_ma;
+};
+
+/* private structure in the task */
+struct bcom_fec_priv {
+ phys_addr_t fifo;
+ int maxbufsize;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_fec_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
+ sizeof(struct bcom_fec_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->maxbufsize = maxbufsize;
+
+ if (bcom_fec_rx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_init);
+
+int
+bcom_fec_rx_reset(struct bcom_task *tsk)
+{
+ struct bcom_fec_priv *priv = tsk->priv;
+ struct bcom_fec_rx_var *var;
+ struct bcom_fec_rx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_fec_rx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_fec_rx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_fec_rx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = priv->maxbufsize;
+
+ inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */
+ inc->incr_dst = sizeof(u32); /* task image, but we stick */
+ inc->incr_dst_ma= sizeof(u8); /* to the official ones */
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_reset);
+
+void
+bcom_fec_rx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the FEC tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_fec_rx_release);
+
+
+
+ /* Return 2nd to last DRD */
+ /* This is an ugly hack, but at least it's only done
+ once at initialization */
+static u32 *self_modified_drd(int tasknum)
+{
+ u32 *desc;
+ int num_descs;
+ int drd_count;
+ int i;
+
+ num_descs = bcom_task_num_descs(tasknum);
+ desc = bcom_task_desc(tasknum) + num_descs - 1;
+ drd_count = 0;
+ for (i=0; i<num_descs; i++, desc--)
+ if (bcom_desc_is_drd(*desc) && ++drd_count == 3)
+ break;
+ return desc;
+}
+
+struct bcom_task *
+bcom_fec_tx_init(int queue_len, phys_addr_t fifo)
+{
+ struct bcom_task *tsk;
+ struct bcom_fec_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
+ sizeof(struct bcom_fec_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_ENABLE_TASK;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+
+ if (bcom_fec_tx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_init);
+
+int
+bcom_fec_tx_reset(struct bcom_task *tsk)
+{
+ struct bcom_fec_priv *priv = tsk->priv;
+ struct bcom_fec_tx_var *var;
+ struct bcom_fec_tx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_fec_tx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_fec_tx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_fec_tx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->DRD = bcom_sram_va2pa(self_modified_drd(tsk->tasknum));
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+
+ inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */
+ inc->incr_src = sizeof(u32); /* task image, but we stick */
+ inc->incr_src_ma= sizeof(u8); /* to the official ones */
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_TX], BCOM_IPR_FEC_TX);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_reset);
+
+void
+bcom_fec_tx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the FEC tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_fec_tx_release);
+
+
+MODULE_DESCRIPTION("BestComm FEC tasks driver");
+MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
+MODULE_LICENSE("GPL v2");
+
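A hedged sketch of how an FEC Ethernet driver would sit on top of the two task initializers above; the ring lengths, buffer size and FIFO addresses are placeholders:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/fec.h>

#define FEC_RX_RING_LEN 64      /* placeholder ring sizes */
#define FEC_TX_RING_LEN 64
#define FEC_RX_BUF_SIZE 2048    /* placeholder maximum frame size */

static int fec_dma_setup(phys_addr_t rx_fifo_pa, phys_addr_t tx_fifo_pa,
                         struct bcom_task **rx, struct bcom_task **tx)
{
        *rx = bcom_fec_rx_init(FEC_RX_RING_LEN, rx_fifo_pa, FEC_RX_BUF_SIZE);
        *tx = bcom_fec_tx_init(FEC_TX_RING_LEN, tx_fifo_pa);
        if (!*rx || !*tx) {
                if (*rx)
                        bcom_fec_rx_release(*rx);
                if (*tx)
                        bcom_fec_tx_release(*tx);
                return -ENOMEM;
        }

        /* After a FIFO error the tasks can be reset in place (microcode
         * reloaded, BD ring cleared) with bcom_fec_rx_reset()/_tx_reset(). */
        bcom_enable(*rx);
        bcom_enable(*tx);
        return 0;
}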
diff --git a/drivers/dma/bestcomm/gen_bd.c b/drivers/dma/bestcomm/gen_bd.c
new file mode 100644
index 000000000000..1a5b22d88127
--- /dev/null
+++ b/drivers/dma/bestcomm/gen_bd.c
@@ -0,0 +1,354 @@
+/*
+ * Driver for MPC52xx processor BestComm General Buffer Descriptor
+ *
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+
+#include <asm/mpc52xx.h>
+#include <asm/mpc52xx_psc.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/bestcomm_priv.h>
+#include <linux/fsl/bestcomm/gen_bd.h>
+
+
+/* ======================================================================== */
+/* Task image/var/inc */
+/* ======================================================================== */
+
+/* gen_bd tasks images */
+extern u32 bcom_gen_bd_rx_task[];
+extern u32 bcom_gen_bd_tx_task[];
+
+/* rx task vars that need to be set before enabling the task */
+struct bcom_gen_bd_rx_var {
+ u32 enable; /* (u16*) address of task's control register */
+ u32 fifo; /* (u32*) address of gen_bd's fifo */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* size of receive buffer */
+};
+
+/* rx task incs that need to be set before enabling the task */
+struct bcom_gen_bd_rx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_dst;
+};
+
+/* tx task vars that need to be set before enabling the task */
+struct bcom_gen_bd_tx_var {
+ u32 fifo; /* (u32*) address of gen_bd's fifo */
+ u32 enable; /* (u16*) address of task's control register */
+ u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */
+ u32 bd_last; /* (struct bcom_bd*) end of ring buffer */
+ u32 bd_start; /* (struct bcom_bd*) current bd */
+ u32 buffer_size; /* set by uCode for each packet */
+};
+
+/* tx task incs that need to be set before enabling the task */
+struct bcom_gen_bd_tx_inc {
+ u16 pad0;
+ s16 incr_bytes;
+ u16 pad1;
+ s16 incr_src;
+ u16 pad2;
+ s16 incr_src_ma;
+};
+
+/* private structure */
+struct bcom_gen_bd_priv {
+ phys_addr_t fifo;
+ int initiator;
+ int ipr;
+ int maxbufsize;
+};
+
+
+/* ======================================================================== */
+/* Task support code */
+/* ======================================================================== */
+
+struct bcom_task *
+bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo,
+ int initiator, int ipr, int maxbufsize)
+{
+ struct bcom_task *tsk;
+ struct bcom_gen_bd_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
+ sizeof(struct bcom_gen_bd_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->initiator = initiator;
+ priv->ipr = ipr;
+ priv->maxbufsize = maxbufsize;
+
+ if (bcom_gen_bd_rx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_init);
+
+int
+bcom_gen_bd_rx_reset(struct bcom_task *tsk)
+{
+ struct bcom_gen_bd_priv *priv = tsk->priv;
+ struct bcom_gen_bd_rx_var *var;
+ struct bcom_gen_bd_rx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_gen_bd_rx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_gen_bd_rx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_gen_bd_rx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+ var->buffer_size = priv->maxbufsize;
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_dst = sizeof(u32);
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
+ bcom_set_initiator(tsk->tasknum, priv->initiator);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_reset);
+
+void
+bcom_gen_bd_rx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the GenBD tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_release);
+
+
+extern struct bcom_task *
+bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo,
+ int initiator, int ipr)
+{
+ struct bcom_task *tsk;
+ struct bcom_gen_bd_priv *priv;
+
+ tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
+ sizeof(struct bcom_gen_bd_priv));
+ if (!tsk)
+ return NULL;
+
+ tsk->flags = BCOM_FLAGS_NONE;
+
+ priv = tsk->priv;
+ priv->fifo = fifo;
+ priv->initiator = initiator;
+ priv->ipr = ipr;
+
+ if (bcom_gen_bd_tx_reset(tsk)) {
+ bcom_task_free(tsk);
+ return NULL;
+ }
+
+ return tsk;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_init);
+
+int
+bcom_gen_bd_tx_reset(struct bcom_task *tsk)
+{
+ struct bcom_gen_bd_priv *priv = tsk->priv;
+ struct bcom_gen_bd_tx_var *var;
+ struct bcom_gen_bd_tx_inc *inc;
+
+ /* Shutdown the task */
+ bcom_disable_task(tsk->tasknum);
+
+ /* Reset the microcode */
+ var = (struct bcom_gen_bd_tx_var *) bcom_task_var(tsk->tasknum);
+ inc = (struct bcom_gen_bd_tx_inc *) bcom_task_inc(tsk->tasknum);
+
+ if (bcom_load_image(tsk->tasknum, bcom_gen_bd_tx_task))
+ return -1;
+
+ var->enable = bcom_eng->regs_base +
+ offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
+ var->fifo = (u32) priv->fifo;
+ var->bd_base = tsk->bd_pa;
+ var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
+ var->bd_start = tsk->bd_pa;
+
+ inc->incr_bytes = -(s16)sizeof(u32);
+ inc->incr_src = sizeof(u32);
+ inc->incr_src_ma = sizeof(u8);
+
+ /* Reset the BDs */
+ tsk->index = 0;
+ tsk->outdex = 0;
+
+ memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
+
+ /* Configure some stuff */
+ bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA);
+ bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
+
+ out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr);
+ bcom_set_initiator(tsk->tasknum, priv->initiator);
+
+ out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_reset);
+
+void
+bcom_gen_bd_tx_release(struct bcom_task *tsk)
+{
+ /* Nothing special for the GenBD tasks */
+ bcom_task_free(tsk);
+}
+EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_release);
+
+/* ---------------------------------------------------------------------
+ * PSC support code
+ */
+
+/**
+ * bcom_psc_parameters - Bestcomm initialization value table for PSC devices
+ *
+ * This structure is only used internally. It is a lookup table for PSC
+ * specific parameters to bestcomm tasks.
+ */
+static struct bcom_psc_params {
+ int rx_initiator;
+ int rx_ipr;
+ int tx_initiator;
+ int tx_ipr;
+} bcom_psc_params[] = {
+ [0] = {
+ .rx_initiator = BCOM_INITIATOR_PSC1_RX,
+ .rx_ipr = BCOM_IPR_PSC1_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC1_TX,
+ .tx_ipr = BCOM_IPR_PSC1_TX,
+ },
+ [1] = {
+ .rx_initiator = BCOM_INITIATOR_PSC2_RX,
+ .rx_ipr = BCOM_IPR_PSC2_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC2_TX,
+ .tx_ipr = BCOM_IPR_PSC2_TX,
+ },
+ [2] = {
+ .rx_initiator = BCOM_INITIATOR_PSC3_RX,
+ .rx_ipr = BCOM_IPR_PSC3_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC3_TX,
+ .tx_ipr = BCOM_IPR_PSC3_TX,
+ },
+ [3] = {
+ .rx_initiator = BCOM_INITIATOR_PSC4_RX,
+ .rx_ipr = BCOM_IPR_PSC4_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC4_TX,
+ .tx_ipr = BCOM_IPR_PSC4_TX,
+ },
+ [4] = {
+ .rx_initiator = BCOM_INITIATOR_PSC5_RX,
+ .rx_ipr = BCOM_IPR_PSC5_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC5_TX,
+ .tx_ipr = BCOM_IPR_PSC5_TX,
+ },
+ [5] = {
+ .rx_initiator = BCOM_INITIATOR_PSC6_RX,
+ .rx_ipr = BCOM_IPR_PSC6_RX,
+ .tx_initiator = BCOM_INITIATOR_PSC6_TX,
+ .tx_ipr = BCOM_IPR_PSC6_TX,
+ },
+};
+
+/**
+ * bcom_psc_gen_bd_rx_init - Allocate a receive bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ * @maxbufsize: Maximum receive data size in bytes.
+ *
+ * Allocate a bestcomm task structure for receiving data from a PSC.
+ */
+struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len,
+ phys_addr_t fifo, int maxbufsize)
+{
+ if (psc_num >= MPC52xx_PSC_MAXNUM)
+ return NULL;
+
+ return bcom_gen_bd_rx_init(queue_len, fifo,
+ bcom_psc_params[psc_num].rx_initiator,
+ bcom_psc_params[psc_num].rx_ipr,
+ maxbufsize);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_rx_init);
+
+/**
+ * bcom_psc_gen_bd_tx_init - Allocate a transmit bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ *
+ * Allocate a bestcomm task structure for transmitting data to a PSC.
+ */
+struct bcom_task *
+bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, phys_addr_t fifo)
+{
+ struct psc;
+ return bcom_gen_bd_tx_init(queue_len, fifo,
+ bcom_psc_params[psc_num].tx_initiator,
+ bcom_psc_params[psc_num].tx_ipr);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_tx_init);
+
+
+MODULE_DESCRIPTION("BestComm General Buffer Descriptor tasks driver");
+MODULE_AUTHOR("Jeff Gibbons <jeff.gibbons@appspec.com>");
+MODULE_LICENSE("GPL v2");
+
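The PSC wrappers at the end of gen_bd.c hide the per-port initiator/IPR lookup, so a PSC client (UART, SPI, audio) only has to pass its port number and FIFO address. A hedged sketch; the ring length, receive limit and function name are placeholders:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/gen_bd.h>

static int psc_dma_setup(unsigned int psc_num, phys_addr_t fifo_pa,
                         struct bcom_task **rx, struct bcom_task **tx)
{
        /* 32 BDs per direction and a 512-byte receive limit are placeholders */
        *rx = bcom_psc_gen_bd_rx_init(psc_num, 32, fifo_pa, 512);
        *tx = bcom_psc_gen_bd_tx_init(psc_num, 32, fifo_pa);
        if (!*rx || !*tx) {
                if (*rx)
                        bcom_gen_bd_rx_release(*rx);
                if (*tx)
                        bcom_gen_bd_tx_release(*tx);
                return -ENOMEM;
        }
        return 0;
}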
diff --git a/drivers/dma/bestcomm/sram.c b/drivers/dma/bestcomm/sram.c
new file mode 100644
index 000000000000..5e2ed30ba2c4
--- /dev/null
+++ b/drivers/dma/bestcomm/sram.c
@@ -0,0 +1,178 @@
+/*
+ * Simple memory allocator for on-board SRAM
+ *
+ *
+ * Maintainer : Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+
+#include <asm/io.h>
+#include <asm/mmu.h>
+
+#include <linux/fsl/bestcomm/sram.h>
+
+
+/* Struct keeping our 'state' */
+struct bcom_sram *bcom_sram = NULL;
+EXPORT_SYMBOL_GPL(bcom_sram); /* needed for inline functions */
+
+
+/* ======================================================================== */
+/* Public API */
+/* ======================================================================== */
+/* DO NOT USE in interrupts, if needed in irq handler, we should use the
+ _irqsave version of the spin_locks */
+
+int bcom_sram_init(struct device_node *sram_node, char *owner)
+{
+ int rv;
+ const u32 *regaddr_p;
+ u64 regaddr64, size64;
+ unsigned int psize;
+
+ /* Create our state struct */
+ if (bcom_sram) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Already initialized !\n", owner);
+ return -EBUSY;
+ }
+
+ bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL);
+ if (!bcom_sram) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Couldn't allocate internal state !\n", owner);
+ return -ENOMEM;
+ }
+
+ /* Get address and size of the sram */
+ regaddr_p = of_get_address(sram_node, 0, &size64, NULL);
+ if (!regaddr_p) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Invalid device node !\n", owner);
+ rv = -EINVAL;
+ goto error_free;
+ }
+
+ regaddr64 = of_translate_address(sram_node, regaddr_p);
+
+ bcom_sram->base_phys = (phys_addr_t) regaddr64;
+ bcom_sram->size = (unsigned int) size64;
+
+ /* Request region */
+ if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Couldn't request region !\n", owner);
+ rv = -EBUSY;
+ goto error_free;
+ }
+
+ /* Map SRAM */
+ /* sram is not really __iomem */
+ bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size);
+
+ if (!bcom_sram->base_virt) {
+ printk(KERN_ERR "%s: bcom_sram_init: "
+ "Map error SRAM zone 0x%08lx (0x%0x)!\n",
+ owner, (long)bcom_sram->base_phys, bcom_sram->size );
+ rv = -ENOMEM;
+ goto error_release;
+ }
+
+ /* Create an rheap (defaults to 32 bits word alignment) */
+ bcom_sram->rh = rh_create(4);
+
+ /* Attach the free zones */
+#if 0
+ /* Currently disabled ... for future use only */
+ reg_addr_p = of_get_property(sram_node, "available", &psize);
+#else
+ regaddr_p = NULL;
+ psize = 0;
+#endif
+
+ if (!regaddr_p || !psize) {
+ /* Attach the whole zone */
+ rh_attach_region(bcom_sram->rh, 0, bcom_sram->size);
+ } else {
+ /* Attach each zone independently */
+ while (psize >= 2 * sizeof(u32)) {
+ phys_addr_t zbase = of_translate_address(sram_node, regaddr_p);
+ rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]);
+ regaddr_p += 2;
+ psize -= 2 * sizeof(u32);
+ }
+ }
+
+ /* Init our spinlock */
+ spin_lock_init(&bcom_sram->lock);
+
+ return 0;
+
+error_release:
+ release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+error_free:
+ kfree(bcom_sram);
+ bcom_sram = NULL;
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(bcom_sram_init);
+
+void bcom_sram_cleanup(void)
+{
+ /* Free resources */
+ if (bcom_sram) {
+ rh_destroy(bcom_sram->rh);
+ iounmap((void __iomem *)bcom_sram->base_virt);
+ release_mem_region(bcom_sram->base_phys, bcom_sram->size);
+ kfree(bcom_sram);
+ bcom_sram = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(bcom_sram_cleanup);
+
+void* bcom_sram_alloc(int size, int align, phys_addr_t *phys)
+{
+ unsigned long offset;
+
+ spin_lock(&bcom_sram->lock);
+ offset = rh_alloc_align(bcom_sram->rh, size, align, NULL);
+ spin_unlock(&bcom_sram->lock);
+
+ if (IS_ERR_VALUE(offset))
+ return NULL;
+
+ *phys = bcom_sram->base_phys + offset;
+ return bcom_sram->base_virt + offset;
+}
+EXPORT_SYMBOL_GPL(bcom_sram_alloc);
+
+void bcom_sram_free(void *ptr)
+{
+ unsigned long offset;
+
+ if (!ptr)
+ return;
+
+ offset = ptr - bcom_sram->base_virt;
+
+ spin_lock(&bcom_sram->lock);
+ rh_free(bcom_sram->rh, offset);
+ spin_unlock(&bcom_sram->lock);
+}
+EXPORT_SYMBOL_GPL(bcom_sram_free);
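+
+/*
+ * Typical usage sketch (illustrative only; "sram_node" is assumed to have
+ * been looked up by the caller, e.g. from the device tree, and "mydrv" is
+ * just a placeholder owner string):
+ *
+ *	phys_addr_t phys;
+ *	void *buf;
+ *
+ *	if (!bcom_sram_init(sram_node, "mydrv")) {
+ *		buf = bcom_sram_alloc(256, 4, &phys);
+ *		if (buf) {
+ *			... use buf (virtual) and phys (physical) ...
+ *			bcom_sram_free(buf);
+ *		}
+ *		bcom_sram_cleanup();
+ *	}
+ */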
+
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index aa384e53b7ac..a2f079aca550 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -21,11 +21,1241 @@
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
-#include <mach/coh901318.h>
+#include <linux/platform_data/dma-coh901318.h>
-#include "coh901318_lli.h"
+#include "coh901318.h"
#include "dmaengine.h"
+#define COH901318_MOD32_MASK (0x1F)
+#define COH901318_WORD_MASK (0xFFFFFFFF)
+/* INT_STATUS - Interrupt Status Registers 32bit (R/-) */
+#define COH901318_INT_STATUS1 (0x0000)
+#define COH901318_INT_STATUS2 (0x0004)
+/* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_TC_INT_STATUS1 (0x0008)
+#define COH901318_TC_INT_STATUS2 (0x000C)
+/* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */
+#define COH901318_TC_INT_CLEAR1 (0x0010)
+#define COH901318_TC_INT_CLEAR2 (0x0014)
+/* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_RAW_TC_INT_STATUS1 (0x0018)
+#define COH901318_RAW_TC_INT_STATUS2 (0x001C)
+/* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */
+#define COH901318_BE_INT_STATUS1 (0x0020)
+#define COH901318_BE_INT_STATUS2 (0x0024)
+/* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */
+#define COH901318_BE_INT_CLEAR1 (0x0028)
+#define COH901318_BE_INT_CLEAR2 (0x002C)
+/* RAW_BE_INT_STATUS - Raw Bus Error Interrupt Status Registers 32bit (R/-) */
+#define COH901318_RAW_BE_INT_STATUS1 (0x0030)
+#define COH901318_RAW_BE_INT_STATUS2 (0x0034)
+
+/*
+ * CX_CFG - Channel Configuration Registers 32bit (R/W)
+ */
+#define COH901318_CX_CFG (0x0100)
+#define COH901318_CX_CFG_SPACING (0x04)
+/* Channel enable activates the dma job */
+#define COH901318_CX_CFG_CH_ENABLE (0x00000001)
+#define COH901318_CX_CFG_CH_DISABLE (0x00000000)
+/* Request Mode */
+#define COH901318_CX_CFG_RM_MASK (0x00000006)
+#define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1)
+#define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1)
+#define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1)
+#define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1)
+#define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1)
+/* Linked channel request field. RM must == 11 */
+#define COH901318_CX_CFG_LCRF_SHIFT 3
+#define COH901318_CX_CFG_LCRF_MASK (0x000001F8)
+#define COH901318_CX_CFG_LCR_DISABLE (0x00000000)
+/* Terminal Counter Interrupt Request Mask */
+#define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200)
+#define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000)
+/* Bus Error interrupt Mask */
+#define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400)
+#define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000)
+
+/*
+ * CX_STAT - Channel Status Registers 32bit (R/-)
+ */
+#define COH901318_CX_STAT (0x0200)
+#define COH901318_CX_STAT_SPACING (0x04)
+#define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008)
+#define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004)
+#define COH901318_CX_STAT_ACTIVE (0x00000002)
+#define COH901318_CX_STAT_ENABLED (0x00000001)
+
+/*
+ * CX_CTRL - Channel Control Registers 32bit (R/W)
+ */
+#define COH901318_CX_CTRL (0x0400)
+#define COH901318_CX_CTRL_SPACING (0x10)
+/* Transfer Count Enable */
+#define COH901318_CX_CTRL_TC_ENABLE (0x00001000)
+#define COH901318_CX_CTRL_TC_DISABLE (0x00000000)
+/* Transfer Count Value 0 - 4095 */
+#define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF)
+/* Burst count */
+#define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000)
+#define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13)
+/* Source bus size */
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16)
+/* Source address increment */
+#define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000)
+#define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000)
+/* Destination Bus Size */
+#define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19)
+/* Destination address increment */
+#define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000)
+#define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000)
+/* Master Mode (Master2 is only connected to MSL) */
+#define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000)
+#define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22)
+/* Terminal Count flag to PER enable */
+#define COH901318_CX_CTRL_TCP_ENABLE (0x01000000)
+#define COH901318_CX_CTRL_TCP_DISABLE (0x00000000)
+/* Terminal Count flags to CPU enable */
+#define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000)
+#define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000)
+/* Hand shake to peripheral */
+#define COH901318_CX_CTRL_HSP_ENABLE (0x04000000)
+#define COH901318_CX_CTRL_HSP_DISABLE (0x00000000)
+#define COH901318_CX_CTRL_HSS_ENABLE (0x08000000)
+#define COH901318_CX_CTRL_HSS_DISABLE (0x00000000)
+/* DMA mode */
+#define COH901318_CX_CTRL_DDMA_MASK (0x30000000)
+#define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28)
+#define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28)
+#define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28)
+/* Primary Request Data Destination */
+#define COH901318_CX_CTRL_PRDD_MASK (0x40000000)
+#define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30)
+#define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30)
+
+/*
+ * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W)
+ */
+#define COH901318_CX_SRC_ADDR (0x0404)
+#define COH901318_CX_SRC_ADDR_SPACING (0x10)
+
+/*
+ * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W
+ */
+#define COH901318_CX_DST_ADDR (0x0408)
+#define COH901318_CX_DST_ADDR_SPACING (0x10)
+
+/*
+ * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W)
+ */
+#define COH901318_CX_LNK_ADDR (0x040C)
+#define COH901318_CX_LNK_ADDR_SPACING (0x10)
+#define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001)
+
+/**
+ * struct coh901318_params - parameters for DMAC configuration
+ * @config: DMA config register
+ * @ctrl_lli_last: DMA control register for the last lli in the list
+ * @ctrl_lli: DMA control register for an lli
+ * @ctrl_lli_chained: DMA control register for a chained lli
+ */
+struct coh901318_params {
+ u32 config;
+ u32 ctrl_lli_last;
+ u32 ctrl_lli;
+ u32 ctrl_lli_chained;
+};
+
+/**
+ * struct coh_dma_channel - dma channel base
+ * @name: ascii name of dma channel
+ * @number: channel id number
+ * @desc_nbr_max: number of preallocated descriptors
+ * @priority_high: channel priority: 0 means low priority, otherwise high.
+ * @param: configuration parameters
+ */
+struct coh_dma_channel {
+ const char name[32];
+ const int number;
+ const int desc_nbr_max;
+ const int priority_high;
+ const struct coh901318_params param;
+};
+
+/**
+ * struct powersave - DMA power save structure
+ * @lock: lock protecting data in this struct
+ * @started_channels: bit mask indicating active dma channels
+ */
+struct powersave {
+ spinlock_t lock;
+ u64 started_channels;
+};
+
+/* Lists all dma slave channels.
+ * Syntax is [A1, B1, A2, B2, ..., -1, -1]: each A, B pair selects all
+ * channels from A to B inclusive; the end of the list is marked by -1, -1.
+ */
+static int dma_slave_channels[] = {
+ U300_DMA_MSL_TX_0, U300_DMA_SPI_RX,
+ U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1};
+
+/* points out all dma memcpy channels. */
+static int dma_memcpy_channels[] = {
+ U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1};
+
+#define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \
+ COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \
+ COH901318_CX_CFG_LCR_DISABLE | \
+ COH901318_CX_CFG_TC_IRQ_ENABLE | \
+ COH901318_CX_CFG_BE_IRQ_ENABLE)
+#define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_DISABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
+#define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_DISABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
+#define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
+ COH901318_CX_CTRL_MASTER_MODE_M1RW | \
+ COH901318_CX_CTRL_TCP_DISABLE | \
+ COH901318_CX_CTRL_TC_IRQ_ENABLE | \
+ COH901318_CX_CTRL_HSP_DISABLE | \
+ COH901318_CX_CTRL_HSS_DISABLE | \
+ COH901318_CX_CTRL_DDMA_LEGACY | \
+ COH901318_CX_CTRL_PRDD_SOURCE)
+
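+/*
+ * A sketch of how the three memcpy control words above relate (inferred
+ * from the *_lli_chained / *_lli / *_lli_last naming): a transfer is built
+ * as a chain of linked list items; items that link on to a further item
+ * use the "chained" word, ordinary items use the plain word, and the final
+ * item uses the "last" word, which is the only one that sets
+ * COH901318_CX_CTRL_TC_IRQ_ENABLE and thus signals completion.
+ */
+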
+const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
+ {
+ .number = U300_DMA_MSL_TX_0,
+ .name = "MSL TX 0",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_TX_1,
+ .name = "MSL TX 1",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_2,
+ .name = "MSL TX 2",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .desc_nbr_max = 10,
+ },
+ {
+ .number = U300_DMA_MSL_TX_3,
+ .name = "MSL TX 3",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_4,
+ .name = "MSL TX 4",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_MSL_TX_5,
+ .name = "MSL TX 5",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_TX_6,
+ .name = "MSL TX 6",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_RX_0,
+ .name = "MSL RX 0",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSL_RX_1,
+ .name = "MSL RX 1",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_2,
+ .name = "MSL RX 2",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_3,
+ .name = "MSL RX 3",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_4,
+ .name = "MSL RX 4",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_5,
+ .name = "MSL RX 5",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_MSL_RX_6,
+ .name = "MSL RX 6",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_MMCSD_RX_TX,
+ .name = "MMCSD RX TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+
+ },
+ {
+ .number = U300_DMA_MSPRO_TX,
+ .name = "MSPRO TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_MSPRO_RX,
+ .name = "MSPRO RX",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_UART0_TX,
+ .name = "UART0 TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_UART0_RX,
+ .name = "UART0 RX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_APEX_TX,
+ .name = "APEX TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_APEX_RX,
+ .name = "APEX RX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_PCM_I2S0_TX,
+ .name = "PCM I2S0 TX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_PCM_I2S0_RX,
+ .name = "PCM I2S0 RX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_PCM_I2S1_TX,
+ .name = "PCM I2S1 TX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_SOURCE,
+ },
+ {
+ .number = U300_DMA_PCM_I2S1_RX,
+ .name = "PCM I2S1 RX",
+ .priority_high = 1,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
+ COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
+ COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_ENABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY |
+ COH901318_CX_CTRL_PRDD_DEST,
+ },
+ {
+ .number = U300_DMA_XGAM_CDI,
+ .name = "XGAM CDI",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_XGAM_PDI,
+ .name = "XGAM PDI",
+ .priority_high = 0,
+ },
+ /*
+ * Don't set up device address, burst count or size of src
+ * or dst bus for this peripheral - handled by PrimeCell
+ * DMA extension.
+ */
+ {
+ .number = U300_DMA_SPI_TX,
+ .name = "SPI TX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ },
+ {
+ .number = U300_DMA_SPI_RX,
+ .name = "SPI RX",
+ .priority_high = 0,
+ .param.config = COH901318_CX_CFG_CH_DISABLE |
+ COH901318_CX_CFG_LCR_DISABLE |
+ COH901318_CX_CFG_TC_IRQ_ENABLE |
+ COH901318_CX_CFG_BE_IRQ_ENABLE,
+ .param.ctrl_lli_chained = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_DISABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+ .param.ctrl_lli_last = 0 |
+ COH901318_CX_CTRL_TC_ENABLE |
+ COH901318_CX_CTRL_MASTER_MODE_M1RW |
+ COH901318_CX_CTRL_TCP_DISABLE |
+ COH901318_CX_CTRL_TC_IRQ_ENABLE |
+ COH901318_CX_CTRL_HSP_ENABLE |
+ COH901318_CX_CTRL_HSS_DISABLE |
+ COH901318_CX_CTRL_DDMA_LEGACY,
+
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_0,
+ .name = "GENERAL 00",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_1,
+ .name = "GENERAL 01",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_2,
+ .name = "GENERAL 02",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_3,
+ .name = "GENERAL 03",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_4,
+ .name = "GENERAL 04",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_5,
+ .name = "GENERAL 05",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_6,
+ .name = "GENERAL 06",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_7,
+ .name = "GENERAL 07",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_GENERAL_PURPOSE_8,
+ .name = "GENERAL 08",
+ .priority_high = 0,
+
+ .param.config = flags_memcpy_config,
+ .param.ctrl_lli_chained = flags_memcpy_lli_chained,
+ .param.ctrl_lli = flags_memcpy_lli,
+ .param.ctrl_lli_last = flags_memcpy_lli_last,
+ },
+ {
+ .number = U300_DMA_UART1_TX,
+ .name = "UART1 TX",
+ .priority_high = 0,
+ },
+ {
+ .number = U300_DMA_UART1_RX,
+ .name = "UART1 RX",
+ .priority_high = 0,
+ }
+};
+
#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
#ifdef VERBOSE_DEBUG
@@ -54,7 +1284,6 @@ struct coh901318_base {
struct dma_device dma_slave;
struct dma_device dma_memcpy;
struct coh901318_chan *chans;
- struct coh901318_platform *platform;
};
struct coh901318_chan {
@@ -75,8 +1304,8 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
- u32 runtime_addr;
- u32 runtime_ctrl;
+ u32 addr;
+ u32 ctrl;
struct coh901318_base *base;
};
@@ -122,7 +1351,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
- for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
+ for (i = 0; i < U300_DMA_CHANNELS; i++)
if (started_channels & (1 << i))
tmp += sprintf(tmp, "channel %d\n", i);
@@ -187,25 +1416,16 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
return container_of(chan, struct coh901318_chan, chan);
}
-static inline dma_addr_t
-cohc_dev_addr(struct coh901318_chan *cohc)
-{
- /* Runtime supplied address will take precedence */
- if (cohc->runtime_addr)
- return cohc->runtime_addr;
- return cohc->base->platform->chan_conf[cohc->id].dev_addr;
-}
-
static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
- return &cohc->base->platform->chan_conf[cohc->id].param;
+ return &chan_config[cohc->id].param;
}
static inline const struct coh_dma_channel *
cohc_chan_conf(struct coh901318_chan *cohc)
{
- return &cohc->base->platform->chan_conf[cohc->id];
+ return &chan_config[cohc->id];
}
static void enable_powersave(struct coh901318_chan *cohc)
@@ -217,12 +1437,6 @@ static void enable_powersave(struct coh901318_chan *cohc)
pm->started_channels &= ~(1ULL << cohc->id);
- if (!pm->started_channels) {
- /* DMA no longer intends to access memory */
- cohc->base->platform->access_memory_state(cohc->base->dev,
- false);
- }
-
spin_unlock_irqrestore(&pm->lock, flags);
}
static void disable_powersave(struct coh901318_chan *cohc)
@@ -232,12 +1446,6 @@ static void disable_powersave(struct coh901318_chan *cohc)
spin_lock_irqsave(&pm->lock, flags);
- if (!pm->started_channels) {
- /* DMA intends to access memory */
- cohc->base->platform->access_memory_state(cohc->base->dev,
- true);
- }
-
pm->started_channels |= (1ULL << cohc->id);
spin_unlock_irqrestore(&pm->lock, flags);
@@ -596,7 +1804,7 @@ static int coh901318_config(struct coh901318_chan *cohc,
if (param)
p = param;
else
- p = &cohc->base->platform->chan_conf[channel].param;
+ p = cohc_chan_param(cohc);
/* Clear any pending BE or TC interrupt */
if (channel < 32) {
@@ -1052,9 +2260,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
* sure the bits you set per peripheral channel are
* cleared in the default config from the platform.
*/
- ctrl_chained |= cohc->runtime_ctrl;
- ctrl_last |= cohc->runtime_ctrl;
- ctrl |= cohc->runtime_ctrl;
+ ctrl_chained |= cohc->ctrl;
+ ctrl_last |= cohc->ctrl;
+ ctrl |= cohc->ctrl;
if (direction == DMA_MEM_TO_DEV) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
@@ -1103,7 +2311,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* initiate allocated lli list */
ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
- cohc_dev_addr(cohc),
+ cohc->addr,
ctrl_chained,
ctrl,
ctrl_last,
@@ -1244,7 +2452,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
dma_addr_t addr;
enum dma_slave_buswidth addr_width;
u32 maxburst;
- u32 runtime_ctrl = 0;
+ u32 ctrl = 0;
int i = 0;
/* We only support mem to per or per to mem transfers */
@@ -1265,7 +2473,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
addr_width);
switch (addr_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS;
@@ -1277,7 +2485,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS;
@@ -1290,7 +2498,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
break;
case DMA_SLAVE_BUSWIDTH_4_BYTES:
/* Direction doesn't matter here, it's 32/32 bits */
- runtime_ctrl |=
+ ctrl |=
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS;
@@ -1307,13 +2515,13 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
return;
}
- runtime_ctrl |= burst_sizes[i].reg;
+ ctrl |= burst_sizes[i].reg;
dev_dbg(COHC_2_DEV(cohc),
"selected burst size %d bytes for address width %d bytes, maxburst %d\n",
burst_sizes[i].burst_8bit, addr_width, maxburst);
- cohc->runtime_addr = addr;
- cohc->runtime_ctrl = runtime_ctrl;
+ cohc->addr = addr;
+ cohc->ctrl = ctrl;
}
static int
@@ -1431,7 +2639,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
static int __init coh901318_probe(struct platform_device *pdev)
{
int err = 0;
- struct coh901318_platform *pdata;
struct coh901318_base *base;
int irq;
struct resource *io;
@@ -1447,13 +2654,9 @@ static int __init coh901318_probe(struct platform_device *pdev)
pdev->dev.driver->name) == NULL)
return -ENOMEM;
- pdata = pdev->dev.platform_data;
- if (!pdata)
- return -ENODEV;
-
base = devm_kzalloc(&pdev->dev,
ALIGN(sizeof(struct coh901318_base), 4) +
- pdata->max_channels *
+ U300_DMA_CHANNELS *
sizeof(struct coh901318_chan),
GFP_KERNEL);
if (!base)
@@ -1466,7 +2669,6 @@ static int __init coh901318_probe(struct platform_device *pdev)
return -ENOMEM;
base->dev = &pdev->dev;
- base->platform = pdata;
spin_lock_init(&base->pm.lock);
base->pm.started_channels = 0;
@@ -1488,7 +2690,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
return err;
/* init channels for device transfers */
- coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
+ coh901318_base_init(&base->dma_slave, dma_slave_channels,
base);
dma_cap_zero(base->dma_slave.cap_mask);
@@ -1508,7 +2710,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
goto err_register_slave;
/* init channels for memcpy */
- coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy,
+ coh901318_base_init(&base->dma_memcpy, dma_memcpy_channels,
base);
dma_cap_zero(base->dma_memcpy.cap_mask);
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318.h
index abff3714fdda..95ce1e2123ec 100644
--- a/drivers/dma/coh901318_lli.h
+++ b/drivers/dma/coh901318.h
@@ -1,16 +1,15 @@
/*
- * driver/dma/coh901318_lli.h
- *
- * Copyright (C) 2007-2009 ST-Ericsson
+ * Copyright (C) 2007-2013 ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
- * Support functions for handling lli for coh901318
+ * DMA driver for COH 901 318
* Author: Per Friden <per.friden@stericsson.com>
*/
-#ifndef COH901318_LLI_H
-#define COH901318_LLI_H
+#ifndef COH901318_H
+#define COH901318_H
-#include <mach/coh901318.h>
+#define MAX_DMA_PACKET_SIZE_SHIFT 11
+#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)
struct device;
@@ -24,7 +23,25 @@ struct coh901318_pool {
#endif
};
-struct device;
+/**
+ * struct coh901318_lli - linked list item for DMAC
+ * @control: control settings for DMAC
+ * @src_addr: transfer source address
+ * @dst_addr: transfer destination address
+ * @link_addr: physical address to next lli
+ * @virt_link_addr: virtual address of next lli (only used by pool_free)
+ * @phy_this: physical address of current lli (only used by pool_free)
+ */
+struct coh901318_lli {
+ u32 control;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ dma_addr_t link_addr;
+
+ void *virt_link_addr;
+ dma_addr_t phy_this;
+};
+
/**
* coh901318_pool_create() - Creates a dma pool for llis
* @pool: pool handle
@@ -121,4 +138,4 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
u32 ctrl, u32 ctrl_last,
enum dma_transfer_direction dir, u32 ctrl_irq_mask);
-#endif /* COH901318_LLI_H */
+#endif /* COH901318_H */
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 780e0429b38c..3e96610e18e2 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -11,9 +11,9 @@
#include <linux/memory.h>
#include <linux/gfp.h>
#include <linux/dmapool.h>
-#include <mach/coh901318.h>
+#include <linux/dmaengine.h>
-#include "coh901318_lli.h"
+#include "coh901318.h"
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 3e8ba02ba292..b33d1f6e1333 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -1489,9 +1490,9 @@ static int dw_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- regs = devm_request_and_ioremap(&pdev->dev, io);
- if (!regs)
- return -EBUSY;
+ regs = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
dw_params = dma_read_byaddr(regs, DW_PARAMS);
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index dbf0e6f8de8a..70b8975d107e 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -14,6 +14,7 @@
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
@@ -684,9 +685,8 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
break;
}
- imxdmac->hw_chaining = 1;
- if (!imxdma_hw_chain(imxdmac))
- return -EINVAL;
+ imxdmac->hw_chaining = 0;
+
imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
CCR_REN;
@@ -1011,9 +1011,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
imxdma->devtype = pdev->id_entry->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imxdma->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!imxdma->base)
- return -EADDRNOTAVAIL;
+ imxdma->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(imxdma->base))
+ return PTR_ERR(imxdma->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index e5fc944de1f0..3e9d66920eb3 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -951,7 +951,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
}
}
- dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
/* skip validate if the capability is not present */
if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c6d98c00f05c..dc7466563507 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -5,6 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -782,9 +783,9 @@ static int mmp_pdma_probe(struct platform_device *op)
if (!iores)
return -EINVAL;
- pdev->base = devm_request_and_ioremap(pdev->dev, iores);
- if (!pdev->base)
- return -EADDRNOTAVAIL;
+ pdev->base = devm_ioremap_resource(pdev->dev, iores);
+ if (IS_ERR(pdev->base))
+ return PTR_ERR(pdev->base);
of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
if (of_id)
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index a9f1cd56689c..43d5a6c33297 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -547,9 +548,9 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (!iores)
return -EINVAL;
- tdev->base = devm_request_and_ioremap(&pdev->dev, iores);
- if (!tdev->base)
- return -EADDRNOTAVAIL;
+ tdev->base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(tdev->base))
+ return PTR_ERR(tdev->base);
INIT_LIST_HEAD(&tdev->device.channels);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 5a31264f2bd1..c4b4fd2acc42 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -661,32 +661,14 @@ bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
-static struct platform_device *pdev;
-
-static const struct platform_device_info omap_dma_dev_info = {
- .name = "omap-dma-engine",
- .id = -1,
- .dma_mask = DMA_BIT_MASK(32),
-};
-
static int omap_dma_init(void)
{
- int rc = platform_driver_register(&omap_dma_driver);
-
- if (rc == 0) {
- pdev = platform_device_register_full(&omap_dma_dev_info);
- if (IS_ERR(pdev)) {
- platform_driver_unregister(&omap_dma_driver);
- rc = PTR_ERR(pdev);
- }
- }
- return rc;
+ return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);
static void __exit omap_dma_exit(void)
{
- platform_device_unregister(pdev);
platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index c39e61bc8172..f6c018f1b453 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -31,8 +32,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/clk/tegra.h>
-#include <mach/clk.h>
#include "dmaengine.h"
#define TEGRA_APBDMA_GENERAL 0x0
@@ -266,6 +267,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
if (async_tx_test_ack(&dma_desc->txd)) {
list_del(&dma_desc->node);
spin_unlock_irqrestore(&tdc->lock, flags);
+ dma_desc->txd.flags = 0;
return dma_desc;
}
}
@@ -1050,7 +1052,9 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
- csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC;
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_APBDMA_CSR_IE_EOC;
csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
@@ -1095,7 +1099,8 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
mem += len;
}
sg_req->last_sg = true;
- dma_desc->txd.flags = 0;
+ if (flags & DMA_CTRL_ACK)
+ dma_desc->txd.flags = DMA_CTRL_ACK;
/*
* Make sure that mode should not be conflicting with currently
@@ -1236,12 +1241,9 @@ static int tegra_dma_probe(struct platform_device *pdev)
return -EINVAL;
}
- tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res);
- if (!tdma->base_addr) {
- dev_err(&pdev->dev,
- "Cannot request memregion/iomap dma address\n");
- return -EADDRNOTAVAIL;
- }
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(tdma->dma_clk)) {
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 66719925970f..acb709bfac0f 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -158,7 +158,7 @@ config EDAC_I3000
config EDAC_I3200
tristate "Intel 3200"
- depends on EDAC_MM_EDAC && PCI && X86 && EXPERIMENTAL
+ depends on EDAC_MM_EDAC && PCI && X86
help
Support for error detection and correction on the Intel
3200 and 3210 server chipsets.
@@ -224,7 +224,7 @@ config EDAC_I7300
config EDAC_SBRIDGE
tristate "Intel Sandy-Bridge Integrated MC"
depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
- depends on PCI_MMCONFIG && EXPERIMENTAL
+ depends on PCI_MMCONFIG
help
Support for error detection and correction the Intel
Sandy Bridge Integrated Memory Controller.
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ad8bf2aa629d..910b0116c128 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -31,7 +31,7 @@ static struct ecc_settings **ecc_stngs;
*
*FIXME: Produce a better mapping/linearisation.
*/
-struct scrubrate {
+static const struct scrubrate {
u32 scrubval; /* bit pattern for scrub rate */
u32 bandwidth; /* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
@@ -239,7 +239,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
* DRAM base/limit associated with node_id
*/
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
- unsigned nid)
+ u8 nid)
{
u64 addr;
@@ -265,7 +265,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
u64 sys_addr)
{
struct amd64_pvt *pvt;
- unsigned node_id;
+ u8 node_id;
u32 intlv_en, bits;
/*
@@ -602,111 +602,6 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
return input_addr;
}
-
-/*
- * @input_addr is an InputAddr associated with the node represented by mci.
- * Translate @input_addr to a DramAddr and return the result.
- */
-static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
-{
- struct amd64_pvt *pvt;
- unsigned node_id, intlv_shift;
- u64 bits, dram_addr;
- u32 intlv_sel;
-
- /*
- * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
- * shows how to translate a DramAddr to an InputAddr. Here we reverse
- * this procedure. When translating from a DramAddr to an InputAddr, the
- * bits used for node interleaving are discarded. Here we recover these
- * bits from the IntlvSel field of the DRAM Limit register (section
- * 3.4.4.2) for the node that input_addr is associated with.
- */
- pvt = mci->pvt_info;
- node_id = pvt->mc_node_id;
-
- BUG_ON(node_id > 7);
-
- intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
- if (intlv_shift == 0) {
- edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
- (unsigned long)input_addr);
-
- return input_addr;
- }
-
- bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
- (input_addr & 0xfff);
-
- intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
- dram_addr = bits + (intlv_sel << 12);
-
- edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
- (unsigned long)input_addr,
- (unsigned long)dram_addr, intlv_shift);
-
- return dram_addr;
-}
-
-/*
- * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
- * @dram_addr to a SysAddr.
- */
-static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u64 hole_base, hole_offset, hole_size, base, sys_addr;
- int ret = 0;
-
- ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
- &hole_size);
- if (!ret) {
- if ((dram_addr >= hole_base) &&
- (dram_addr < (hole_base + hole_size))) {
- sys_addr = dram_addr + hole_offset;
-
- edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
- (unsigned long)dram_addr,
- (unsigned long)sys_addr);
-
- return sys_addr;
- }
- }
-
- base = get_dram_base(pvt, pvt->mc_node_id);
- sys_addr = dram_addr + base;
-
- /*
- * The sys_addr we have computed up to this point is a 40-bit value
- * because the k8 deals with 40-bit values. However, the value we are
- * supposed to return is a full 64-bit physical address. The AMD
- * x86-64 architecture specifies that the most significant implemented
- * address bit through bit 63 of a physical address must be either all
- * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
- * 64-bit value below. See section 3.4.2 of AMD publication 24592:
- * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
- * Programming.
- */
- sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
-
- edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
- pvt->mc_node_id, (unsigned long)dram_addr,
- (unsigned long)sys_addr);
-
- return sys_addr;
-}
-
-/*
- * @input_addr is an InputAddr associated with the node given by mci. Translate
- * @input_addr to a SysAddr.
- */
-static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
- u64 input_addr)
-{
- return dram_addr_to_sys_addr(mci,
- input_addr_to_dram_addr(mci, input_addr));
-}
-
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
struct err_info *err)
@@ -939,7 +834,8 @@ static u64 get_error_address(struct mce *m)
struct amd64_pvt *pvt;
u64 cc6_base, tmp_addr;
u32 tmp;
- u8 mce_nid, intlv_en;
+ u16 mce_nid;
+ u8 intlv_en;
if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
return addr;
@@ -979,10 +875,29 @@ static u64 get_error_address(struct mce *m)
return addr;
}
+static struct pci_dev *pci_get_related_function(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *related)
+{
+ struct pci_dev *dev = NULL;
+
+ while ((dev = pci_get_device(vendor, device, dev))) {
+ if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
+ (dev->bus->number == related->bus->number) &&
+ (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
+ break;
+ }
+
+ return dev;
+}
+
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
+ struct amd_northbridge *nb;
+ struct pci_dev *misc, *f1 = NULL;
struct cpuinfo_x86 *c = &boot_cpu_data;
int off = range << 3;
+ u32 llim;
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
@@ -996,30 +911,32 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
- /* Factor in CC6 save area by reading dst node's limit reg */
- if (c->x86 == 0x15) {
- struct pci_dev *f1 = NULL;
- u8 nid = dram_dst_node(pvt, range);
- u32 llim;
+ /* F15h: factor in CC6 save area by reading dst node's limit reg */
+ if (c->x86 != 0x15)
+ return;
- f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
- if (WARN_ON(!f1))
- return;
+ nb = node_to_amd_nb(dram_dst_node(pvt, range));
+ if (WARN_ON(!nb))
+ return;
+
+ misc = nb->misc;
+ f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
+ if (WARN_ON(!f1))
+ return;
- amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
+ amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
- pvt->ranges[range].lim.lo &= GENMASK(0, 15);
+ pvt->ranges[range].lim.lo &= GENMASK(0, 15);
- /* {[39:27],111b} */
- pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
+ /* {[39:27],111b} */
+ pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
- pvt->ranges[range].lim.hi &= GENMASK(0, 7);
+ pvt->ranges[range].lim.hi &= GENMASK(0, 7);
- /* [47:40] */
- pvt->ranges[range].lim.hi |= llim >> 13;
+ /* [47:40] */
+ pvt->ranges[range].lim.hi |= llim >> 13;
- pci_dev_put(f1);
- }
+ pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
@@ -1305,7 +1222,7 @@ static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
}
/* Convert the sys_addr to the normalized DCT address */
-static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
u64 sys_addr, bool hi_rng,
u32 dct_sel_base_addr)
{
@@ -1381,7 +1298,7 @@ static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
* -EINVAL: NOT FOUND
* 0..csrow = Chip-Select Row
*/
-static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
@@ -1672,23 +1589,6 @@ static struct amd64_family_type amd64_family_types[] = {
},
};
-static struct pci_dev *pci_get_related_function(unsigned int vendor,
- unsigned int device,
- struct pci_dev *related)
-{
- struct pci_dev *dev = NULL;
-
- dev = pci_get_device(vendor, device, dev);
- while (dev) {
- if ((dev->bus->number == related->bus->number) &&
- (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
- break;
- dev = pci_get_device(vendor, device, dev);
- }
-
- return dev;
-}
-
/*
* These are tables of eigenvectors (one per line) which can be used for the
* construction of the syndrome tables. The modified syndrome search algorithm
@@ -1696,7 +1596,7 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
*
* Algorithm courtesy of Ross LaFetra from AMD.
*/
-static u16 x4_vectors[] = {
+static const u16 x4_vectors[] = {
0x2f57, 0x1afe, 0x66cc, 0xdd88,
0x11eb, 0x3396, 0x7f4c, 0xeac8,
0x0001, 0x0002, 0x0004, 0x0008,
@@ -1735,7 +1635,7 @@ static u16 x4_vectors[] = {
0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
-static u16 x8_vectors[] = {
+static const u16 x8_vectors[] = {
0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
@@ -1757,7 +1657,7 @@ static u16 x8_vectors[] = {
0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
-static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
+static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
unsigned v_dim)
{
unsigned int i, err_sym;
@@ -2181,7 +2081,7 @@ static int init_csrows(struct mem_ctl_info *mci)
}
/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
int cpu;
@@ -2191,7 +2091,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
}
/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
+static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
{
cpumask_var_t mask;
int cpu, nbe;
@@ -2224,7 +2124,7 @@ out:
return ret;
}
-static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
+static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
cpumask_var_t cmask;
int cpu;
@@ -2262,7 +2162,7 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
return 0;
}
-static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
struct pci_dev *F3)
{
bool ret = true;
@@ -2314,7 +2214,7 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
return ret;
}
-static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
struct pci_dev *F3)
{
u32 value, mask = 0x3; /* UECC/CECC enable */
@@ -2353,7 +2253,7 @@ static const char *ecc_msg =
"'ecc_enable_override'.\n"
" (Note that use of the override may cause unknown side effects.)\n";
-static bool ecc_enabled(struct pci_dev *F3, u8 nid)
+static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
u32 value;
u8 ecc_en = 0;
@@ -2474,7 +2374,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
int err = 0, ret;
- u8 nid = get_node_id(F2);
+ u16 nid = amd_get_node_id(F2);
ret = -ENOMEM;
pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2566,7 +2466,7 @@ err_ret:
static int amd64_probe_one_instance(struct pci_dev *pdev,
const struct pci_device_id *mc_type)
{
- u8 nid = get_node_id(pdev);
+ u16 nid = amd_get_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s;
int ret = 0;
@@ -2616,7 +2516,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- u8 nid = get_node_id(pdev);
+ u16 nid = amd_get_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s = ecc_stngs[nid];
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index e864f407806c..35637d83f235 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -292,12 +292,6 @@
/* MSRs */
#define MSR_MCGCTL_NBE BIT(4)
-/* AMD sets the first MC device at device ID 0x18. */
-static inline u8 get_node_id(struct pci_dev *pdev)
-{
- return PCI_SLOT(pdev->devfn) - 0x18;
-}
-
enum amd_families {
K8_CPUS = 0,
F10_CPUS,
@@ -340,7 +334,7 @@ struct amd64_pvt {
/* pci_device handles which we utilize */
struct pci_dev *F1, *F2, *F3;
- unsigned mc_node_id; /* MC index of this MC node */
+ u16 mc_node_id; /* MC index of this MC node */
int ext_model; /* extended model value of this node */
int channel_count;
@@ -393,7 +387,7 @@ struct err_info {
u32 offset;
};
-static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
+static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
{
u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
@@ -403,7 +397,7 @@ static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
}
-static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, u8 i)
{
u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 281f566a5513..d1e9eb191f2b 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -340,7 +340,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
/*
* Alocate and fill the csrow/channels structs
*/
- mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
+ mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
goto error;
for (row = 0; row < tot_csrows; row++) {
@@ -351,7 +351,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
csr->csrow_idx = row;
csr->mci = mci;
csr->nr_channels = tot_channels;
- csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
+ csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
GFP_KERNEL);
if (!csr->channels)
goto error;
@@ -369,7 +369,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
/*
* Allocate and fill the dimm structs
*/
- mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
+ mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
if (!mci->dimms)
goto error;
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index dc6e905ee1a5..0056c4dae9d5 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -256,7 +256,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
struct edac_pci_dev_attribute *edac_pci_dev;
edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
- if (edac_pci_dev->show)
+ if (edac_pci_dev->store)
return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
return -EIO;
}
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ad637572d8c7..f3f0c930d550 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -39,30 +39,28 @@ EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
*/
/* transaction type */
-const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
-EXPORT_SYMBOL_GPL(tt_msgs);
+static const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
/* cache level */
-const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
-EXPORT_SYMBOL_GPL(ll_msgs);
+static const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
/* memory transaction type */
-const char * const rrrr_msgs[] = {
+static const char * const rrrr_msgs[] = {
"GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
-EXPORT_SYMBOL_GPL(rrrr_msgs);
/* participating processor */
const char * const pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);
/* request timeout */
-const char * const to_msgs[] = { "no timeout", "timed out" };
-EXPORT_SYMBOL_GPL(to_msgs);
+static const char * const to_msgs[] = { "no timeout", "timed out" };
/* memory or i/o */
-const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
-EXPORT_SYMBOL_GPL(ii_msgs);
+static const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
+
+/* internal error type */
+static const char * const uu_msgs[] = { "RESV", "RESV", "HWA", "RESV" };
static const char * const f15h_mc1_mce_desc[] = {
"UC during a demand linefill from L2",
@@ -176,7 +174,7 @@ static bool k8_mc0_mce(u16 ec, u8 xec)
return f10h_mc0_mce(ec, xec);
}
-static bool f14h_mc0_mce(u16 ec, u8 xec)
+static bool cat_mc0_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
bool ret = true;
@@ -330,22 +328,28 @@ static bool k8_mc1_mce(u16 ec, u8 xec)
return ret;
}
-static bool f14h_mc1_mce(u16 ec, u8 xec)
+static bool cat_mc1_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
bool ret = true;
- if (MEM_ERROR(ec)) {
- if (TT(ec) != 0 || LL(ec) != 1)
- ret = false;
+ if (!MEM_ERROR(ec))
+ return false;
+
+ if (TT(ec) != TT_INSTR)
+ return false;
+
+ if (r4 == R4_IRD)
+ pr_cont("Data/tag array parity error for a tag hit.\n");
+ else if (r4 == R4_SNOOP)
+ pr_cont("Tag error during snoop/victimization.\n");
+ else if (xec == 0x0)
+ pr_cont("Tag parity error from victim castout.\n");
+ else if (xec == 0x2)
+ pr_cont("Microcode patch RAM parity error.\n");
+ else
+ ret = false;
- if (r4 == R4_IRD)
- pr_cont("Data/tag array parity error for a tag hit.\n");
- else if (r4 == R4_SNOOP)
- pr_cont("Tag error during snoop/victimization.\n");
- else
- ret = false;
- }
return ret;
}
@@ -399,12 +403,9 @@ static void decode_mc1_mce(struct mce *m)
pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n");
}
-static void decode_mc2_mce(struct mce *m)
+static bool k8_mc2_mce(u16 ec, u8 xec)
{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, xec_mask);
-
- pr_emerg(HW_ERR "MC2 Error");
+ bool ret = true;
if (xec == 0x1)
pr_cont(" in the write data buffers.\n");
@@ -429,24 +430,18 @@ static void decode_mc2_mce(struct mce *m)
pr_cont(": %s parity/ECC error during data "
"access from L2.\n", R4_MSG(ec));
else
- goto wrong_mc2_mce;
+ ret = false;
} else
- goto wrong_mc2_mce;
+ ret = false;
} else
- goto wrong_mc2_mce;
-
- return;
+ ret = false;
- wrong_mc2_mce:
- pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n");
+ return ret;
}
-static void decode_f15_mc2_mce(struct mce *m)
+static bool f15h_mc2_mce(u16 ec, u8 xec)
{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, xec_mask);
-
- pr_emerg(HW_ERR "MC2 Error: ");
+ bool ret = true;
if (TLB_ERROR(ec)) {
if (xec == 0x0)
@@ -454,10 +449,10 @@ static void decode_f15_mc2_mce(struct mce *m)
else if (xec == 0x1)
pr_cont("Poison data provided for TLB fill.\n");
else
- goto wrong_f15_mc2_mce;
+ ret = false;
} else if (BUS_ERROR(ec)) {
if (xec > 2)
- goto wrong_f15_mc2_mce;
+ ret = false;
pr_cont("Error during attempted NB data read.\n");
} else if (MEM_ERROR(ec)) {
@@ -471,14 +466,63 @@ static void decode_f15_mc2_mce(struct mce *m)
break;
default:
- goto wrong_f15_mc2_mce;
+ ret = false;
}
}
- return;
+ return ret;
+}
- wrong_f15_mc2_mce:
- pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n");
+static bool f16h_mc2_mce(u16 ec, u8 xec)
+{
+ u8 r4 = R4(ec);
+
+ if (!MEM_ERROR(ec))
+ return false;
+
+ switch (xec) {
+ case 0x04 ... 0x05:
+ pr_cont("%cBUFF parity error.\n", (r4 == R4_RD) ? 'I' : 'O');
+ break;
+
+ case 0x09 ... 0x0b:
+ case 0x0d ... 0x0f:
+ pr_cont("ECC error in L2 tag (%s).\n",
+ ((r4 == R4_GEN) ? "BankReq" :
+ ((r4 == R4_SNOOP) ? "Prb" : "Fill")));
+ break;
+
+ case 0x10 ... 0x19:
+ case 0x1b:
+ pr_cont("ECC error in L2 data array (%s).\n",
+ (((r4 == R4_RD) && !(xec & 0x3)) ? "Hit" :
+ ((r4 == R4_GEN) ? "Attr" :
+ ((r4 == R4_EVICT) ? "Vict" : "Fill"))));
+ break;
+
+ case 0x1c ... 0x1d:
+ case 0x1f:
+ pr_cont("Parity error in L2 attribute bits (%s).\n",
+ ((r4 == R4_RD) ? "Hit" :
+ ((r4 == R4_GEN) ? "Attr" : "Fill")));
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static void decode_mc2_mce(struct mce *m)
+{
+ u16 ec = EC(m->status);
+ u8 xec = XEC(m->status, xec_mask);
+
+ pr_emerg(HW_ERR "MC2 Error: ");
+
+ if (!fam_ops->mc2_mce(ec, xec))
+ pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
}
static void decode_mc3_mce(struct mce *m)
@@ -547,7 +591,7 @@ static void decode_mc4_mce(struct mce *m)
return;
case 0x19:
- if (boot_cpu_data.x86 == 0x15)
+ if (boot_cpu_data.x86 == 0x15 || boot_cpu_data.x86 == 0x16)
pr_cont("Compute Unit Data Error.\n");
else
goto wrong_mc4_mce;
@@ -633,6 +677,10 @@ static void decode_mc6_mce(struct mce *m)
static inline void amd_decode_err_code(u16 ec)
{
+ if (INT_ERROR(ec)) {
+ pr_emerg(HW_ERR "internal: %s\n", UU_MSG(ec));
+ return;
+ }
pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec));
@@ -702,10 +750,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
break;
case 2:
- if (c->x86 == 0x15)
- decode_f15_mc2_mce(m);
- else
- decode_mc2_mce(m);
+ decode_mc2_mce(m);
break;
case 3:
@@ -740,7 +785,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
- if (c->x86 == 0x15)
+ if (c->x86 == 0x15 || c->x86 == 0x16)
pr_cont("|%s|%s",
((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
@@ -772,7 +817,7 @@ static int __init mce_amd_init(void)
if (c->x86_vendor != X86_VENDOR_AMD)
return 0;
- if (c->x86 < 0xf || c->x86 > 0x15)
+ if (c->x86 < 0xf || c->x86 > 0x16)
return 0;
fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
@@ -783,33 +828,46 @@ static int __init mce_amd_init(void)
case 0xf:
fam_ops->mc0_mce = k8_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x10:
fam_ops->mc0_mce = f10h_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x11:
fam_ops->mc0_mce = k8_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x12:
fam_ops->mc0_mce = f12h_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x14:
nb_err_cpumask = 0x3;
- fam_ops->mc0_mce = f14h_mc0_mce;
- fam_ops->mc1_mce = f14h_mc1_mce;
+ fam_ops->mc0_mce = cat_mc0_mce;
+ fam_ops->mc1_mce = cat_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x15:
xec_mask = 0x1f;
fam_ops->mc0_mce = f15h_mc0_mce;
fam_ops->mc1_mce = f15h_mc1_mce;
+ fam_ops->mc2_mce = f15h_mc2_mce;
+ break;
+
+ case 0x16:
+ xec_mask = 0x1f;
+ fam_ops->mc0_mce = cat_mc0_mce;
+ fam_ops->mc1_mce = cat_mc1_mce;
+ fam_ops->mc2_mce = f16h_mc2_mce;
break;
default:
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 679679951e23..51b7e3a36e37 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -14,6 +14,7 @@
#define TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
#define MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
#define BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
+#define INT_ERROR(x) (((x) & 0xF4FF) == 0x0400)
#define TT(x) (((x) >> 2) & 0x3)
#define TT_MSG(x) tt_msgs[TT(x)]
@@ -25,6 +26,8 @@
#define TO_MSG(x) to_msgs[TO(x)]
#define PP(x) (((x) >> 9) & 0x3)
#define PP_MSG(x) pp_msgs[PP(x)]
+#define UU(x) (((x) >> 8) & 0x3)
+#define UU_MSG(x) uu_msgs[UU(x)]
#define R4(x) (((x) >> 4) & 0xf)
#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
@@ -32,6 +35,8 @@
#define MCI_STATUS_DEFERRED BIT_64(44)
#define MCI_STATUS_POISON BIT_64(43)
+extern const char * const pp_msgs[];
+
enum tt_ids {
TT_INSTR = 0,
TT_DATA,
@@ -65,19 +70,13 @@ enum rrrr_ids {
R4_SNOOP,
};
-extern const char * const tt_msgs[];
-extern const char * const ll_msgs[];
-extern const char * const rrrr_msgs[];
-extern const char * const pp_msgs[];
-extern const char * const to_msgs[];
-extern const char * const ii_msgs[];
-
/*
* per-family decoder ops
*/
struct amd_decoder_ops {
bool (*mc0_mce)(u16, u8);
bool (*mc1_mce)(u16, u8);
+ bool (*mc2_mce)(u16, u8);
};
void amd_report_gart_errors(bool);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 42a840d530a5..3eb32f62d72a 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -301,7 +301,7 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
"[EDAC] PCI err", pci);
if (res < 0) {
printk(KERN_ERR
- "%s: Unable to requiest irq %d for "
+ "%s: Unable to request irq %d for "
"MPC85xx PCI err\n", __func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;
@@ -583,7 +583,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
"[EDAC] L2 err", edac_dev);
if (res < 0) {
printk(KERN_ERR
- "%s: Unable to requiest irq %d for "
+ "%s: Unable to request irq %d for "
"MPC85xx L2 err\n", __func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 07122a9ef36e..5168a1324a65 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -29,7 +29,7 @@ config EXTCON_ADC_JACK
config EXTCON_MAX77693
tristate "MAX77693 EXTCON Support"
- depends on MFD_MAX77693
+ depends on MFD_MAX77693 && INPUT
select IRQ_DOMAIN
select REGMAP_I2C
help
@@ -47,7 +47,7 @@ config EXTCON_MAX8997
config EXTCON_ARIZONA
tristate "Wolfson Arizona EXTCON support"
- depends on MFD_ARIZONA && INPUT
+ depends on MFD_ARIZONA && INPUT && SND_SOC
help
Say Y here to enable support for external accessory detection
with Wolfson Arizona devices. These are audio CODECs with
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index eda2a1aa4adb..d0233cd18ffa 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -135,8 +135,7 @@ static int adc_jack_probe(struct platform_device *pdev)
;
data->num_conditions = i;
- data->chan = iio_channel_get(dev_name(&pdev->dev),
- pdata->consumer_channel);
+ data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel);
if (IS_ERR(data->chan)) {
err = PTR_ERR(data->chan);
goto out;
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 414aed50b1bc..dc357a4051f6 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -27,12 +27,18 @@
#include <linux/regulator/consumer.h>
#include <linux/extcon.h>
+#include <sound/soc.h>
+
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
#define ARIZONA_NUM_BUTTONS 6
+#define ARIZONA_ACCDET_MODE_MIC 0
+#define ARIZONA_ACCDET_MODE_HPL 1
+#define ARIZONA_ACCDET_MODE_HPR 2
+
struct arizona_extcon_info {
struct device *dev;
struct arizona *arizona;
@@ -45,17 +51,28 @@ struct arizona_extcon_info {
int micd_num_modes;
bool micd_reva;
+ bool micd_clamp;
+
+ struct delayed_work hpdet_work;
+
+ bool hpdet_active;
+ bool hpdet_done;
+
+ int num_hpdet_res;
+ unsigned int hpdet_res[3];
bool mic;
bool detecting;
int jack_flips;
+ int hpdet_ip;
+
struct extcon_dev edev;
};
static const struct arizona_micd_config micd_default_modes[] = {
- { ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
{ 0, 2 << ARIZONA_MICD_BIAS_SRC_SHIFT, 1 },
+ { ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
};
static struct {
@@ -73,11 +90,13 @@ static struct {
#define ARIZONA_CABLE_MECHANICAL 0
#define ARIZONA_CABLE_MICROPHONE 1
#define ARIZONA_CABLE_HEADPHONE 2
+#define ARIZONA_CABLE_LINEOUT 3
static const char *arizona_cable[] = {
"Mechanical",
"Microphone",
"Headphone",
+ "Line-out",
NULL,
};
@@ -85,8 +104,9 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
{
struct arizona *arizona = info->arizona;
- gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
- info->micd_modes[mode].gpio);
+ if (arizona->pdata.micd_pol_gpio > 0)
+ gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
+ info->micd_modes[mode].gpio);
regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_BIAS_SRC_MASK,
info->micd_modes[mode].bias);
@@ -98,19 +118,70 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
dev_dbg(arizona->dev, "Set jack polarity to %d\n", mode);
}
+static const char *arizona_extcon_get_micbias(struct arizona_extcon_info *info)
+{
+ switch (info->micd_modes[0].bias >> ARIZONA_MICD_BIAS_SRC_SHIFT) {
+ case 1:
+ return "MICBIAS1";
+ case 2:
+ return "MICBIAS2";
+ case 3:
+ return "MICBIAS3";
+ default:
+ return "MICVDD";
+ }
+}
+
+static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ const char *widget = arizona_extcon_get_micbias(info);
+ struct snd_soc_dapm_context *dapm = arizona->dapm;
+ int ret;
+
+ mutex_lock(&dapm->card->dapm_mutex);
+
+ ret = snd_soc_dapm_force_enable_pin(dapm, widget);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to enable %s: %d\n",
+ widget, ret);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ snd_soc_dapm_sync(dapm);
+
+ if (!arizona->pdata.micd_force_micbias) {
+ mutex_lock(&dapm->card->dapm_mutex);
+
+ ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to disable %s: %d\n",
+ widget, ret);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ snd_soc_dapm_sync(dapm);
+ }
+}
+
static void arizona_start_mic(struct arizona_extcon_info *info)
{
struct arizona *arizona = info->arizona;
bool change;
int ret;
- info->detecting = true;
- info->mic = false;
- info->jack_flips = 0;
-
/* Microphone detection can't use idle mode */
pm_runtime_get(info->dev);
+ if (info->detecting) {
+ ret = regulator_allow_bypass(info->micvdd, false);
+ if (ret != 0) {
+ dev_err(arizona->dev,
+ "Failed to regulate MICVDD: %d\n",
+ ret);
+ }
+ }
+
ret = regulator_enable(info->micvdd);
if (ret != 0) {
dev_err(arizona->dev, "Failed to enable MICVDD: %d\n",
@@ -123,6 +194,12 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
regmap_write(arizona->regmap, 0x80, 0x0);
}
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ arizona_extcon_pulse_micbias(info);
+
regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_ENA, ARIZONA_MICD_ENA,
&change);
@@ -135,18 +212,39 @@ static void arizona_start_mic(struct arizona_extcon_info *info)
static void arizona_stop_mic(struct arizona_extcon_info *info)
{
struct arizona *arizona = info->arizona;
+ const char *widget = arizona_extcon_get_micbias(info);
+ struct snd_soc_dapm_context *dapm = arizona->dapm;
bool change;
+ int ret;
regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_ENA, 0,
&change);
+ mutex_lock(&dapm->card->dapm_mutex);
+
+ ret = snd_soc_dapm_disable_pin(dapm, widget);
+ if (ret != 0)
+ dev_warn(arizona->dev,
+ "Failed to disable %s: %d\n",
+ widget, ret);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ snd_soc_dapm_sync(dapm);
+
if (info->micd_reva) {
regmap_write(arizona->regmap, 0x80, 0x3);
regmap_write(arizona->regmap, 0x294, 2);
regmap_write(arizona->regmap, 0x80, 0x0);
}
+ ret = regulator_allow_bypass(info->micvdd, true);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
+ ret);
+ }
+
if (change) {
regulator_disable(info->micvdd);
pm_runtime_mark_last_busy(info->dev);
@@ -154,6 +252,478 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
}
}
+static struct {
+ unsigned int factor_a;
+ unsigned int factor_b;
+} arizona_hpdet_b_ranges[] = {
+ { 5528, 362464 },
+ { 11084, 6186851 },
+ { 11065, 65460395 },
+};
+
+static struct {
+ int min;
+ int max;
+} arizona_hpdet_c_ranges[] = {
+ { 0, 30 },
+ { 8, 100 },
+ { 100, 1000 },
+ { 1000, 10000 },
+};
+
+static int arizona_hpdet_read(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ unsigned int val, range;
+ int ret;
+
+ ret = regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_2, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read HPDET status: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (info->hpdet_ip) {
+ case 0:
+ if (!(val & ARIZONA_HP_DONE)) {
+ dev_err(arizona->dev, "HPDET did not complete: %x\n",
+ val);
+ return -EAGAIN;
+ }
+
+ val &= ARIZONA_HP_LVL_MASK;
+ break;
+
+ case 1:
+ if (!(val & ARIZONA_HP_DONE_B)) {
+ dev_err(arizona->dev, "HPDET did not complete: %x\n",
+ val);
+ return -EAGAIN;
+ }
+
+ ret = regmap_read(arizona->regmap, ARIZONA_HP_DACVAL, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read HP value: %d\n",
+ ret);
+ return -EAGAIN;
+ }
+
+ regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ &range);
+ range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
+ >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
+
+ if (range < ARRAY_SIZE(arizona_hpdet_b_ranges) - 1 &&
+ (val < 100 || val > 0x3fb)) {
+ range++;
+ dev_dbg(arizona->dev, "Moving to HPDET range %d\n",
+ range);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+ range <<
+ ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+ return -EAGAIN;
+ }
+
+ /* If we go out of range report top of range */
+ if (val < 100 || val > 0x3fb) {
+ dev_dbg(arizona->dev, "Measurement out of range\n");
+ return 10000;
+ }
+
+ dev_dbg(arizona->dev, "HPDET read %d in range %d\n",
+ val, range);
+
+ val = arizona_hpdet_b_ranges[range].factor_b
+ / ((val * 100) -
+ arizona_hpdet_b_ranges[range].factor_a);
+ break;
+
+ default:
+ dev_warn(arizona->dev, "Unknown HPDET IP revision %d\n",
+ info->hpdet_ip);
+ case 2:
+ if (!(val & ARIZONA_HP_DONE_B)) {
+ dev_err(arizona->dev, "HPDET did not complete: %x\n",
+ val);
+ return -EAGAIN;
+ }
+
+ val &= ARIZONA_HP_LVL_B_MASK;
+
+ regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ &range);
+ range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
+ >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
+
+ /* Skip up or down a range? */
+ if (range && (val < arizona_hpdet_c_ranges[range].min)) {
+ range--;
+ dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
+ arizona_hpdet_c_ranges[range].min,
+ arizona_hpdet_c_ranges[range].max);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+ range <<
+ ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+ return -EAGAIN;
+ }
+
+ if (range < ARRAY_SIZE(arizona_hpdet_c_ranges) - 1 &&
+ (val >= arizona_hpdet_c_ranges[range].max)) {
+ range++;
+ dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
+ arizona_hpdet_c_ranges[range].min,
+ arizona_hpdet_c_ranges[range].max);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+ range <<
+ ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+ return -EAGAIN;
+ }
+ }
+
+ dev_dbg(arizona->dev, "HP impedance %d ohms\n", val);
+ return val;
+}
+
+static int arizona_hpdet_do_id(struct arizona_extcon_info *info, int *reading)
+{
+ struct arizona *arizona = info->arizona;
+ int id_gpio = arizona->pdata.hpdet_id_gpio;
+
+ /*
+ * If we're using HPDET for accessory identification we need
+ * to take multiple measurements, step through them in sequence.
+ */
+ if (arizona->pdata.hpdet_acc_id) {
+ info->hpdet_res[info->num_hpdet_res++] = *reading;
+
+ /*
+ * If the impedence is too high don't measure the
+ * second ground.
+ */
+ if (info->num_hpdet_res == 1 && *reading >= 45) {
+ dev_dbg(arizona->dev, "Skipping ground flip\n");
+ info->hpdet_res[info->num_hpdet_res++] = *reading;
+ }
+
+ if (info->num_hpdet_res == 1) {
+ dev_dbg(arizona->dev, "Flipping ground\n");
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_SRC,
+ ~info->micd_modes[0].src);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ return -EAGAIN;
+ }
+
+ /* Only check the mic directly if we didn't already ID it */
+ if (id_gpio && info->num_hpdet_res == 2 &&
+ !((info->hpdet_res[0] > info->hpdet_res[1] * 2))) {
+ dev_dbg(arizona->dev, "Measuring mic\n");
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK |
+ ARIZONA_ACCDET_SRC,
+ ARIZONA_ACCDET_MODE_HPR |
+ info->micd_modes[0].src);
+
+ gpio_set_value_cansleep(id_gpio, 1);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ return -EAGAIN;
+ }
+
+ /* OK, got both. Now, compare... */
+ dev_dbg(arizona->dev, "HPDET measured %d %d %d\n",
+ info->hpdet_res[0], info->hpdet_res[1],
+ info->hpdet_res[2]);
+
+
+ /* Take the headphone impedance for the main report */
+ *reading = info->hpdet_res[0];
+
+ /*
+ * Either the two grounds measure differently or we
+ * measure the mic as high impedance.
+ */
+ if ((info->hpdet_res[0] > info->hpdet_res[1] * 2) ||
+ (id_gpio && info->hpdet_res[2] > 10)) {
+ dev_dbg(arizona->dev, "Detected mic\n");
+ info->mic = true;
+ info->detecting = true;
+ } else {
+ dev_dbg(arizona->dev, "Detected headphone\n");
+ }
+
+ /* Make sure everything is reset back to the real polarity */
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_SRC,
+ info->micd_modes[0].src);
+ }
+
+ return 0;
+}
+
+static irqreturn_t arizona_hpdet_irq(int irq, void *data)
+{
+ struct arizona_extcon_info *info = data;
+ struct arizona *arizona = info->arizona;
+ int id_gpio = arizona->pdata.hpdet_id_gpio;
+ int report = ARIZONA_CABLE_HEADPHONE;
+ unsigned int val;
+ int ret, reading;
+
+ mutex_lock(&info->lock);
+
+ /* If we got a spurious IRQ for some reason then ignore it */
+ if (!info->hpdet_active) {
+ dev_warn(arizona->dev, "Spurious HPDET IRQ\n");
+ mutex_unlock(&info->lock);
+ return IRQ_NONE;
+ }
+
+ /* If the cable was removed while measuring ignore the result */
+ ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL);
+ if (ret < 0) {
+ dev_err(arizona->dev, "Failed to check cable state: %d\n",
+ ret);
+ goto out;
+ } else if (!ret) {
+ dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
+ goto done;
+ }
+
+ ret = arizona_hpdet_read(info);
+ if (ret == -EAGAIN) {
+ goto out;
+ } else if (ret < 0) {
+ goto done;
+ }
+ reading = ret;
+
+ /* Reset back to starting range */
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL,
+ 0);
+
+ ret = arizona_hpdet_do_id(info, &reading);
+ if (ret == -EAGAIN) {
+ goto out;
+ } else if (ret < 0) {
+ goto done;
+ }
+
+ /* Report high impedence cables as line outputs */
+ if (reading >= 5000)
+ report = ARIZONA_CABLE_LINEOUT;
+ else
+ report = ARIZONA_CABLE_HEADPHONE;
+
+ ret = extcon_set_cable_state_(&info->edev, report, true);
+ if (ret != 0)
+ dev_err(arizona->dev, "Failed to report HP/line: %d\n",
+ ret);
+
+ mutex_lock(&arizona->dapm->card->dapm_mutex);
+
+ ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read output enables: %d\n",
+ ret);
+ val = 0;
+ }
+
+ if (!(val & (ARIZONA_OUT1L_ENA | ARIZONA_OUT1R_ENA))) {
+ ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000, 0);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to undo magic: %d\n",
+ ret);
+
+ ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000, 0);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to undo magic: %d\n",
+ ret);
+ }
+
+ mutex_unlock(&arizona->dapm->card->dapm_mutex);
+
+done:
+ if (id_gpio)
+ gpio_set_value_cansleep(id_gpio, 0);
+
+ /* Revert back to MICDET mode */
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ /* If we have a mic then reenable MICDET */
+ if (info->mic)
+ arizona_start_mic(info);
+
+ if (info->hpdet_active) {
+ pm_runtime_put_autosuspend(info->dev);
+ info->hpdet_active = false;
+ }
+
+ info->hpdet_done = true;
+
+out:
+ mutex_unlock(&info->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void arizona_identify_headphone(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ int ret;
+
+ if (info->hpdet_done)
+ return;
+
+ dev_dbg(arizona->dev, "Starting HPDET\n");
+
+ /* Make sure we keep the device enabled during the measurement */
+ pm_runtime_get(info->dev);
+
+ info->hpdet_active = true;
+
+ if (info->mic)
+ arizona_stop_mic(info);
+
+ ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000, 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n", ret);
+
+ ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000, 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n", ret);
+
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK,
+ ARIZONA_ACCDET_MODE_HPL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Can't start HPDETL measurement: %d\n",
+ ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ /* Just report headphone */
+ ret = extcon_update_state(&info->edev,
+ 1 << ARIZONA_CABLE_HEADPHONE,
+ 1 << ARIZONA_CABLE_HEADPHONE);
+ if (ret != 0)
+ dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
+
+ if (info->mic)
+ arizona_start_mic(info);
+
+ info->hpdet_active = false;
+}
+
+static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ unsigned int val;
+ int ret;
+
+ dev_dbg(arizona->dev, "Starting identification via HPDET\n");
+
+ /* Make sure we keep the device enabled during the measurement */
+ pm_runtime_get_sync(info->dev);
+
+ info->hpdet_active = true;
+
+ arizona_extcon_pulse_micbias(info);
+
+ mutex_lock(&arizona->dapm->card->dapm_mutex);
+
+ ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read output enables: %d\n",
+ ret);
+ val = 0;
+ }
+
+ if (!(val & (ARIZONA_OUT1L_ENA | ARIZONA_OUT1R_ENA))) {
+ ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000,
+ 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n",
+ ret);
+
+ ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000,
+ 0x4000);
+ if (ret != 0)
+ dev_warn(arizona->dev, "Failed to do magic: %d\n",
+ ret);
+ }
+
+ mutex_unlock(&arizona->dapm->card->dapm_mutex);
+
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_SRC | ARIZONA_ACCDET_MODE_MASK,
+ info->micd_modes[0].src |
+ ARIZONA_ACCDET_MODE_HPL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+ ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Can't start HPDETL measurement: %d\n",
+ ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
+ ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+ /* Just report headphone */
+ ret = extcon_update_state(&info->edev,
+ 1 << ARIZONA_CABLE_HEADPHONE,
+ 1 << ARIZONA_CABLE_HEADPHONE);
+ if (ret != 0)
+ dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
+
+ info->hpdet_active = false;
+}
+
static irqreturn_t arizona_micdet(int irq, void *data)
{
struct arizona_extcon_info *info = data;
@@ -187,16 +757,23 @@ static irqreturn_t arizona_micdet(int irq, void *data)
/* If we got a high impedence we should have a headset, report it. */
if (info->detecting && (val & 0x400)) {
+ arizona_identify_headphone(info);
+
ret = extcon_update_state(&info->edev,
- 1 << ARIZONA_CABLE_MICROPHONE |
- 1 << ARIZONA_CABLE_HEADPHONE,
- 1 << ARIZONA_CABLE_MICROPHONE |
- 1 << ARIZONA_CABLE_HEADPHONE);
+ 1 << ARIZONA_CABLE_MICROPHONE,
+ 1 << ARIZONA_CABLE_MICROPHONE);
if (ret != 0)
dev_err(arizona->dev, "Headset report failed: %d\n",
ret);
+ /* Don't need to regulate for button detection */
+ ret = regulator_allow_bypass(info->micvdd, false);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
+ ret);
+ }
+
info->mic = true;
info->detecting = false;
goto handled;
@@ -209,20 +786,13 @@ static irqreturn_t arizona_micdet(int irq, void *data)
* impedence then give up and report headphones.
*/
if (info->detecting && (val & 0x3f8)) {
- info->jack_flips++;
-
if (info->jack_flips >= info->micd_num_modes) {
- dev_dbg(arizona->dev, "Detected headphone\n");
+ dev_dbg(arizona->dev, "Detected HP/line\n");
+ arizona_identify_headphone(info);
+
info->detecting = false;
- arizona_stop_mic(info);
- ret = extcon_set_cable_state_(&info->edev,
- ARIZONA_CABLE_HEADPHONE,
- true);
- if (ret != 0)
- dev_err(arizona->dev,
- "Headphone report failed: %d\n",
- ret);
+ arizona_stop_mic(info);
} else {
info->micd_mode++;
if (info->micd_mode == info->micd_num_modes)
@@ -258,13 +828,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
info->detecting = false;
arizona_stop_mic(info);
- ret = extcon_set_cable_state_(&info->edev,
- ARIZONA_CABLE_HEADPHONE,
- true);
- if (ret != 0)
- dev_err(arizona->dev,
- "Headphone report failed: %d\n",
- ret);
+ arizona_identify_headphone(info);
} else {
dev_warn(arizona->dev, "Button with no mic: %x\n",
val);
@@ -275,6 +839,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
input_report_key(info->input,
arizona_lvl_to_key[i].report, 0);
input_sync(info->input);
+ arizona_extcon_pulse_micbias(info);
}
handled:
@@ -284,17 +849,38 @@ handled:
return IRQ_HANDLED;
}
+static void arizona_hpdet_work(struct work_struct *work)
+{
+ struct arizona_extcon_info *info = container_of(work,
+ struct arizona_extcon_info,
+ hpdet_work.work);
+
+ mutex_lock(&info->lock);
+ arizona_start_hpdet_acc_id(info);
+ mutex_unlock(&info->lock);
+}
+
static irqreturn_t arizona_jackdet(int irq, void *data)
{
struct arizona_extcon_info *info = data;
struct arizona *arizona = info->arizona;
- unsigned int val;
+ unsigned int val, present, mask;
int ret, i;
pm_runtime_get_sync(info->dev);
+ cancel_delayed_work_sync(&info->hpdet_work);
+
mutex_lock(&info->lock);
+ if (arizona->pdata.jd_gpio5) {
+ mask = ARIZONA_MICD_CLAMP_STS;
+ present = 0;
+ } else {
+ mask = ARIZONA_JD1_STS;
+ present = ARIZONA_JD1_STS;
+ }
+
ret = regmap_read(arizona->regmap, ARIZONA_AOD_IRQ_RAW_STATUS, &val);
if (ret != 0) {
dev_err(arizona->dev, "Failed to read jackdet status: %d\n",
@@ -304,7 +890,7 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
return IRQ_NONE;
}
- if (val & ARIZONA_JD1_STS) {
+ if ((val & mask) == present) {
dev_dbg(arizona->dev, "Detected jack\n");
ret = extcon_set_cable_state_(&info->edev,
ARIZONA_CABLE_MECHANICAL, true);
@@ -313,12 +899,31 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
dev_err(arizona->dev, "Mechanical report failed: %d\n",
ret);
- arizona_start_mic(info);
+ if (!arizona->pdata.hpdet_acc_id) {
+ info->detecting = true;
+ info->mic = false;
+ info->jack_flips = 0;
+
+ arizona_start_mic(info);
+ } else {
+ schedule_delayed_work(&info->hpdet_work,
+ msecs_to_jiffies(250));
+ }
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_JACK_DETECT_DEBOUNCE,
+ ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB, 0);
} else {
dev_dbg(arizona->dev, "Detected jack removal\n");
arizona_stop_mic(info);
+ info->num_hpdet_res = 0;
+ for (i = 0; i < ARRAY_SIZE(info->hpdet_res); i++)
+ info->hpdet_res[i] = 0;
+ info->mic = false;
+ info->hpdet_done = false;
+
for (i = 0; i < ARIZONA_NUM_BUTTONS; i++)
input_report_key(info->input,
arizona_lvl_to_key[i].report, 0);
@@ -328,8 +933,20 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
if (ret != 0)
dev_err(arizona->dev, "Removal report failed: %d\n",
ret);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_JACK_DETECT_DEBOUNCE,
+ ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB,
+ ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB);
}
+ /* Clear trig_sts to make sure DCVDD is not forced up */
+ regmap_write(arizona->regmap, ARIZONA_AOD_WKUP_AND_TRIG,
+ ARIZONA_MICD_CLAMP_FALL_TRIG_STS |
+ ARIZONA_MICD_CLAMP_RISE_TRIG_STS |
+ ARIZONA_JD1_FALL_TRIG_STS |
+ ARIZONA_JD1_RISE_TRIG_STS);
+
mutex_unlock(&info->lock);
pm_runtime_mark_last_busy(info->dev);
@@ -343,8 +960,12 @@ static int arizona_extcon_probe(struct platform_device *pdev)
struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
struct arizona_pdata *pdata;
struct arizona_extcon_info *info;
+ int jack_irq_fall, jack_irq_rise;
int ret, mode, i;
+ if (!arizona->dapm || !arizona->dapm->card)
+ return -EPROBE_DEFER;
+
pdata = dev_get_platdata(arizona->dev);
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -364,7 +985,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
mutex_init(&info->lock);
info->arizona = arizona;
info->dev = &pdev->dev;
- info->detecting = true;
+ INIT_DELAYED_WORK(&info->hpdet_work, arizona_hpdet_work);
platform_set_drvdata(pdev, info);
switch (arizona->type) {
@@ -374,6 +995,8 @@ static int arizona_extcon_probe(struct platform_device *pdev)
info->micd_reva = true;
break;
default:
+ info->micd_clamp = true;
+ info->hpdet_ip = 1;
break;
}
break;
@@ -416,9 +1039,64 @@ static int arizona_extcon_probe(struct platform_device *pdev)
}
}
+ if (arizona->pdata.hpdet_id_gpio > 0) {
+ ret = devm_gpio_request_one(&pdev->dev,
+ arizona->pdata.hpdet_id_gpio,
+ GPIOF_OUT_INIT_LOW,
+ "HPDET");
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
+ arizona->pdata.hpdet_id_gpio, ret);
+ goto err_register;
+ }
+ }
+
+ if (arizona->pdata.micd_bias_start_time)
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_BIAS_STARTTIME_MASK,
+ arizona->pdata.micd_bias_start_time
+ << ARIZONA_MICD_BIAS_STARTTIME_SHIFT);
+
+ if (arizona->pdata.micd_rate)
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_RATE_MASK,
+ arizona->pdata.micd_rate
+ << ARIZONA_MICD_RATE_SHIFT);
+
+ if (arizona->pdata.micd_dbtime)
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_DBTIME_MASK,
+ arizona->pdata.micd_dbtime
+ << ARIZONA_MICD_DBTIME_SHIFT);
+
+ /*
+ * If we have a clamp use it, activating in conjunction with
+ * GPIO5 if that is connected for jack detect operation.
+ */
+ if (info->micd_clamp) {
+ if (arizona->pdata.jd_gpio5) {
+ /* Put the GPIO into input mode */
+ regmap_write(arizona->regmap, ARIZONA_GPIO5_CTRL,
+ 0xc101);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MICD_CLAMP_CONTROL,
+ ARIZONA_MICD_CLAMP_MODE_MASK, 0x9);
+ } else {
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MICD_CLAMP_CONTROL,
+ ARIZONA_MICD_CLAMP_MODE_MASK, 0x4);
+ }
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_JACK_DETECT_DEBOUNCE,
+ ARIZONA_MICD_CLAMP_DB,
+ ARIZONA_MICD_CLAMP_DB);
+ }
+
arizona_extcon_set_mode(info, 0);
- info->input = input_allocate_device();
+ info->input = devm_input_allocate_device(&pdev->dev);
if (!info->input) {
dev_err(arizona->dev, "Can't allocate input dev\n");
ret = -ENOMEM;
@@ -436,7 +1114,15 @@ static int arizona_extcon_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_RISE,
+ if (arizona->pdata.jd_gpio5) {
+ jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
+ jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
+ } else {
+ jack_irq_rise = ARIZONA_IRQ_JD_RISE;
+ jack_irq_fall = ARIZONA_IRQ_JD_FALL;
+ }
+
+ ret = arizona_request_irq(arizona, jack_irq_rise,
"JACKDET rise", arizona_jackdet, info);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
@@ -444,21 +1130,21 @@ static int arizona_extcon_probe(struct platform_device *pdev)
goto err_input;
}
- ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 1);
+ ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set JD rise IRQ wake: %d\n",
ret);
goto err_rise;
}
- ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_FALL,
+ ret = arizona_request_irq(arizona, jack_irq_fall,
"JACKDET fall", arizona_jackdet, info);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to get JD fall IRQ: %d\n", ret);
goto err_rise_wake;
}
- ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 1);
+ ret = arizona_set_irq_wake(arizona, jack_irq_fall, 1);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set JD fall IRQ wake: %d\n",
ret);
@@ -472,11 +1158,12 @@ static int arizona_extcon_probe(struct platform_device *pdev)
goto err_fall_wake;
}
- regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
- ARIZONA_MICD_BIAS_STARTTIME_MASK |
- ARIZONA_MICD_RATE_MASK,
- 7 << ARIZONA_MICD_BIAS_STARTTIME_SHIFT |
- 8 << ARIZONA_MICD_RATE_SHIFT);
+ ret = arizona_request_irq(arizona, ARIZONA_IRQ_HPDET,
+ "HPDET", arizona_hpdet_irq, info);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to get HPDET IRQ: %d\n", ret);
+ goto err_micdet;
+ }
arizona_clk32k_enable(arizona);
regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_DEBOUNCE,
@@ -494,23 +1181,24 @@ static int arizona_extcon_probe(struct platform_device *pdev)
ret = input_register_device(info->input);
if (ret) {
dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
- goto err_micdet;
+ goto err_hpdet;
}
return 0;
+err_hpdet:
+ arizona_free_irq(arizona, ARIZONA_IRQ_HPDET, info);
err_micdet:
arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info);
err_fall_wake:
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0);
+ arizona_set_irq_wake(arizona, jack_irq_fall, 0);
err_fall:
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info);
+ arizona_free_irq(arizona, jack_irq_fall, info);
err_rise_wake:
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0);
+ arizona_set_irq_wake(arizona, jack_irq_rise, 0);
err_rise:
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info);
+ arizona_free_irq(arizona, jack_irq_rise, info);
err_input:
- input_free_device(info->input);
err_register:
pm_runtime_disable(&pdev->dev);
extcon_dev_unregister(&info->edev);
@@ -522,18 +1210,32 @@ static int arizona_extcon_remove(struct platform_device *pdev)
{
struct arizona_extcon_info *info = platform_get_drvdata(pdev);
struct arizona *arizona = info->arizona;
+ int jack_irq_rise, jack_irq_fall;
pm_runtime_disable(&pdev->dev);
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0);
- arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0);
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_MICD_CLAMP_CONTROL,
+ ARIZONA_MICD_CLAMP_MODE_MASK, 0);
+
+ if (arizona->pdata.jd_gpio5) {
+ jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
+ jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
+ } else {
+ jack_irq_rise = ARIZONA_IRQ_JD_RISE;
+ jack_irq_fall = ARIZONA_IRQ_JD_FALL;
+ }
+
+ arizona_set_irq_wake(arizona, jack_irq_rise, 0);
+ arizona_set_irq_wake(arizona, jack_irq_fall, 0);
+ arizona_free_irq(arizona, ARIZONA_IRQ_HPDET, info);
arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info);
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info);
- arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info);
+ arizona_free_irq(arizona, jack_irq_rise, info);
+ arizona_free_irq(arizona, jack_irq_fall, info);
+ cancel_delayed_work_sync(&info->hpdet_work);
regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
ARIZONA_JD1_ENA, 0);
arizona_clk32k_disable(arizona);
- input_unregister_device(info->input);
extcon_dev_unregister(&info->edev);
return 0;
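
For readers unfamiliar with the unwind style used in the probe error path above, here is a minimal standalone sketch (hypothetical acquire()/release() helpers, not the driver's code) of acquiring resources in order and releasing them in reverse order on failure:

/* Sketch only: goto-based unwind, acquire in order, release in reverse. */
#include <stdio.h>

static int acquire(const char *name) { printf("acquire %s\n", name); return 0; }
static void release(const char *name) { printf("release %s\n", name); }

static int probe(void)
{
	int ret;

	ret = acquire("jackdet-rise-irq");
	if (ret)
		goto err;
	ret = acquire("jackdet-fall-irq");
	if (ret)
		goto err_rise;
	ret = acquire("hpdet-irq");
	if (ret)
		goto err_fall;

	return 0;

err_fall:
	release("jackdet-fall-irq");
err_rise:
	release("jackdet-rise-irq");
err:
	return ret;
}

int main(void) { return probe(); }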
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 1b14bfcdc176..02bec32adde4 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -29,7 +29,7 @@
#include <linux/workqueue.h>
#include <linux/gpio.h>
#include <linux/extcon.h>
-#include <linux/extcon/extcon_gpio.h>
+#include <linux/extcon/extcon-gpio.h>
struct gpio_extcon_data {
struct extcon_dev edev;
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 8c17b65eb74d..b70e3815c459 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
@@ -29,92 +30,7 @@
#include <linux/irqdomain.h>
#define DEV_NAME "max77693-muic"
-
-/* MAX77693 MUIC - STATUS1~3 Register */
-#define STATUS1_ADC_SHIFT (0)
-#define STATUS1_ADCLOW_SHIFT (5)
-#define STATUS1_ADCERR_SHIFT (6)
-#define STATUS1_ADC1K_SHIFT (7)
-#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
-#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT)
-
-#define STATUS2_CHGTYP_SHIFT (0)
-#define STATUS2_CHGDETRUN_SHIFT (3)
-#define STATUS2_DCDTMR_SHIFT (4)
-#define STATUS2_DXOVP_SHIFT (5)
-#define STATUS2_VBVOLT_SHIFT (6)
-#define STATUS2_VIDRM_SHIFT (7)
-#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT)
-#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
-#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT)
-
-#define STATUS3_OVP_SHIFT (2)
-#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
-
-/* MAX77693 CDETCTRL1~2 register */
-#define CDETCTRL1_CHGDETEN_SHIFT (0)
-#define CDETCTRL1_CHGTYPMAN_SHIFT (1)
-#define CDETCTRL1_DCDEN_SHIFT (2)
-#define CDETCTRL1_DCD2SCT_SHIFT (3)
-#define CDETCTRL1_CDDELAY_SHIFT (4)
-#define CDETCTRL1_DCDCPL_SHIFT (5)
-#define CDETCTRL1_CDPDET_SHIFT (7)
-#define CDETCTRL1_CHGDETEN_MASK (0x1 << CDETCTRL1_CHGDETEN_SHIFT)
-#define CDETCTRL1_CHGTYPMAN_MASK (0x1 << CDETCTRL1_CHGTYPMAN_SHIFT)
-#define CDETCTRL1_DCDEN_MASK (0x1 << CDETCTRL1_DCDEN_SHIFT)
-#define CDETCTRL1_DCD2SCT_MASK (0x1 << CDETCTRL1_DCD2SCT_SHIFT)
-#define CDETCTRL1_CDDELAY_MASK (0x1 << CDETCTRL1_CDDELAY_SHIFT)
-#define CDETCTRL1_DCDCPL_MASK (0x1 << CDETCTRL1_DCDCPL_SHIFT)
-#define CDETCTRL1_CDPDET_MASK (0x1 << CDETCTRL1_CDPDET_SHIFT)
-
-#define CDETCTRL2_VIDRMEN_SHIFT (1)
-#define CDETCTRL2_DXOVPEN_SHIFT (3)
-#define CDETCTRL2_VIDRMEN_MASK (0x1 << CDETCTRL2_VIDRMEN_SHIFT)
-#define CDETCTRL2_DXOVPEN_MASK (0x1 << CDETCTRL2_DXOVPEN_SHIFT)
-
-/* MAX77693 MUIC - CONTROL1~3 register */
-#define COMN1SW_SHIFT (0)
-#define COMP2SW_SHIFT (3)
-#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
-#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
-#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
-#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
- | (1 << COMN1SW_SHIFT))
-#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
- | (2 << COMN1SW_SHIFT))
-#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
- | (3 << COMN1SW_SHIFT))
-#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
- | (0 << COMN1SW_SHIFT))
-
-#define CONTROL2_LOWPWR_SHIFT (0)
-#define CONTROL2_ADCEN_SHIFT (1)
-#define CONTROL2_CPEN_SHIFT (2)
-#define CONTROL2_SFOUTASRT_SHIFT (3)
-#define CONTROL2_SFOUTORD_SHIFT (4)
-#define CONTROL2_ACCDET_SHIFT (5)
-#define CONTROL2_USBCPINT_SHIFT (6)
-#define CONTROL2_RCPS_SHIFT (7)
-#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
-#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
-#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
-#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
-#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
-#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
-#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
-#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
-
-#define CONTROL3_JIGSET_SHIFT (0)
-#define CONTROL3_BTLDSET_SHIFT (2)
-#define CONTROL3_ADCDBSET_SHIFT (4)
-#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
-#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
-#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
+#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
enum max77693_muic_adc_debounce_time {
ADC_DEBOUNCE_TIME_5MS = 0,
@@ -127,14 +43,40 @@ struct max77693_muic_info {
struct device *dev;
struct max77693_dev *max77693;
struct extcon_dev *edev;
- int prev_adc;
- int prev_adc_gnd;
+ int prev_cable_type;
+ int prev_cable_type_gnd;
int prev_chg_type;
+ int prev_button_type;
u8 status[2];
int irq;
struct work_struct irq_work;
struct mutex mutex;
+
+ /*
+	 * Use a delayed workqueue to detect the cable state and then
+	 * notify the cable state to the notifiee/platform through a uevent.
+	 * After the platform has finished booting, the extcon provider
+	 * driver should notify the cable state to the upper layer.
+ */
+ struct delayed_work wq_detcable;
+
+ /* Button of dock device */
+ struct input_dev *dock;
+
+ /*
+	 * Default usb/uart path, either UART/USB or AUX_UART/AUX_USB, for
+	 * the h/w path of COMP2/COMN1 on the CONTROL1 register.
+ */
+ int path_usb;
+ int path_uart;
+};
+
+enum max77693_muic_cable_group {
+ MAX77693_CABLE_GROUP_ADC = 0,
+ MAX77693_CABLE_GROUP_ADC_GND,
+ MAX77693_CABLE_GROUP_CHG,
+ MAX77693_CABLE_GROUP_VBVOLT,
};
enum max77693_muic_charger_type {
@@ -215,27 +157,59 @@ enum max77693_muic_acc_type {
/* The below accessories have same ADC value so ADCLow and
ADC1K bit is used to separate specific accessory */
- MAX77693_MUIC_GND_USB_OTG = 0x100, /* ADC:0x0, ADCLow:0, ADC1K:0 */
- MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, ADCLow:1, ADC1K:0 */
- MAX77693_MUIC_GND_MHL_CABLE = 0x103, /* ADC:0x0, ADCLow:1, ADC1K:1 */
+	MAX77693_MUIC_GND_USB_OTG = 0x100,	/* ADC:0x0, VBVolt:0, ADCLow:0, ADC1K:0 */
+	MAX77693_MUIC_GND_USB_OTG_VB = 0x104,	/* ADC:0x0, VBVolt:1, ADCLow:0, ADC1K:0 */
+	MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, VBVolt:0, ADCLow:1, ADC1K:0 */
+	MAX77693_MUIC_GND_MHL = 0x103,		/* ADC:0x0, VBVolt:0, ADCLow:1, ADC1K:1 */
+	MAX77693_MUIC_GND_MHL_VB = 0x107,	/* ADC:0x0, VBVolt:1, ADCLow:1, ADC1K:1 */
};
/* MAX77693 MUIC device support below list of accessories(external connector) */
-const char *max77693_extcon_cable[] = {
- [0] = "USB",
- [1] = "USB-Host",
- [2] = "TA",
- [3] = "Fast-charger",
- [4] = "Slow-charger",
- [5] = "Charge-downstream",
- [6] = "MHL",
- [7] = "Audio-video-load",
- [8] = "Audio-video-noload",
- [9] = "JIG",
+enum {
+ EXTCON_CABLE_USB = 0,
+ EXTCON_CABLE_USB_HOST,
+ EXTCON_CABLE_TA,
+ EXTCON_CABLE_FAST_CHARGER,
+ EXTCON_CABLE_SLOW_CHARGER,
+ EXTCON_CABLE_CHARGE_DOWNSTREAM,
+ EXTCON_CABLE_MHL,
+ EXTCON_CABLE_MHL_TA,
+ EXTCON_CABLE_JIG_USB_ON,
+ EXTCON_CABLE_JIG_USB_OFF,
+ EXTCON_CABLE_JIG_UART_OFF,
+ EXTCON_CABLE_JIG_UART_ON,
+ EXTCON_CABLE_DOCK_SMART,
+ EXTCON_CABLE_DOCK_DESK,
+ EXTCON_CABLE_DOCK_AUDIO,
+
+ _EXTCON_CABLE_NUM,
+};
+
+static const char *max77693_extcon_cable[] = {
+ [EXTCON_CABLE_USB] = "USB",
+ [EXTCON_CABLE_USB_HOST] = "USB-Host",
+ [EXTCON_CABLE_TA] = "TA",
+ [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
+ [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
+ [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
+ [EXTCON_CABLE_MHL] = "MHL",
+ [EXTCON_CABLE_MHL_TA] = "MHL_TA",
+ [EXTCON_CABLE_JIG_USB_ON] = "JIG-USB-ON",
+ [EXTCON_CABLE_JIG_USB_OFF] = "JIG-USB-OFF",
+ [EXTCON_CABLE_JIG_UART_OFF] = "JIG-UART-OFF",
+ [EXTCON_CABLE_JIG_UART_ON] = "Dock-Car",
+ [EXTCON_CABLE_DOCK_SMART] = "Dock-Smart",
+ [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
+ [EXTCON_CABLE_DOCK_AUDIO] = "Dock-Audio",
NULL,
};
+/*
+ * max77693_muic_set_debounce_time - Set the debounce time of ADC
+ * @info: the instance including private data of max77693 MUIC
+ * @time: the debounce time of ADC
+ */
static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
enum max77693_muic_adc_debounce_time time)
{
@@ -250,18 +224,29 @@ static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
MAX77693_MUIC_REG_CTRL3,
time << CONTROL3_ADCDBSET_SHIFT,
CONTROL3_ADCDBSET_MASK);
- if (ret)
+ if (ret) {
dev_err(info->dev, "failed to set ADC debounce time\n");
+ return -EAGAIN;
+ }
break;
default:
dev_err(info->dev, "invalid ADC debounce time\n");
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- return ret;
+ return 0;
};
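
The debounce setter above boils down to a masked read-modify-write of the ADCDBSET field in CONTROL3. A standalone sketch of that register math, using the shift/mask values from the defines removed earlier in this patch and hypothetical reg_read()/reg_write() helpers in place of the regmap calls:

#include <stdint.h>
#include <stdio.h>

#define CONTROL3_ADCDBSET_SHIFT	4
#define CONTROL3_ADCDBSET_MASK	(0x3 << CONTROL3_ADCDBSET_SHIFT)

static uint8_t control3 = 0x0f;			/* pretend register contents */

static uint8_t reg_read(void)  { return control3; }
static void reg_write(uint8_t v) { control3 = v; }

static void set_adc_debounce(unsigned int time)
{
	uint8_t val = reg_read();

	/* clear the field, then write the new debounce setting into it */
	val &= ~CONTROL3_ADCDBSET_MASK;
	val |= (time << CONTROL3_ADCDBSET_SHIFT) & CONTROL3_ADCDBSET_MASK;
	reg_write(val);
}

int main(void)
{
	set_adc_debounce(2);			/* e.g. the 25ms setting */
	printf("CONTROL3 = 0x%02x\n", control3);
	return 0;
}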
+/*
+ * max77693_muic_set_path - Set hardware line according to attached cable
+ * @info: the instance including private data of max77693 MUIC
+ * @value: the path according to attached cable
+ * @attached: the state of cable (true:attached, false:detached)
+ *
+ * The max77693 MUIC device shares one external H/W line among a variety of
+ * cables, so this function sets the internal path of the H/W line according
+ * to the type of attached cable.
+ */
static int max77693_muic_set_path(struct max77693_muic_info *info,
u8 val, bool attached)
{
@@ -277,7 +262,7 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
MAX77693_MUIC_REG_CTRL1, ctrl1, COMP_SW_MASK);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
- goto out;
+ return -EAGAIN;
}
if (attached)
@@ -290,141 +275,457 @@ static int max77693_muic_set_path(struct max77693_muic_info *info,
CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
if (ret < 0) {
dev_err(info->dev, "failed to update MUIC register\n");
- goto out;
+ return -EAGAIN;
}
dev_info(info->dev,
"CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
ctrl1, ctrl2, attached ? "attached" : "detached");
-out:
- return ret;
+
+ return 0;
}
-static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info,
- bool attached)
+/*
+ * max77693_muic_get_cable_type - Return cable type and check cable state
+ * @info: the instance including private data of max77693 MUIC
+ * @group: the path according to attached cable
+ * @attached: store cable state and return
+ *
+ * This function checks whether the cable is attached or detached, and then
+ * determines the precise cable type according to the cable group:
+ * - MAX77693_CABLE_GROUP_ADC
+ * - MAX77693_CABLE_GROUP_ADC_GND
+ * - MAX77693_CABLE_GROUP_CHG
+ * - MAX77693_CABLE_GROUP_VBVOLT
+ */
+static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
+ enum max77693_muic_cable_group group, bool *attached)
{
- int ret = 0;
- int type;
- int adc, adc1k, adclow;
+ int cable_type = 0;
+ int adc;
+ int adc1k;
+ int adclow;
+ int vbvolt;
+ int chg_type;
+
+ switch (group) {
+ case MAX77693_CABLE_GROUP_ADC:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ /*
+ * Check current cable state/cable type and store cable type
+ * (info->prev_cable_type) for handling cable when cable is
+ * detached.
+ */
+ if (adc == MAX77693_MUIC_ADC_OPEN) {
+ *attached = false;
+
+ cable_type = info->prev_cable_type;
+ info->prev_cable_type = MAX77693_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+
+ cable_type = info->prev_cable_type = adc;
+ }
+ break;
+ case MAX77693_CABLE_GROUP_ADC_GND:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ /*
+ * Check current cable state/cable type and store cable type
+ * (info->prev_cable_type/_gnd) for handling cable when cable
+ * is detached.
+ */
+ if (adc == MAX77693_MUIC_ADC_OPEN) {
+ *attached = false;
+
+ cable_type = info->prev_cable_type_gnd;
+ info->prev_cable_type_gnd = MAX77693_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+
+ adclow = info->status[0] & STATUS1_ADCLOW_MASK;
+ adclow >>= STATUS1_ADCLOW_SHIFT;
+ adc1k = info->status[0] & STATUS1_ADC1K_MASK;
+ adc1k >>= STATUS1_ADC1K_SHIFT;
+
+ vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
+ vbvolt >>= STATUS2_VBVOLT_SHIFT;
+
+ /**
+ * [0x1][VBVolt][ADCLow][ADC1K]
+ * [0x1 0 0 0 ] : USB_OTG
+ * [0x1 1 0 0 ] : USB_OTG_VB
+ * [0x1 0 1 0 ] : Audio Video Cable with load
+ * [0x1 0 1 1 ] : MHL without charging connector
+ * [0x1 1 1 1 ] : MHL with charging connector
+ */
+ cable_type = ((0x1 << 8)
+ | (vbvolt << 2)
+ | (adclow << 1)
+ | adc1k);
+
+ info->prev_cable_type = adc;
+ info->prev_cable_type_gnd = cable_type;
+ }
- if (attached) {
+ break;
+ case MAX77693_CABLE_GROUP_CHG:
+ /*
+ * Read charger type to check cable type and decide cable state
+ * according to type of charger cable.
+ */
+ chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ if (chg_type == MAX77693_CHARGER_TYPE_NONE) {
+ *attached = false;
+
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX77693_CHARGER_TYPE_NONE;
+ } else {
+ *attached = true;
+
+ /*
+ * Check current cable state/cable type and store cable
+ * type(info->prev_chg_type) for handling cable when
+ * charger cable is detached.
+ */
+ cable_type = info->prev_chg_type = chg_type;
+ }
+
+ break;
+ case MAX77693_CABLE_GROUP_VBVOLT:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
adc = info->status[0] & STATUS1_ADC_MASK;
- adclow = info->status[0] & STATUS1_ADCLOW_MASK;
- adclow >>= STATUS1_ADCLOW_SHIFT;
- adc1k = info->status[0] & STATUS1_ADC1K_MASK;
- adc1k >>= STATUS1_ADC1K_SHIFT;
-
- /**
- * [0x1][ADCLow][ADC1K]
- * [0x1 0 0 ] : USB_OTG
- * [0x1 1 0 ] : Audio Video Cable with load
- * [0x1 1 1 ] : MHL
+ adc >>= STATUS1_ADC_SHIFT;
+ chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ if (adc == MAX77693_MUIC_ADC_OPEN
+ && chg_type == MAX77693_CHARGER_TYPE_NONE)
+ *attached = false;
+ else
+ *attached = true;
+
+ /*
+ * Read vbvolt field, if vbvolt is 1,
+ * this cable is used for charging.
+ */
+ vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
+ vbvolt >>= STATUS2_VBVOLT_SHIFT;
+
+ cable_type = vbvolt;
+ break;
+ default:
+ dev_err(info->dev, "Unknown cable group (%d)\n", group);
+ cable_type = -EINVAL;
+ break;
+ }
+
+ return cable_type;
+}
+
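The ADC_GND branch above packs a group marker and the VBVolt/ADCLow/ADC1K bits into one composite value. A standalone sketch showing that the composition reproduces the MAX77693_MUIC_GND_* values defined earlier in this patch:

#include <stdio.h>

#define GND_USB_OTG		0x100	/* vbvolt=0 adclow=0 adc1k=0 */
#define GND_USB_OTG_VB		0x104	/* vbvolt=1 adclow=0 adc1k=0 */
#define GND_AV_CABLE_LOAD	0x102	/* vbvolt=0 adclow=1 adc1k=0 */
#define GND_MHL			0x103	/* vbvolt=0 adclow=1 adc1k=1 */
#define GND_MHL_VB		0x107	/* vbvolt=1 adclow=1 adc1k=1 */

/* bit 8 marks the GND group, then VBVolt, ADCLow and ADC1K are packed below */
static int gnd_cable_type(int vbvolt, int adclow, int adc1k)
{
	return (0x1 << 8) | (vbvolt << 2) | (adclow << 1) | adc1k;
}

int main(void)
{
	printf("%s\n", gnd_cable_type(1, 1, 1) == GND_MHL_VB ?
	       "MHL with charging connector" : "other");
	return 0;
}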
+static int max77693_muic_dock_handler(struct max77693_muic_info *info,
+ int cable_type, bool attached)
+{
+ int ret = 0;
+ int vbvolt;
+ bool cable_attached;
+ char dock_name[CABLE_NAME_MAX];
+
+ dev_info(info->dev,
+ "external connector is %s (adc:0x%02x)\n",
+ attached ? "attached" : "detached", cable_type);
+
+ switch (cable_type) {
+ case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
+ /*
+		 * Check whether the power cable is attached or detached.
+		 * The Dock-Smart device always needs an external power supply.
+		 * If a power cable (USB/TA) isn't connected to the dock device,
+		 * the user can't use Dock-Smart for desktop mode.
+ */
+ vbvolt = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_VBVOLT, &cable_attached);
+ if (attached && !vbvolt) {
+ dev_warn(info->dev,
+ "Cannot detect external power supply\n");
+ return 0;
+ }
+
+ /*
+ * Notify Dock-Smart/MHL state.
+		 * - The Dock-Smart device includes three types of cable:
+		 *   HDMI, USB for mouse/keyboard and a micro-usb port for the
+		 *   USB/TA cable. The Dock-Smart device always needs an external
+		 *   power supply (USB/TA cable through the micro-usb port). The
+		 *   Dock-Smart device supports screen output of the target to a
+		 *   separate monitor and mouse/keyboard for desktop mode.
+ *
+ * Features of 'USB/TA cable with Dock-Smart device'
+ * - Support MHL
+ * - Support external output feature of audio
+ * - Support charging through micro-usb port without data
+ * connection if TA cable is connected to target.
+ * - Support charging and data connection through micro-usb port
+ * if USB cable is connected between target and host
+ * device.
+ * - Support OTG device (Mouse/Keyboard)
*/
- type = ((0x1 << 8) | (adclow << 1) | adc1k);
+ ret = max77693_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "Dock-Smart", attached);
+ extcon_set_cable_state(info->edev, "MHL", attached);
+ goto out;
+ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* Dock-Car */
+ strcpy(dock_name, "Dock-Car");
+ break;
+ case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
+ strcpy(dock_name, "Dock-Desk");
+ break;
+ case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
+ strcpy(dock_name, "Dock-Audio");
+ if (!attached)
+ extcon_set_cable_state(info->edev, "USB", false);
+ break;
+ default:
+ dev_err(info->dev, "failed to detect %s dock device\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
+ }
+
+ /* Dock-Car/Desk/Audio, PATH:AUDIO */
+ ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+ if (ret < 0)
+ return ret;
+ extcon_set_cable_state(info->edev, dock_name, attached);
+
+out:
+ return 0;
+}
+
+static int max77693_muic_dock_button_handler(struct max77693_muic_info *info,
+ int button_type, bool attached)
+{
+ struct input_dev *dock = info->dock;
+ unsigned int code;
+
+ switch (button_type) {
+ case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON-1
+ ... MAX77693_MUIC_ADC_REMOTE_S3_BUTTON+1:
+ /* DOCK_KEY_PREV */
+ code = KEY_PREVIOUSSONG;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON-1
+ ... MAX77693_MUIC_ADC_REMOTE_S7_BUTTON+1:
+ /* DOCK_KEY_NEXT */
+ code = KEY_NEXTSONG;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
+ /* DOCK_VOL_DOWN */
+ code = KEY_VOLUMEDOWN;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
+ /* DOCK_VOL_UP */
+ code = KEY_VOLUMEUP;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON-1
+ ... MAX77693_MUIC_ADC_REMOTE_S12_BUTTON+1:
+ /* DOCK_KEY_PLAY_PAUSE */
+ code = KEY_PLAYPAUSE;
+ break;
+ default:
+ dev_err(info->dev,
+ "failed to detect %s key (adc:0x%x)\n",
+ attached ? "pressed" : "released", button_type);
+ return -EINVAL;
+ }
+
+ input_event(dock, EV_KEY, code, attached);
+ input_sync(dock);
+
+ return 0;
+}
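
The button handler above relies on GCC's case-range extension so that an ADC reading within +/-1 of the nominal button code still maps to the same key. A standalone sketch of that idea, with placeholder ADC codes rather than the real MAX77693 values:

#include <stdio.h>

#define ADC_S3_BUTTON	5	/* placeholder nominal codes, not the real ones */
#define ADC_S7_BUTTON	9
#define ADC_S12_BUTTON	14

static const char *button_name(int adc)
{
	switch (adc) {
	case ADC_S3_BUTTON - 1 ... ADC_S3_BUTTON + 1:	/* GCC case-range */
		return "KEY_PREVIOUSSONG";
	case ADC_S7_BUTTON - 1 ... ADC_S7_BUTTON + 1:
		return "KEY_NEXTSONG";
	case ADC_S12_BUTTON - 1 ... ADC_S12_BUTTON + 1:
		return "KEY_PLAYPAUSE";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", button_name(ADC_S7_BUTTON + 1));	/* tolerated reading */
	return 0;
}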
- /* Store previous ADC value to handle accessory
- when accessory will be detached */
- info->prev_adc = adc;
- info->prev_adc_gnd = type;
- } else
- type = info->prev_adc_gnd;
+static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
+{
+ int cable_type_gnd;
+ int ret = 0;
+ bool attached;
- switch (type) {
+ cable_type_gnd = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC_GND, &attached);
+
+ switch (cable_type_gnd) {
case MAX77693_MUIC_GND_USB_OTG:
- /* USB_OTG */
+ case MAX77693_MUIC_GND_USB_OTG_VB:
+ /* USB_OTG, PATH: AP_USB */
ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
if (ret < 0)
- goto out;
+ return ret;
extcon_set_cable_state(info->edev, "USB-Host", attached);
break;
case MAX77693_MUIC_GND_AV_CABLE_LOAD:
- /* Audio Video Cable with load */
+ /* Audio Video Cable with load, PATH:AUDIO */
ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
if (ret < 0)
- goto out;
+ return ret;
extcon_set_cable_state(info->edev,
"Audio-video-load", attached);
break;
- case MAX77693_MUIC_GND_MHL_CABLE:
- /* MHL */
+ case MAX77693_MUIC_GND_MHL:
+ case MAX77693_MUIC_GND_MHL_VB:
+ /* MHL or MHL with USB/TA cable */
extcon_set_cable_state(info->edev, "MHL", attached);
break;
default:
- dev_err(info->dev, "failed to detect %s accessory\n",
+ dev_err(info->dev, "failed to detect %s cable of gnd type\n",
attached ? "attached" : "detached");
- dev_err(info->dev, "- adc:0x%x, adclow:0x%x, adc1k:0x%x\n",
- adc, adclow, adc1k);
- ret = -EINVAL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max77693_muic_jig_handler(struct max77693_muic_info *info,
+ int cable_type, bool attached)
+{
+ char cable_name[32];
+ int ret = 0;
+ u8 path = CONTROL1_SW_OPEN;
+
+ dev_info(info->dev,
+ "external connector is %s (adc:0x%02x)\n",
+ attached ? "attached" : "detached", cable_type);
+
+ switch (cable_type) {
+ case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: /* ADC_JIG_USB_OFF */
+ /* PATH:AP_USB */
+ strcpy(cable_name, "JIG-USB-OFF");
+ path = CONTROL1_SW_USB;
+ break;
+ case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: /* ADC_JIG_USB_ON */
+ /* PATH:AP_USB */
+ strcpy(cable_name, "JIG-USB-ON");
+ path = CONTROL1_SW_USB;
break;
+ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: /* ADC_JIG_UART_OFF */
+ /* PATH:AP_UART */
+ strcpy(cable_name, "JIG-UART-OFF");
+ path = CONTROL1_SW_UART;
+ break;
+ default:
+ dev_err(info->dev, "failed to detect %s jig cable\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
}
-out:
- return ret;
+ ret = max77693_muic_set_path(info, path, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, cable_name, attached);
+
+ return 0;
}
-static int max77693_muic_adc_handler(struct max77693_muic_info *info,
- int curr_adc, bool attached)
+static int max77693_muic_adc_handler(struct max77693_muic_info *info)
{
+ int cable_type;
+ int button_type;
+ bool attached;
int ret = 0;
- int adc;
- if (attached) {
- /* Store ADC value to handle accessory
- when accessory will be detached */
- info->prev_adc = curr_adc;
- adc = curr_adc;
- } else
- adc = info->prev_adc;
+ /* Check accessory state which is either detached or attached */
+ cable_type = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC, &attached);
dev_info(info->dev,
"external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
- attached ? "attached" : "detached", curr_adc, info->prev_adc);
+ attached ? "attached" : "detached", cable_type,
+ info->prev_cable_type);
- switch (adc) {
+ switch (cable_type) {
case MAX77693_MUIC_ADC_GROUND:
/* USB_OTG/MHL/Audio */
- max77693_muic_adc_ground_handler(info, attached);
+ max77693_muic_adc_ground_handler(info);
break;
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:
case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON:
- /* USB */
- ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
- if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev, "USB", attached);
- break;
case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF:
- case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:
/* JIG */
- ret = max77693_muic_set_path(info, CONTROL1_SW_UART, attached);
+ ret = max77693_muic_jig_handler(info, cable_type, attached);
if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev, "JIG", attached);
+ return ret;
break;
- case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE:
- /* Audio Video cable with no-load */
- ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+ case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
+ case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* Dock-Car */
+ case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
+ case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
+ /*
+ * DOCK device
+ *
+		 * The MAX77693 MUIC device can detect 34 cable types in total,
+		 * excluding the charger cable, and the MUIC device doesn't
+		 * assign a specific role to cables in the ADC value range from
+		 * 0x01 to 0x12. So a cable with no predefined role can be used
+		 * according to the schema of the hardware board.
+ */
+ ret = max77693_muic_dock_handler(info, cable_type, attached);
if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev,
- "Audio-video-noload", attached);
+ return ret;
+ break;
+ case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON: /* DOCK_KEY_PREV */
+ case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON: /* DOCK_KEY_NEXT */
+ case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON: /* DOCK_VOL_DOWN */
+ case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON: /* DOCK_VOL_UP */
+ case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON: /* DOCK_KEY_PLAY_PAUSE */
+ /*
+ * Button of DOCK device
+ * - the Prev/Next/Volume Up/Volume Down/Play-Pause button
+ *
+		 * The MAX77693 MUIC device can detect 34 cable types in total,
+		 * excluding the charger cable, and the MUIC device doesn't
+		 * assign a specific role to cables in the ADC value range from
+		 * 0x01 to 0x12. So a cable with no predefined role can be used
+		 * according to the schema of the hardware board.
+ */
+ if (attached)
+ button_type = info->prev_button_type = cable_type;
+ else
+ button_type = info->prev_button_type;
+
+ ret = max77693_muic_dock_button_handler(info, button_type,
+ attached);
+ if (ret < 0)
+ return ret;
break;
case MAX77693_MUIC_ADC_SEND_END_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S1_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S2_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S4_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S5_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S6_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S8_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
case MAX77693_MUIC_ADC_REMOTE_S11_BUTTON:
- case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON:
case MAX77693_MUIC_ADC_RESERVED_ACC_1:
case MAX77693_MUIC_ADC_RESERVED_ACC_2:
- case MAX77693_MUIC_ADC_RESERVED_ACC_3:
case MAX77693_MUIC_ADC_RESERVED_ACC_4:
case MAX77693_MUIC_ADC_RESERVED_ACC_5:
case MAX77693_MUIC_ADC_CEA936_AUDIO:
@@ -432,60 +733,164 @@ static int max77693_muic_adc_handler(struct max77693_muic_info *info,
case MAX77693_MUIC_ADC_TTY_CONVERTER:
case MAX77693_MUIC_ADC_UART_CABLE:
case MAX77693_MUIC_ADC_CEA936A_TYPE1_CHG:
- case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD:
case MAX77693_MUIC_ADC_CEA936A_TYPE2_CHG:
- /* This accessory isn't used in general case if it is specially
- needed to detect additional accessory, should implement
- proper operation when this accessory is attached/detached. */
+ /*
+		 * This accessory isn't used in the general case. If it is
+		 * specially needed to detect an additional accessory, implement
+		 * proper operation when this accessory is attached/detached.
+ */
dev_info(info->dev,
"accessory is %s but it isn't used (adc:0x%x)\n",
- attached ? "attached" : "detached", adc);
- goto out;
+ attached ? "attached" : "detached", cable_type);
+ return -EAGAIN;
default:
dev_err(info->dev,
"failed to detect %s accessory (adc:0x%x)\n",
- attached ? "attached" : "detached", adc);
- ret = -EINVAL;
- goto out;
+ attached ? "attached" : "detached", cable_type);
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
-static int max77693_muic_chg_handler(struct max77693_muic_info *info,
- int curr_chg_type, bool attached)
+static int max77693_muic_chg_handler(struct max77693_muic_info *info)
{
- int ret = 0;
int chg_type;
+ int cable_type_gnd;
+ int cable_type;
+ bool attached;
+ bool cable_attached;
+ int ret = 0;
- if (attached) {
- /* Store previous charger type to control
- when charger accessory will be detached */
- info->prev_chg_type = curr_chg_type;
- chg_type = curr_chg_type;
- } else
- chg_type = info->prev_chg_type;
+ chg_type = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_CHG, &attached);
dev_info(info->dev,
"external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
attached ? "attached" : "detached",
- curr_chg_type, info->prev_chg_type);
+ chg_type, info->prev_chg_type);
switch (chg_type) {
case MAX77693_CHARGER_TYPE_USB:
- ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
- if (ret < 0)
- goto out;
- extcon_set_cable_state(info->edev, "USB", attached);
+ case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
+ case MAX77693_CHARGER_TYPE_NONE:
+ /* Check MAX77693_CABLE_GROUP_ADC_GND type */
+ cable_type_gnd = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC_GND,
+ &cable_attached);
+ switch (cable_type_gnd) {
+ case MAX77693_MUIC_GND_MHL:
+ case MAX77693_MUIC_GND_MHL_VB:
+ /*
+ * MHL cable with MHL_TA(USB/TA) cable
+			 * - The MHL cable includes two ports (an HDMI line and a
+			 *   separate micro-usb port). When the target connects the MHL
+			 *   cable, the extcon driver checks whether the MHL_TA (USB/TA)
+			 *   cable is connected. If the MHL_TA cable is connected, the
+			 *   extcon driver notifies the state to the notifiee for
+			 *   charging the battery.
+ *
+ * Features of 'MHL_TA(USB/TA) with MHL cable'
+ * - Support MHL
+ * - Support charging through micro-usb port without data connection
+ */
+ extcon_set_cable_state(info->edev, "MHL_TA", attached);
+ if (!cable_attached)
+ extcon_set_cable_state(info->edev, "MHL", cable_attached);
+ break;
+ }
+
+ /* Check MAX77693_CABLE_GROUP_ADC type */
+ cable_type = max77693_muic_get_cable_type(info,
+ MAX77693_CABLE_GROUP_ADC,
+ &cable_attached);
+ switch (cable_type) {
+ case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
+ /*
+ * Dock-Audio device with USB/TA cable
+			 * - The dock device includes two ports (Dock-Audio and a
+			 *   micro-usb port). When the target connects the Dock-Audio
+			 *   device, the extcon driver checks whether a USB/TA cable is
+			 *   connected. If a USB/TA cable is connected, the extcon driver
+			 *   notifies the state to the notifiee for charging the battery.
+ *
+ * Features of 'USB/TA cable with Dock-Audio device'
+ * - Support external output feature of audio.
+ * - Support charging through micro-usb port without data
+ * connection.
+ */
+ extcon_set_cable_state(info->edev, "USB", attached);
+
+ if (!cable_attached)
+ extcon_set_cable_state(info->edev, "Dock-Audio", cable_attached);
+ break;
+ case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
+ /*
+ * Dock-Smart device with USB/TA cable
+			 * - The Dock-Smart device includes three types of cable:
+			 *   HDMI, USB for mouse/keyboard and a micro-usb port for the
+			 *   USB/TA cable. The Dock-Smart device always needs an external
+			 *   power supply (USB/TA cable through the micro-usb port). The
+			 *   Dock-Smart device supports screen output of the target to a
+			 *   separate monitor and mouse/keyboard for desktop mode.
+ *
+ * Features of 'USB/TA cable with Dock-Smart device'
+ * - Support MHL
+ * - Support external output feature of audio
+ * - Support charging through micro-usb port without data
+ * connection if TA cable is connected to target.
+ * - Support charging and data connection through micro-usb port
+ * if USB cable is connected between target and host
+ * device.
+ * - Support OTG device (Mouse/Keyboard)
+ */
+ ret = max77693_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "Dock-Smart", attached);
+ extcon_set_cable_state(info->edev, "MHL", attached);
+
+ break;
+ }
+
+ /* Check MAX77693_CABLE_GROUP_CHG type */
+ switch (chg_type) {
+ case MAX77693_CHARGER_TYPE_NONE:
+ /*
+			 * When MHL (with a USB/TA cable) or Dock-Audio with a USB/TA
+			 * cable is attached, the MUIC device raises the two interrupts
+			 * below:
+			 * - 'MAX77693_MUIC_IRQ_INT1_ADC' for detecting MHL/Dock-Audio.
+			 * - 'MAX77693_MUIC_IRQ_INT2_CHGTYP' for detecting the USB/TA
+			 *   cable connected to MHL or Dock-Audio.
+			 * The MAX77693_MUIC_IRQ_INT1_ADC interrupt always happens
+			 * earlier than the MAX77693_MUIC_IRQ_INT2_CHGTYP interrupt.
+			 *
+			 * If the user attaches MHL (with a USB/TA cable) and detaches
+			 * it again before the MAX77693_MUIC_IRQ_INT2_CHGTYP interrupt
+			 * happens, the USB/TA cable remains flagged as connected even
+			 * though it isn't, which confuses the user. So the driver
+			 * should check this situation even though the previous charger
+			 * type is N/A.
+ */
+ break;
+ case MAX77693_CHARGER_TYPE_USB:
+ /* Only USB cable, PATH:AP_USB */
+ ret = max77693_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_cable_state(info->edev, "USB", attached);
+ break;
+ case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
+ /* Only TA cable */
+ extcon_set_cable_state(info->edev, "TA", attached);
+ break;
+ }
break;
case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT:
extcon_set_cable_state(info->edev,
"Charge-downstream", attached);
break;
- case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state(info->edev, "TA", attached);
- break;
case MAX77693_CHARGER_TYPE_APPLE_500MA:
extcon_set_cable_state(info->edev, "Slow-charger", attached);
break;
@@ -498,22 +903,18 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info,
dev_err(info->dev,
"failed to detect %s accessory (chg_type:0x%x)\n",
attached ? "attached" : "detached", chg_type);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
static void max77693_muic_irq_work(struct work_struct *work)
{
struct max77693_muic_info *info = container_of(work,
struct max77693_muic_info, irq_work);
- int curr_adc, curr_chg_type;
int irq_type = -1;
int i, ret = 0;
- bool attached = true;
if (!info->edev)
return;
@@ -539,14 +940,7 @@ static void max77693_muic_irq_work(struct work_struct *work)
case MAX77693_MUIC_IRQ_INT1_ADC1K:
/* Handle all of accessory except for
type of charger accessory */
- curr_adc = info->status[0] & STATUS1_ADC_MASK;
- curr_adc >>= STATUS1_ADC_SHIFT;
-
- /* Check accessory state which is either detached or attached */
- if (curr_adc == MAX77693_MUIC_ADC_OPEN)
- attached = false;
-
- ret = max77693_muic_adc_handler(info, curr_adc, attached);
+ ret = max77693_muic_adc_handler(info);
break;
case MAX77693_MUIC_IRQ_INT2_CHGTYP:
case MAX77693_MUIC_IRQ_INT2_CHGDETREUN:
@@ -555,15 +949,7 @@ static void max77693_muic_irq_work(struct work_struct *work)
case MAX77693_MUIC_IRQ_INT2_VBVOLT:
case MAX77693_MUIC_IRQ_INT2_VIDRM:
/* Handle charger accessory */
- curr_chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
- curr_chg_type >>= STATUS2_CHGTYP_SHIFT;
-
- /* Check charger accessory state which
- is either detached or attached */
- if (curr_chg_type == MAX77693_CHARGER_TYPE_NONE)
- attached = false;
-
- ret = max77693_muic_chg_handler(info, curr_chg_type, attached);
+ ret = max77693_muic_chg_handler(info);
break;
case MAX77693_MUIC_IRQ_INT3_EOC:
case MAX77693_MUIC_IRQ_INT3_CGMBC:
@@ -575,7 +961,8 @@ static void max77693_muic_irq_work(struct work_struct *work)
default:
dev_err(info->dev, "muic interrupt: irq %d occurred\n",
irq_type);
- break;
+ mutex_unlock(&info->mutex);
+ return;
}
if (ret < 0)
@@ -604,7 +991,9 @@ static struct regmap_config max77693_muic_regmap_config = {
static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
{
int ret = 0;
- int adc, chg_type;
+ int adc;
+ int chg_type;
+ bool attached;
mutex_lock(&info->mutex);
@@ -617,35 +1006,39 @@ static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
return -EINVAL;
}
- adc = info->status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
-
- if (adc != MAX77693_MUIC_ADC_OPEN) {
- dev_info(info->dev,
- "external connector is attached (adc:0x%02x)\n", adc);
+ adc = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_ADC,
+ &attached);
+ if (attached && adc != MAX77693_MUIC_ADC_OPEN) {
+ ret = max77693_muic_adc_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect accessory\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
- ret = max77693_muic_adc_handler(info, adc, true);
- if (ret < 0)
- dev_err(info->dev, "failed to detect accessory\n");
- goto out;
+ chg_type = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_CHG,
+ &attached);
+ if (attached && chg_type != MAX77693_CHARGER_TYPE_NONE) {
+ ret = max77693_muic_chg_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect charger accessory\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
}
- chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
- chg_type >>= STATUS2_CHGTYP_SHIFT;
+ mutex_unlock(&info->mutex);
- if (chg_type != MAX77693_CHARGER_TYPE_NONE) {
- dev_info(info->dev,
- "external connector is attached (chg_type:0x%x)\n",
- chg_type);
+ return 0;
+}
- max77693_muic_chg_handler(info, chg_type, true);
- if (ret < 0)
- dev_err(info->dev, "failed to detect charger accessory\n");
- }
+static void max77693_muic_detect_cable_wq(struct work_struct *work)
+{
+ struct max77693_muic_info *info = container_of(to_delayed_work(work),
+ struct max77693_muic_info, wq_detcable);
-out:
- mutex_unlock(&info->mutex);
- return ret;
+ max77693_muic_detect_accessory(info);
}
static int max77693_muic_probe(struct platform_device *pdev)
@@ -654,7 +1047,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
struct max77693_muic_info *info;
- int ret, i;
+ int delay_jiffies;
+ int ret;
+ int i;
u8 id;
info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
@@ -678,6 +1073,32 @@ static int max77693_muic_probe(struct platform_device *pdev)
return ret;
}
}
+
+ /* Register input device for button of dock device */
+ info->dock = devm_input_allocate_device(&pdev->dev);
+ if (!info->dock) {
+ dev_err(&pdev->dev, "%s: failed to allocate input\n", __func__);
+ return -ENOMEM;
+ }
+ info->dock->name = "max77693-muic/dock";
+ info->dock->phys = "max77693-muic/extcon";
+ info->dock->dev.parent = &pdev->dev;
+
+ __set_bit(EV_REP, info->dock->evbit);
+
+ input_set_capability(info->dock, EV_KEY, KEY_VOLUMEUP);
+ input_set_capability(info->dock, EV_KEY, KEY_VOLUMEDOWN);
+ input_set_capability(info->dock, EV_KEY, KEY_PLAYPAUSE);
+ input_set_capability(info->dock, EV_KEY, KEY_PREVIOUSSONG);
+ input_set_capability(info->dock, EV_KEY, KEY_NEXTSONG);
+
+ ret = input_register_device(info->dock);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot register input device error(%d)\n",
+ ret);
+ return ret;
+ }
+
platform_set_drvdata(pdev, info);
mutex_init(&info->mutex);
@@ -697,13 +1118,13 @@ static int max77693_muic_probe(struct platform_device *pdev)
ret = request_threaded_irq(virq, NULL,
max77693_muic_irq_handler,
- IRQF_ONESHOT, muic_irq->name, info);
+ IRQF_NO_SUSPEND,
+ muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d,"
" error :%d)\n",
muic_irq->irq, ret);
-
goto err_irq;
}
}
@@ -749,23 +1170,54 @@ static int max77693_muic_probe(struct platform_device *pdev)
= muic_pdata->init_data[i].data;
}
+ /*
+	 * Default usb/uart path, either UART/USB or AUX_UART/AUX_USB, for
+	 * the h/w path of COMP2/COMN1 on the CONTROL1 register.
+ */
+ if (muic_pdata->path_uart)
+ info->path_uart = muic_pdata->path_uart;
+ else
+ info->path_uart = CONTROL1_SW_UART;
+
+ if (muic_pdata->path_usb)
+ info->path_usb = muic_pdata->path_usb;
+ else
+ info->path_usb = CONTROL1_SW_USB;
+
+ /* Set initial path for UART */
+ max77693_muic_set_path(info, info->path_uart, true);
+
/* Check revision number of MUIC device*/
ret = max77693_read_reg(info->max77693->regmap_muic,
MAX77693_MUIC_REG_ID, &id);
if (ret < 0) {
dev_err(&pdev->dev, "failed to read revision number\n");
- goto err_irq;
+ goto err_extcon;
}
dev_info(info->dev, "device ID : 0x%x\n", id);
/* Set ADC debounce time */
max77693_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
- /* Detect accessory on boot */
- max77693_muic_detect_accessory(info);
+ /*
+	 * Detect the accessory after the platform initialization completes.
+	 *
+	 * - Use a delayed workqueue to detect the cable state and then
+	 *   notify the cable state to the notifiee/platform through a uevent.
+	 *   After the platform has finished booting, the extcon provider
+	 *   driver should notify the cable state to the upper layer.
+ */
+ INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
+ if (muic_pdata->detcable_delay_ms)
+ delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms);
+ else
+ delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+ schedule_delayed_work(&info->wq_detcable, delay_jiffies);
return ret;
+err_extcon:
+ extcon_dev_unregister(info->edev);
err_irq:
while (--i >= 0)
free_irq(muic_irqs[i].virq, info);
@@ -780,6 +1232,7 @@ static int max77693_muic_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
free_irq(muic_irqs[i].virq, info);
cancel_work_sync(&info->irq_work);
+ input_unregister_device(info->dock);
extcon_dev_unregister(info->edev);
return 0;
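
Both MUIC drivers touched by this patch defer the first cable detection with a delayed work item so that the uevent only fires after the platform has booted. A trimmed-down, kernel-style sketch of that pattern (hypothetical my_detect_info structure; the standard <linux/workqueue.h> API is assumed):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DELAY_MS_DEFAULT	20000	/* fall back to 20s, as the drivers do */

struct my_detect_info {
	struct delayed_work wq_detcable;
};

static void my_detect_cable_wq(struct work_struct *work)
{
	struct my_detect_info *info = container_of(to_delayed_work(work),
					struct my_detect_info, wq_detcable);

	/* read the status registers and report the cable state here */
	(void)info;
}

static void my_schedule_detect(struct my_detect_info *info, unsigned int ms)
{
	unsigned long delay;

	INIT_DELAYED_WORK(&info->wq_detcable, my_detect_cable_wq);
	delay = msecs_to_jiffies(ms ? ms : DELAY_MS_DEFAULT);
	schedule_delayed_work(&info->wq_detcable, delay);
}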
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 93009fe6ef05..e636d950ad6c 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -29,51 +29,14 @@
#include <linux/irqdomain.h>
#define DEV_NAME "max8997-muic"
+#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
-/* MAX8997-MUIC STATUS1 register */
-#define STATUS1_ADC_SHIFT 0
-#define STATUS1_ADCLOW_SHIFT 5
-#define STATUS1_ADCERR_SHIFT 6
-#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
-
-/* MAX8997-MUIC STATUS2 register */
-#define STATUS2_CHGTYP_SHIFT 0
-#define STATUS2_CHGDETRUN_SHIFT 3
-#define STATUS2_DCDTMR_SHIFT 4
-#define STATUS2_DBCHG_SHIFT 5
-#define STATUS2_VBVOLT_SHIFT 6
-#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT)
-#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
-
-/* MAX8997-MUIC STATUS3 register */
-#define STATUS3_OVP_SHIFT 2
-#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
-
-/* MAX8997-MUIC CONTROL1 register */
-#define COMN1SW_SHIFT 0
-#define COMP2SW_SHIFT 3
-#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
-#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
-#define SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
-
-#define MAX8997_SW_USB ((1 << COMP2SW_SHIFT) | (1 << COMN1SW_SHIFT))
-#define MAX8997_SW_AUDIO ((2 << COMP2SW_SHIFT) | (2 << COMN1SW_SHIFT))
-#define MAX8997_SW_UART ((3 << COMP2SW_SHIFT) | (3 << COMN1SW_SHIFT))
-#define MAX8997_SW_OPEN ((0 << COMP2SW_SHIFT) | (0 << COMN1SW_SHIFT))
-
-#define MAX8997_ADC_GROUND 0x00
-#define MAX8997_ADC_MHL 0x01
-#define MAX8997_ADC_JIG_USB_1 0x18
-#define MAX8997_ADC_JIG_USB_2 0x19
-#define MAX8997_ADC_DESKDOCK 0x1a
-#define MAX8997_ADC_JIG_UART 0x1c
-#define MAX8997_ADC_CARDOCK 0x1d
-#define MAX8997_ADC_OPEN 0x1f
+enum max8997_muic_adc_debounce_time {
+ ADC_DEBOUNCE_TIME_0_5MS = 0, /* 0.5ms */
+ ADC_DEBOUNCE_TIME_10MS, /* 10ms */
+ ADC_DEBOUNCE_TIME_25MS, /* 25ms */
+ ADC_DEBOUNCE_TIME_38_62MS, /* 38.62ms */
+};
struct max8997_muic_irq {
unsigned int irq;
@@ -82,61 +45,303 @@ struct max8997_muic_irq {
};
static struct max8997_muic_irq muic_irqs[] = {
- { MAX8997_MUICIRQ_ADCError, "muic-ADC_error" },
- { MAX8997_MUICIRQ_ADCLow, "muic-ADC_low" },
- { MAX8997_MUICIRQ_ADC, "muic-ADC" },
- { MAX8997_MUICIRQ_VBVolt, "muic-VB_voltage" },
- { MAX8997_MUICIRQ_DBChg, "muic-DB_charger" },
- { MAX8997_MUICIRQ_DCDTmr, "muic-DCD_timer" },
- { MAX8997_MUICIRQ_ChgDetRun, "muic-CDR_status" },
- { MAX8997_MUICIRQ_ChgTyp, "muic-charger_type" },
- { MAX8997_MUICIRQ_OVP, "muic-over_voltage" },
+ { MAX8997_MUICIRQ_ADCError, "muic-ADCERROR" },
+ { MAX8997_MUICIRQ_ADCLow, "muic-ADCLOW" },
+ { MAX8997_MUICIRQ_ADC, "muic-ADC" },
+ { MAX8997_MUICIRQ_VBVolt, "muic-VBVOLT" },
+ { MAX8997_MUICIRQ_DBChg, "muic-DBCHG" },
+ { MAX8997_MUICIRQ_DCDTmr, "muic-DCDTMR" },
+ { MAX8997_MUICIRQ_ChgDetRun, "muic-CHGDETRUN" },
+ { MAX8997_MUICIRQ_ChgTyp, "muic-CHGTYP" },
+ { MAX8997_MUICIRQ_OVP, "muic-OVP" },
+};
+
+/* Define supported cable type */
+enum max8997_muic_acc_type {
+ MAX8997_MUIC_ADC_GROUND = 0x0,
+ MAX8997_MUIC_ADC_MHL, /* MHL*/
+ MAX8997_MUIC_ADC_REMOTE_S1_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S2_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S3_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S4_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S5_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S6_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S7_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S8_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S9_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S10_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S11_BUTTON,
+ MAX8997_MUIC_ADC_REMOTE_S12_BUTTON,
+ MAX8997_MUIC_ADC_RESERVED_ACC_1,
+ MAX8997_MUIC_ADC_RESERVED_ACC_2,
+ MAX8997_MUIC_ADC_RESERVED_ACC_3,
+ MAX8997_MUIC_ADC_RESERVED_ACC_4,
+ MAX8997_MUIC_ADC_RESERVED_ACC_5,
+ MAX8997_MUIC_ADC_CEA936_AUDIO,
+ MAX8997_MUIC_ADC_PHONE_POWERED_DEV,
+ MAX8997_MUIC_ADC_TTY_CONVERTER,
+ MAX8997_MUIC_ADC_UART_CABLE,
+ MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG,
+ MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF, /* JIG-USB-OFF */
+ MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON, /* JIG-USB-ON */
+ MAX8997_MUIC_ADC_AV_CABLE_NOLOAD, /* DESKDOCK */
+ MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG,
+ MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF, /* JIG-UART */
+ MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON, /* CARDOCK */
+ MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE,
+ MAX8997_MUIC_ADC_OPEN, /* OPEN */
+};
+
+enum max8997_muic_cable_group {
+ MAX8997_CABLE_GROUP_ADC = 0,
+ MAX8997_CABLE_GROUP_ADC_GND,
+ MAX8997_CABLE_GROUP_CHG,
+ MAX8997_CABLE_GROUP_VBVOLT,
+};
+
+enum max8997_muic_usb_type {
+ MAX8997_USB_HOST,
+ MAX8997_USB_DEVICE,
+};
+
+enum max8997_muic_charger_type {
+ MAX8997_CHARGER_TYPE_NONE = 0,
+ MAX8997_CHARGER_TYPE_USB,
+ MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT,
+ MAX8997_CHARGER_TYPE_DEDICATED_CHG,
+ MAX8997_CHARGER_TYPE_500MA,
+ MAX8997_CHARGER_TYPE_1A,
+ MAX8997_CHARGER_TYPE_DEAD_BATTERY = 7,
};
struct max8997_muic_info {
struct device *dev;
struct i2c_client *muic;
- struct max8997_muic_platform_data *muic_pdata;
+ struct extcon_dev *edev;
+ int prev_cable_type;
+ int prev_chg_type;
+ u8 status[2];
int irq;
struct work_struct irq_work;
+ struct mutex mutex;
+ struct max8997_muic_platform_data *muic_pdata;
enum max8997_muic_charger_type pre_charger_type;
- int pre_adc;
- struct mutex mutex;
+ /*
+	 * Use a delayed workqueue to detect the cable state and then
+	 * notify the cable state to the notifiee/platform through a uevent.
+	 * After the platform has finished booting, the extcon provider
+	 * driver should notify the cable state to the upper layer.
+ */
+ struct delayed_work wq_detcable;
+
+ /*
+	 * Default usb/uart path, either UART/USB or AUX_UART/AUX_USB, for
+	 * the h/w path of COMP2/COMN1 on the CONTROL1 register.
+ */
+ int path_usb;
+ int path_uart;
+};
- struct extcon_dev *edev;
+enum {
+ EXTCON_CABLE_USB = 0,
+ EXTCON_CABLE_USB_HOST,
+ EXTCON_CABLE_TA,
+ EXTCON_CABLE_FAST_CHARGER,
+ EXTCON_CABLE_SLOW_CHARGER,
+ EXTCON_CABLE_CHARGE_DOWNSTREAM,
+ EXTCON_CABLE_MHL,
+ EXTCON_CABLE_DOCK_DESK,
+ EXTCON_CABLE_DOCK_CARD,
+ EXTCON_CABLE_JIG,
+
+ _EXTCON_CABLE_NUM,
};
-const char *max8997_extcon_cable[] = {
- [0] = "USB",
- [1] = "USB-Host",
- [2] = "TA",
- [3] = "Fast-charger",
- [4] = "Slow-charger",
- [5] = "Charge-downstream",
- [6] = "MHL",
- [7] = "Dock-desk",
- [8] = "Dock-card",
- [9] = "JIG",
+static const char *max8997_extcon_cable[] = {
+ [EXTCON_CABLE_USB] = "USB",
+ [EXTCON_CABLE_USB_HOST] = "USB-Host",
+ [EXTCON_CABLE_TA] = "TA",
+ [EXTCON_CABLE_FAST_CHARGER] = "Fast-charger",
+ [EXTCON_CABLE_SLOW_CHARGER] = "Slow-charger",
+ [EXTCON_CABLE_CHARGE_DOWNSTREAM] = "Charge-downstream",
+ [EXTCON_CABLE_MHL] = "MHL",
+ [EXTCON_CABLE_DOCK_DESK] = "Dock-Desk",
+ [EXTCON_CABLE_DOCK_CARD] = "Dock-Card",
+ [EXTCON_CABLE_JIG] = "JIG",
NULL,
};
+/*
+ * max8997_muic_set_debounce_time - Set the debounce time of ADC
+ * @info: the instance including private data of max8997 MUIC
+ * @time: the debounce time of ADC
+ */
+static int max8997_muic_set_debounce_time(struct max8997_muic_info *info,
+ enum max8997_muic_adc_debounce_time time)
+{
+ int ret;
+
+ switch (time) {
+ case ADC_DEBOUNCE_TIME_0_5MS:
+ case ADC_DEBOUNCE_TIME_10MS:
+ case ADC_DEBOUNCE_TIME_25MS:
+ case ADC_DEBOUNCE_TIME_38_62MS:
+ ret = max8997_update_reg(info->muic,
+ MAX8997_MUIC_REG_CONTROL3,
+ time << CONTROL3_ADCDBSET_SHIFT,
+ CONTROL3_ADCDBSET_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to set ADC debounce time\n");
+ return -EAGAIN;
+ }
+ break;
+ default:
+ dev_err(info->dev, "invalid ADC debounce time\n");
+ return -EINVAL;
+ }
+
+ return 0;
+};
+
+/*
+ * max8997_muic_set_path - Set hardware line according to attached cable
+ * @info: the instance including private data of max8997 MUIC
+ * @value: the path according to attached cable
+ * @attached: the state of cable (true:attached, false:detached)
+ *
+ * The max8997 MUIC device shares one external H/W line among a variety of
+ * cables, so this function sets the internal path of the H/W line according
+ * to the type of attached cable.
+ */
+static int max8997_muic_set_path(struct max8997_muic_info *info,
+ u8 val, bool attached)
+{
+ int ret = 0;
+ u8 ctrl1, ctrl2 = 0;
+
+ if (attached)
+ ctrl1 = val;
+ else
+ ctrl1 = CONTROL1_SW_OPEN;
+
+ ret = max8997_update_reg(info->muic,
+ MAX8997_MUIC_REG_CONTROL1, ctrl1, COMP_SW_MASK);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return -EAGAIN;
+ }
+
+ if (attached)
+ ctrl2 |= CONTROL2_CPEN_MASK; /* LowPwr=0, CPEn=1 */
+ else
+ ctrl2 |= CONTROL2_LOWPWR_MASK; /* LowPwr=1, CPEn=0 */
+
+ ret = max8997_update_reg(info->muic,
+ MAX8997_MUIC_REG_CONTROL2, ctrl2,
+ CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update MUIC register\n");
+ return -EAGAIN;
+ }
+
+ dev_info(info->dev,
+ "CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
+ ctrl1, ctrl2, attached ? "attached" : "detached");
+
+ return 0;
+}
+
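max8997_muic_set_path() above reduces to computing two register values: CONTROL1 carries the requested switch setting (or SW_OPEN on detach) and CONTROL2 flips between CPEn=1/LowPwr=0 when attached and CPEn=0/LowPwr=1 when detached. A standalone sketch of that computation, using the shift values from the defines removed earlier in this diff:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define COMN1SW_SHIFT		0
#define COMP2SW_SHIFT		3
#define CONTROL1_SW_USB		((1 << COMP2SW_SHIFT) | (1 << COMN1SW_SHIFT))
#define CONTROL1_SW_OPEN	((0 << COMP2SW_SHIFT) | (0 << COMN1SW_SHIFT))
#define CONTROL2_LOWPWR_MASK	(0x1 << 0)
#define CONTROL2_CPEN_MASK	(0x1 << 2)

static void compute_path(uint8_t val, bool attached,
			 uint8_t *ctrl1, uint8_t *ctrl2)
{
	/* CONTROL1: requested switch when attached, open switch otherwise */
	*ctrl1 = attached ? val : CONTROL1_SW_OPEN;
	/* CONTROL2: CPEn=1/LowPwr=0 on attach, CPEn=0/LowPwr=1 on detach */
	*ctrl2 = attached ? CONTROL2_CPEN_MASK : CONTROL2_LOWPWR_MASK;
}

int main(void)
{
	uint8_t c1, c2;

	compute_path(CONTROL1_SW_USB, true, &c1, &c2);
	printf("attached: CONTROL1=0x%02x CONTROL2=0x%02x\n", c1, c2);
	compute_path(CONTROL1_SW_USB, false, &c1, &c2);
	printf("detached: CONTROL1=0x%02x CONTROL2=0x%02x\n", c1, c2);
	return 0;
}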
+/*
+ * max8997_muic_get_cable_type - Return cable type and check cable state
+ * @info: the instance including private data of max8997 MUIC
+ * @group: the path according to attached cable
+ * @attached: store cable state and return
+ *
+ * This function checks whether the cable is attached or detached, and then
+ * determines the precise cable type according to the cable group:
+ * - MAX8997_CABLE_GROUP_ADC
+ * - MAX8997_CABLE_GROUP_CHG
+ */
+static int max8997_muic_get_cable_type(struct max8997_muic_info *info,
+ enum max8997_muic_cable_group group, bool *attached)
+{
+ int cable_type = 0;
+ int adc;
+ int chg_type;
+
+ switch (group) {
+ case MAX8997_CABLE_GROUP_ADC:
+ /*
+ * Read ADC value to check cable type and decide cable state
+ * according to cable type
+ */
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ /*
+ * Check current cable state/cable type and store cable type
+ * (info->prev_cable_type) for handling cable when cable is
+ * detached.
+ */
+ if (adc == MAX8997_MUIC_ADC_OPEN) {
+ *attached = false;
+
+ cable_type = info->prev_cable_type;
+ info->prev_cable_type = MAX8997_MUIC_ADC_OPEN;
+ } else {
+ *attached = true;
+
+ cable_type = info->prev_cable_type = adc;
+ }
+ break;
+ case MAX8997_CABLE_GROUP_CHG:
+ /*
+ * Read charger type to check cable type and decide cable state
+ * according to type of charger cable.
+ */
+ chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ if (chg_type == MAX8997_CHARGER_TYPE_NONE) {
+ *attached = false;
+
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX8997_CHARGER_TYPE_NONE;
+ } else {
+ *attached = true;
+
+ /*
+ * Check current cable state/cable type and store cable
+ * type(info->prev_chg_type) for handling cable when
+ * charger cable is detached.
+ */
+ cable_type = info->prev_chg_type = chg_type;
+ }
+
+ break;
+ default:
+ dev_err(info->dev, "Unknown cable group (%d)\n", group);
+ cable_type = -EINVAL;
+ break;
+ }
+
+ return cable_type;
+}
+
static int max8997_muic_handle_usb(struct max8997_muic_info *info,
enum max8997_muic_usb_type usb_type, bool attached)
{
int ret = 0;
if (usb_type == MAX8997_USB_HOST) {
- /* switch to USB */
- ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
- attached ? MAX8997_SW_USB : MAX8997_SW_OPEN,
- SW_MASK);
- if (ret) {
+ ret = max8997_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0) {
dev_err(info->dev, "failed to update muic register\n");
- goto out;
+ return ret;
}
}
@@ -148,41 +353,39 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
extcon_set_cable_state(info->edev, "USB", attached);
break;
default:
- ret = -EINVAL;
- break;
+ dev_err(info->dev, "failed to detect %s usb cable\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
static int max8997_muic_handle_dock(struct max8997_muic_info *info,
- int adc, bool attached)
+ int cable_type, bool attached)
{
int ret = 0;
- /* switch to AUDIO */
- ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
- attached ? MAX8997_SW_AUDIO : MAX8997_SW_OPEN,
- SW_MASK);
+ ret = max8997_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
if (ret) {
dev_err(info->dev, "failed to update muic register\n");
- goto out;
+ return ret;
}
- switch (adc) {
- case MAX8997_ADC_DESKDOCK:
+ switch (cable_type) {
+ case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
extcon_set_cable_state(info->edev, "Dock-desk", attached);
break;
- case MAX8997_ADC_CARDOCK:
+ case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
extcon_set_cable_state(info->edev, "Dock-card", attached);
break;
default:
- ret = -EINVAL;
- break;
+ dev_err(info->dev, "failed to detect %s dock device\n",
+ attached ? "attached" : "detached");
+ return -EINVAL;
}
-out:
- return ret;
+
+ return 0;
}
static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
@@ -191,199 +394,188 @@ static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
int ret = 0;
/* switch to UART */
- ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
- attached ? MAX8997_SW_UART : MAX8997_SW_OPEN,
- SW_MASK);
+ ret = max8997_muic_set_path(info, info->path_uart, attached);
if (ret) {
dev_err(info->dev, "failed to update muic register\n");
- goto out;
+ return -EINVAL;
}
extcon_set_cable_state(info->edev, "JIG", attached);
-out:
- return ret;
-}
-
-static int max8997_muic_handle_adc_detach(struct max8997_muic_info *info)
-{
- int ret = 0;
- switch (info->pre_adc) {
- case MAX8997_ADC_GROUND:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, false);
- break;
- case MAX8997_ADC_MHL:
- extcon_set_cable_state(info->edev, "MHL", false);
- break;
- case MAX8997_ADC_JIG_USB_1:
- case MAX8997_ADC_JIG_USB_2:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, false);
- break;
- case MAX8997_ADC_DESKDOCK:
- case MAX8997_ADC_CARDOCK:
- ret = max8997_muic_handle_dock(info, info->pre_adc, false);
- break;
- case MAX8997_ADC_JIG_UART:
- ret = max8997_muic_handle_jig_uart(info, false);
- break;
- default:
- break;
- }
-
- return ret;
+ return 0;
}
-static int max8997_muic_handle_adc(struct max8997_muic_info *info, int adc)
+static int max8997_muic_adc_handler(struct max8997_muic_info *info)
{
+ int cable_type;
+ bool attached;
int ret = 0;
- switch (adc) {
- case MAX8997_ADC_GROUND:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, true);
- break;
- case MAX8997_ADC_MHL:
- extcon_set_cable_state(info->edev, "MHL", true);
- break;
- case MAX8997_ADC_JIG_USB_1:
- case MAX8997_ADC_JIG_USB_2:
- ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, true);
- break;
- case MAX8997_ADC_DESKDOCK:
- case MAX8997_ADC_CARDOCK:
- ret = max8997_muic_handle_dock(info, adc, true);
- break;
- case MAX8997_ADC_JIG_UART:
- ret = max8997_muic_handle_jig_uart(info, true);
- break;
- case MAX8997_ADC_OPEN:
- ret = max8997_muic_handle_adc_detach(info);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- info->pre_adc = adc;
-out:
- return ret;
-}
-
-static int max8997_muic_handle_charger_type_detach(
- struct max8997_muic_info *info)
-{
- switch (info->pre_charger_type) {
- case MAX8997_CHARGER_TYPE_USB:
- extcon_set_cable_state(info->edev, "USB", false);
- break;
- case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state(info->edev, "Charge-downstream", false);
- break;
- case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state(info->edev, "TA", false);
- break;
- case MAX8997_CHARGER_TYPE_500MA:
- extcon_set_cable_state(info->edev, "Slow-charger", false);
- break;
- case MAX8997_CHARGER_TYPE_1A:
- extcon_set_cable_state(info->edev, "Fast-charger", false);
- break;
+ /* Check cable state which is either detached or attached */
+ cable_type = max8997_muic_get_cable_type(info,
+ MAX8997_CABLE_GROUP_ADC, &attached);
+
+ switch (cable_type) {
+ case MAX8997_MUIC_ADC_GROUND:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX8997_MUIC_ADC_MHL:
+ extcon_set_cable_state(info->edev, "MHL", attached);
+ break;
+ case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
+ case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
+ case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
+ ret = max8997_muic_handle_dock(info, cable_type, attached);
+ if (ret < 0)
+ return ret;
+ break;
+ case MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF:
+ ret = max8997_muic_handle_jig_uart(info, attached);
+ break;
+ case MAX8997_MUIC_ADC_REMOTE_S1_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S2_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S3_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S4_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S5_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S6_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S7_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S8_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S9_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S10_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S11_BUTTON:
+ case MAX8997_MUIC_ADC_REMOTE_S12_BUTTON:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_1:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_2:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_3:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_4:
+ case MAX8997_MUIC_ADC_RESERVED_ACC_5:
+ case MAX8997_MUIC_ADC_CEA936_AUDIO:
+ case MAX8997_MUIC_ADC_PHONE_POWERED_DEV:
+ case MAX8997_MUIC_ADC_TTY_CONVERTER:
+ case MAX8997_MUIC_ADC_UART_CABLE:
+ case MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG:
+ case MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG:
+ case MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE:
+ /*
+		 * This cable isn't used in the general case. If it is needed
+		 * to detect an additional cable, proper handling should be
+		 * implemented for when this cable is attached/detached.
+ */
+ dev_info(info->dev,
+ "cable is %s but it isn't used (type:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
+ return -EAGAIN;
default:
+ dev_err(info->dev,
+ "failed to detect %s unknown cable (type:0x%x)\n",
+ attached ? "attached" : "detached", cable_type);
return -EINVAL;
- break;
}
return 0;
}
-static int max8997_muic_handle_charger_type(struct max8997_muic_info *info,
- enum max8997_muic_charger_type charger_type)
+static int max8997_muic_chg_handler(struct max8997_muic_info *info)
{
- u8 adc;
- int ret;
+ int chg_type;
+ bool attached;
+ int adc;
- ret = max8997_read_reg(info->muic, MAX8997_MUIC_REG_STATUS1, &adc);
- if (ret) {
- dev_err(info->dev, "failed to read muic register\n");
- goto out;
- }
+ chg_type = max8997_muic_get_cable_type(info,
+ MAX8997_CABLE_GROUP_CHG, &attached);
- switch (charger_type) {
+ switch (chg_type) {
case MAX8997_CHARGER_TYPE_NONE:
- ret = max8997_muic_handle_charger_type_detach(info);
break;
case MAX8997_CHARGER_TYPE_USB:
- if ((adc & STATUS1_ADC_MASK) == MAX8997_ADC_OPEN) {
+ adc = info->status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ if ((adc & STATUS1_ADC_MASK) == MAX8997_MUIC_ADC_OPEN) {
max8997_muic_handle_usb(info,
- MAX8997_USB_DEVICE, true);
+ MAX8997_USB_DEVICE, attached);
}
break;
case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
- extcon_set_cable_state(info->edev, "Charge-downstream", true);
+ extcon_set_cable_state(info->edev, "Charge-downstream", attached);
break;
case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
- extcon_set_cable_state(info->edev, "TA", true);
+ extcon_set_cable_state(info->edev, "TA", attached);
break;
case MAX8997_CHARGER_TYPE_500MA:
- extcon_set_cable_state(info->edev, "Slow-charger", true);
+ extcon_set_cable_state(info->edev, "Slow-charger", attached);
break;
case MAX8997_CHARGER_TYPE_1A:
- extcon_set_cable_state(info->edev, "Fast-charger", true);
+ extcon_set_cable_state(info->edev, "Fast-charger", attached);
break;
default:
- ret = -EINVAL;
- goto out;
+ dev_err(info->dev,
+ "failed to detect %s unknown chg cable (type:0x%x)\n",
+ attached ? "attached" : "detached", chg_type);
+ return -EINVAL;
}
- info->pre_charger_type = charger_type;
-out:
- return ret;
+ return 0;
}
static void max8997_muic_irq_work(struct work_struct *work)
{
struct max8997_muic_info *info = container_of(work,
struct max8997_muic_info, irq_work);
- u8 status[2];
- u8 adc, chg_type;
int irq_type = 0;
int i, ret;
+ if (!info->edev)
+ return;
+
mutex_lock(&info->mutex);
+ for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
+ if (info->irq == muic_irqs[i].virq)
+ irq_type = muic_irqs[i].irq;
+
ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
- 2, status);
+ 2, info->status);
if (ret) {
dev_err(info->dev, "failed to read muic register\n");
mutex_unlock(&info->mutex);
return;
}
- dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__,
- status[0], status[1]);
-
- for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
- if (info->irq == muic_irqs[i].virq)
- irq_type = muic_irqs[i].irq;
-
switch (irq_type) {
+ case MAX8997_MUICIRQ_ADCError:
+ case MAX8997_MUICIRQ_ADCLow:
case MAX8997_MUICIRQ_ADC:
- adc = status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
-
- max8997_muic_handle_adc(info, adc);
+		/* Handle all cables except the charger cable */
+ ret = max8997_muic_adc_handler(info);
break;
+ case MAX8997_MUICIRQ_VBVolt:
+ case MAX8997_MUICIRQ_DBChg:
+ case MAX8997_MUICIRQ_DCDTmr:
+ case MAX8997_MUICIRQ_ChgDetRun:
case MAX8997_MUICIRQ_ChgTyp:
- chg_type = status[1] & STATUS2_CHGTYP_MASK;
- chg_type >>= STATUS2_CHGTYP_SHIFT;
-
- max8997_muic_handle_charger_type(info, chg_type);
+ /* Handle charger cable */
+ ret = max8997_muic_chg_handler(info);
+ break;
+ case MAX8997_MUICIRQ_OVP:
break;
default:
dev_info(info->dev, "misc interrupt: irq %d occurred\n",
irq_type);
- break;
+ mutex_unlock(&info->mutex);
+ return;
}
+ if (ret < 0)
+ dev_err(info->dev, "failed to handle MUIC interrupt\n");
+
mutex_unlock(&info->mutex);
return;
@@ -401,29 +593,60 @@ static irqreturn_t max8997_muic_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void max8997_muic_detect_dev(struct max8997_muic_info *info)
+static int max8997_muic_detect_dev(struct max8997_muic_info *info)
{
- int ret;
- u8 status[2], adc, chg_type;
+ int ret = 0;
+ int adc;
+ int chg_type;
+ bool attached;
- ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
- 2, status);
+ mutex_lock(&info->mutex);
+
+ /* Read STATUSx register to detect accessory */
+ ret = max8997_bulk_read(info->muic,
+ MAX8997_MUIC_REG_STATUS1, 2, info->status);
if (ret) {
- dev_err(info->dev, "failed to read muic register\n");
- return;
+ dev_err(info->dev, "failed to read MUIC register\n");
+ mutex_unlock(&info->mutex);
+ return -EINVAL;
}
- dev_info(info->dev, "STATUS1:0x%x, STATUS2:0x%x\n",
- status[0], status[1]);
+ adc = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_ADC,
+ &attached);
+ if (attached && adc != MAX8997_MUIC_ADC_OPEN) {
+ ret = max8997_muic_adc_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect ADC cable\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
- adc = status[0] & STATUS1_ADC_MASK;
- adc >>= STATUS1_ADC_SHIFT;
+ chg_type = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_CHG,
+ &attached);
+ if (attached && chg_type != MAX8997_CHARGER_TYPE_NONE) {
+ ret = max8997_muic_chg_handler(info);
+ if (ret < 0) {
+ dev_err(info->dev, "Cannot detect charger cable\n");
+ mutex_unlock(&info->mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&info->mutex);
+
+ return 0;
+}
- chg_type = status[1] & STATUS2_CHGTYP_MASK;
- chg_type >>= STATUS2_CHGTYP_SHIFT;
+static void max8997_muic_detect_cable_wq(struct work_struct *work)
+{
+ struct max8997_muic_info *info = container_of(to_delayed_work(work),
+ struct max8997_muic_info, wq_detcable);
+ int ret;
- max8997_muic_handle_adc(info, adc);
- max8997_muic_handle_charger_type(info, chg_type);
+ ret = max8997_muic_detect_dev(info);
+ if (ret < 0)
+ pr_err("failed to detect cable type\n");
}
static int max8997_muic_probe(struct platform_device *pdev)
@@ -431,6 +654,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev);
struct max8997_muic_info *info;
+ int delay_jiffies;
int ret, i;
info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
@@ -459,8 +683,10 @@ static int max8997_muic_probe(struct platform_device *pdev)
}
muic_irq->virq = virq;
- ret = request_threaded_irq(virq, NULL, max8997_muic_irq_handler,
- 0, muic_irq->name, info);
+ ret = request_threaded_irq(virq, NULL,
+ max8997_muic_irq_handler,
+ IRQF_NO_SUSPEND,
+ muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d,"
@@ -496,10 +722,42 @@ static int max8997_muic_probe(struct platform_device *pdev)
}
}
- /* Initial device detection */
- max8997_muic_detect_dev(info);
+ /*
+	 * Set the default USB/UART path (either UART/USB or AUX_UART/AUX_USB)
+	 * for the COMP2/COMN1 h/w path in the CONTROL1 register.
+ */
+ if (pdata->muic_pdata->path_uart)
+ info->path_uart = pdata->muic_pdata->path_uart;
+ else
+ info->path_uart = CONTROL1_SW_UART;
+
+ if (pdata->muic_pdata->path_usb)
+ info->path_usb = pdata->muic_pdata->path_usb;
+ else
+ info->path_usb = CONTROL1_SW_USB;
+
+ /* Set initial path for UART */
+ max8997_muic_set_path(info, info->path_uart, true);
+
+ /* Set ADC debounce time */
+ max8997_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
+
+ /*
+	 * Detect the accessory after platform initialization has completed.
+	 *
+	 * - Use a delayed workqueue to detect the cable state and then
+	 * notify the cable state to the notifiee/platform through a uevent.
+	 * Once the platform has finished booting, the extcon provider
+	 * driver should notify the cable state to the upper layer.
+ */
+ INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
+ if (pdata->muic_pdata->detcable_delay_ms)
+ delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms);
+ else
+ delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+ schedule_delayed_work(&info->wq_detcable, delay_jiffies);
- return ret;
+ return 0;
err_irq:
while (--i >= 0)
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index e7a711f53a6f..2b27bff2591a 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -270,7 +270,7 @@ static int fwnet_header_cache(const struct neighbour *neigh,
if (type == cpu_to_be16(ETH_P_802_3))
return -1;
net = neigh->dev;
- h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h));
+ h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
h->h_proto = type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
hh->hh_len = FWNET_HLEN;
@@ -282,7 +282,7 @@ static int fwnet_header_cache(const struct neighbour *neigh,
static void fwnet_header_cache_update(struct hh_cache *hh,
const struct net_device *net, const unsigned char *haddr)
{
- memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len);
+ memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len);
}
static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
@@ -398,11 +398,11 @@ static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
new->datagram_label = datagram_label;
new->datagram_size = dg_size;
- new->skb = dev_alloc_skb(dg_size + net->hard_header_len + 15);
+ new->skb = dev_alloc_skb(dg_size + LL_RESERVED_SPACE(net));
if (new->skb == NULL)
goto fail_w_fi;
- skb_reserve(new->skb, (net->hard_header_len + 15) & ~15);
+ skb_reserve(new->skb, LL_RESERVED_SPACE(net));
new->pbuf = skb_put(new->skb, dg_size);
memcpy(new->pbuf + frag_off, frag_buf, frag_len);
list_add_tail(&new->pd_link, &peer->pd_list);
@@ -520,7 +520,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
dev = netdev_priv(net);
/* Write metadata, and then pass to the receive level */
skb->dev = net;
- skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
+ skb->ip_summed = CHECKSUM_NONE;
/*
* Parse the encapsulation header. This actually does the job of
@@ -690,14 +690,14 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
buf++;
len -= RFC2374_UNFRAG_HDR_SIZE;
- skb = dev_alloc_skb(len + net->hard_header_len + 15);
+ skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
if (unlikely(!skb)) {
dev_err(&net->dev, "out of memory\n");
net->stats.rx_dropped++;
return -ENOMEM;
}
- skb_reserve(skb, (net->hard_header_len + 15) & ~15);
+ skb_reserve(skb, LL_RESERVED_SPACE(net));
memcpy(skb_put(skb, len), buf, len);
return fwnet_finish_incoming_packet(net, skb, source_node_id,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6ce6e07c38c1..45912e6e0ac2 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -329,7 +329,7 @@ module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
- ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
+ ", AR/selfID endianness = " __stringify(QUIRK_BE_HEADERS)
", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
", disable MSI = " __stringify(QUIRK_NO_MSI)
", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index fd3ae6290d71..982f1f5f5742 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -471,7 +471,7 @@ void __init dmi_scan_machine(void)
char __iomem *p, *q;
int rc;
- if (efi_enabled) {
+ if (efi_enabled(EFI_CONFIG_TABLES)) {
if (efi.smbios == EFI_INVALID_TABLE_ADDR)
goto error;
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 7b1c37497c9a..fed08b661711 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -158,6 +158,13 @@ efivar_create_sysfs_entry(struct efivars *efivars,
efi_char16_t *variable_name,
efi_guid_t *vendor_guid);
+/*
+ * Prototype for the workqueue function that updates sysfs entries
+ */
+
+static void efivar_update_sysfs_entries(struct work_struct *);
+static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
+
/* Return the number of unicode characters in data */
static unsigned long
utf16_strnlen(efi_char16_t *s, size_t maxlength)
@@ -405,10 +412,11 @@ static efi_status_t
get_var_data(struct efivars *efivars, struct efi_variable *var)
{
efi_status_t status;
+ unsigned long flags;
- spin_lock(&efivars->lock);
+ spin_lock_irqsave(&efivars->lock, flags);
status = get_var_data_locked(efivars, var);
- spin_unlock(&efivars->lock);
+ spin_unlock_irqrestore(&efivars->lock, flags);
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n",
@@ -537,14 +545,14 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
return -EINVAL;
}
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
new_var->Attributes,
new_var->DataSize,
new_var->Data);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
@@ -674,7 +682,7 @@ static int efi_status_to_err(efi_status_t status)
err = -EACCES;
break;
case EFI_NOT_FOUND:
- err = -ENOENT;
+ err = -EIO;
break;
default:
err = -EINVAL;
@@ -713,7 +721,7 @@ static ssize_t efivarfs_file_write(struct file *file,
* amounts of memory. Pick a default size of 64K if
* QueryVariableInfo() isn't supported by the firmware.
*/
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
if (!efivars->ops->query_variable_info)
status = EFI_UNSUPPORTED;
@@ -723,7 +731,7 @@ static ssize_t efivarfs_file_write(struct file *file,
&remaining_size, &max_size);
}
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
if (status != EFI_SUCCESS) {
if (status != EFI_UNSUPPORTED)
@@ -754,7 +762,7 @@ static ssize_t efivarfs_file_write(struct file *file,
* set_variable call, and removal of the variable from the efivars
* list (in the case of an authenticated delete).
*/
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
status = efivars->ops->set_variable(var->var.VariableName,
&var->var.VendorGuid,
@@ -762,7 +770,7 @@ static ssize_t efivarfs_file_write(struct file *file,
data);
if (status != EFI_SUCCESS) {
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
kfree(data);
return efi_status_to_err(status);
@@ -783,20 +791,21 @@ static ssize_t efivarfs_file_write(struct file *file,
NULL);
if (status == EFI_BUFFER_TOO_SMALL) {
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
mutex_lock(&inode->i_mutex);
i_size_write(inode, newdatasize + sizeof(attributes));
mutex_unlock(&inode->i_mutex);
} else if (status == EFI_NOT_FOUND) {
list_del(&var->list);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
efivar_unregister(var);
drop_nlink(inode);
+ d_delete(file->f_dentry);
dput(file->f_dentry);
} else {
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
pr_warn("efivarfs: inconsistent EFI variable implementation? "
"status = %lx\n", status);
}
@@ -818,11 +827,11 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
void *data;
ssize_t size = 0;
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
status = efivars->ops->get_variable(var->var.VariableName,
&var->var.VendorGuid,
&attributes, &datasize, NULL);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
if (status != EFI_BUFFER_TOO_SMALL)
return efi_status_to_err(status);
@@ -832,12 +841,12 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
if (!data)
return -ENOMEM;
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
status = efivars->ops->get_variable(var->var.VariableName,
&var->var.VendorGuid,
&attributes, &datasize,
(data + sizeof(attributes)));
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
if (status != EFI_SUCCESS) {
size = efi_status_to_err(status);
@@ -965,9 +974,9 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
goto out;
kobject_uevent(&var->kobj, KOBJ_ADD);
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
list_add(&var->list, &efivars->list);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
d_instantiate(dentry, inode);
dget(dentry);
out:
@@ -984,7 +993,7 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
struct efivars *efivars = var->efivars;
efi_status_t status;
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
status = efivars->ops->set_variable(var->var.VariableName,
&var->var.VendorGuid,
@@ -992,14 +1001,14 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
if (status == EFI_SUCCESS || status == EFI_NOT_FOUND) {
list_del(&var->list);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
efivar_unregister(var);
- drop_nlink(dir);
+ drop_nlink(dentry->d_inode);
dput(dentry);
return 0;
}
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return -EINVAL;
};
@@ -1065,13 +1074,13 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
/* copied by the above to local storage in the dentry. */
kfree(name);
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
efivars->ops->get_variable(entry->var.VariableName,
&entry->var.VendorGuid,
&entry->var.Attributes,
&size,
NULL);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
mutex_lock(&inode->i_mutex);
inode->i_private = entry;
@@ -1122,7 +1131,7 @@ static int efi_pstore_open(struct pstore_info *psi)
{
struct efivars *efivars = psi->data;
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
efivars->walk_entry = list_first_entry(&efivars->list,
struct efivar_entry, list);
return 0;
@@ -1132,7 +1141,7 @@ static int efi_pstore_close(struct pstore_info *psi)
{
struct efivars *efivars = psi->data;
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return 0;
}
@@ -1208,8 +1217,18 @@ static int efi_pstore_write(enum pstore_type_id type,
int i, ret = 0;
u64 storage_space, remaining_space, max_variable_size;
efi_status_t status = EFI_NOT_FOUND;
-
- spin_lock(&efivars->lock);
+ unsigned long flags;
+
+ if (pstore_cannot_block_path(reason)) {
+ /*
+		 * If the lock is held by another CPU on a non-blocking
+		 * path, this driver returns without entering firmware to
+		 * avoid hanging.
+ */
+ if (!spin_trylock_irqsave(&efivars->lock, flags))
+ return -EBUSY;
+ } else
+ spin_lock_irqsave(&efivars->lock, flags);
/*
* Check if there is a space enough to log.
@@ -1221,7 +1240,7 @@ static int efi_pstore_write(enum pstore_type_id type,
&remaining_space,
&max_variable_size);
if (status || remaining_space < size + DUMP_NAME_LEN * 2) {
- spin_unlock(&efivars->lock);
+ spin_unlock_irqrestore(&efivars->lock, flags);
*id = part;
return -ENOSPC;
}
@@ -1235,13 +1254,10 @@ static int efi_pstore_write(enum pstore_type_id type,
efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
size, psi->buf);
- spin_unlock(&efivars->lock);
+ spin_unlock_irqrestore(&efivars->lock, flags);
- if (size)
- ret = efivar_create_sysfs_entry(efivars,
- utf16_strsize(efi_name,
- DUMP_NAME_LEN * 2),
- efi_name, &vendor);
+ if (reason == KMSG_DUMP_OOPS)
+ schedule_work(&efivar_work);
*id = part;
return ret;
@@ -1262,7 +1278,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
time.tv_sec);
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
for (i = 0; i < DUMP_NAME_LEN; i++)
efi_name[i] = name[i];
@@ -1306,7 +1322,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
if (found)
list_del(&found->list);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
if (found)
efivar_unregister(found);
@@ -1376,7 +1392,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
return -EINVAL;
}
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
/*
* Does this variable already exist?
@@ -1394,7 +1410,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
}
}
if (found) {
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return -EINVAL;
}
@@ -1408,10 +1424,10 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
status);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return -EIO;
}
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
/* Create the entry in sysfs. Locking is not required here */
status = efivar_create_sysfs_entry(efivars,
@@ -1439,7 +1455,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
/*
* Does this variable already exist?
@@ -1457,7 +1473,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
}
}
if (!found) {
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return -EINVAL;
}
/* force the Attributes/DataSize to 0 to ensure deletion */
@@ -1473,18 +1489,87 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
status);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return -EIO;
}
list_del(&search_efivar->list);
/* We need to release this lock before unregistering. */
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
efivar_unregister(search_efivar);
/* It's dead Jim.... */
return count;
}
+static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
+{
+ struct efivar_entry *entry, *n;
+ struct efivars *efivars = &__efivars;
+ unsigned long strsize1, strsize2;
+ bool found = false;
+
+ strsize1 = utf16_strsize(variable_name, 1024);
+ list_for_each_entry_safe(entry, n, &efivars->list, list) {
+ strsize2 = utf16_strsize(entry->var.VariableName, 1024);
+ if (strsize1 == strsize2 &&
+ !memcmp(variable_name, &(entry->var.VariableName),
+ strsize2) &&
+ !efi_guidcmp(entry->var.VendorGuid,
+ *vendor)) {
+ found = true;
+ break;
+ }
+ }
+ return found;
+}
+
+static void efivar_update_sysfs_entries(struct work_struct *work)
+{
+ struct efivars *efivars = &__efivars;
+ efi_guid_t vendor;
+ efi_char16_t *variable_name;
+ unsigned long variable_name_size = 1024;
+ efi_status_t status = EFI_NOT_FOUND;
+ bool found;
+
+ /* Add new sysfs entries */
+ while (1) {
+ variable_name = kzalloc(variable_name_size, GFP_KERNEL);
+ if (!variable_name) {
+ pr_err("efivars: Memory allocation failed.\n");
+ return;
+ }
+
+ spin_lock_irq(&efivars->lock);
+ found = false;
+ while (1) {
+ variable_name_size = 1024;
+ status = efivars->ops->get_next_variable(
+ &variable_name_size,
+ variable_name,
+ &vendor);
+ if (status != EFI_SUCCESS) {
+ break;
+ } else {
+ if (!variable_is_present(variable_name,
+ &vendor)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ spin_unlock_irq(&efivars->lock);
+
+ if (!found) {
+ kfree(variable_name);
+ break;
+ } else
+ efivar_create_sysfs_entry(efivars,
+ variable_name_size,
+ variable_name, &vendor);
+ }
+}
+
/*
* Let's not leave out systab information that snuck into
* the efivars driver
@@ -1593,9 +1678,9 @@ efivar_create_sysfs_entry(struct efivars *efivars,
kfree(short_name);
short_name = NULL;
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
list_add(&new_efivar->list, &efivars->list);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
return 0;
}
@@ -1664,9 +1749,9 @@ void unregister_efivars(struct efivars *efivars)
struct efivar_entry *entry, *n;
list_for_each_entry_safe(entry, n, &efivars->list, list) {
- spin_lock(&efivars->lock);
+ spin_lock_irq(&efivars->lock);
list_del(&entry->list);
- spin_unlock(&efivars->lock);
+ spin_unlock_irq(&efivars->lock);
efivar_unregister(entry);
}
if (efivars->new_var)
@@ -1782,7 +1867,7 @@ efivars_init(void)
printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
EFIVARS_DATE);
- if (!efi_enabled)
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
return 0;
/* For now we'll register the efi directory at /sys/firmware/efi */
@@ -1822,7 +1907,9 @@ err_put:
static void __exit
efivars_exit(void)
{
- if (efi_enabled) {
+ cancel_work_sync(&efivar_work);
+
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
unregister_efivars(&__efivars);
kobject_put(efi_kobj);
}
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index 4da4eb9ae926..2224f1dc074b 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep)
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */
- if (!efi_enabled)
+ if (!efi_enabled(EFI_BOOT))
find_ibft_in_mem();
if (ibft_addr) {
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 90723e65b081..0b5b5f619c75 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
+#include <linux/mm.h>
/*
* Data types ------------------------------------------------------------------
@@ -52,6 +53,9 @@ static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type);
+
/*
* Static data -----------------------------------------------------------------
*/
@@ -79,7 +83,52 @@ static const struct sysfs_ops memmap_attr_ops = {
.show = memmap_attr_show,
};
-static struct kobj_type memmap_ktype = {
+/* Firmware memory map entries. */
+static LIST_HEAD(map_entries);
+static DEFINE_SPINLOCK(map_entries_lock);
+
+/*
+ * For memory hotplug, there is no way to free memory map entries allocated
+ * by boot mem after the system is up. So when we hot-remove memory whose
+ * map entry is allocated by bootmem, we need to remember the storage and
+ * reuse it when the memory is hot-added again.
+ */
+static LIST_HEAD(map_entries_bootmem);
+static DEFINE_SPINLOCK(map_entries_bootmem_lock);
+
+
+static inline struct firmware_map_entry *
+to_memmap_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct firmware_map_entry, kobj);
+}
+
+static void __meminit release_firmware_map_entry(struct kobject *kobj)
+{
+ struct firmware_map_entry *entry = to_memmap_entry(kobj);
+
+ if (PageReserved(virt_to_page(entry))) {
+ /*
+ * Remember the storage allocated by bootmem, and reuse it when
+ * the memory is hot-added again. The entry will be added to
+ * map_entries_bootmem here, and deleted from &map_entries in
+ * firmware_map_remove_entry().
+ */
+ if (firmware_map_find_entry(entry->start, entry->end,
+ entry->type)) {
+ spin_lock(&map_entries_bootmem_lock);
+ list_add(&entry->list, &map_entries_bootmem);
+ spin_unlock(&map_entries_bootmem_lock);
+ }
+
+ return;
+ }
+
+ kfree(entry);
+}
+
+static struct kobj_type __refdata memmap_ktype = {
+ .release = release_firmware_map_entry,
.sysfs_ops = &memmap_attr_ops,
.default_attrs = def_attrs,
};
@@ -88,13 +137,6 @@ static struct kobj_type memmap_ktype = {
* Registration functions ------------------------------------------------------
*/
-/*
- * Firmware memory map entries. No locking is needed because the
- * firmware_map_add() and firmware_map_add_early() functions are called
- * in firmware initialisation code in one single thread of execution.
- */
-static LIST_HEAD(map_entries);
-
/**
* firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
* @start: Start of the memory range.
@@ -118,11 +160,25 @@ static int firmware_map_add_entry(u64 start, u64 end,
INIT_LIST_HEAD(&entry->list);
kobject_init(&entry->kobj, &memmap_ktype);
+ spin_lock(&map_entries_lock);
list_add_tail(&entry->list, &map_entries);
+ spin_unlock(&map_entries_lock);
return 0;
}
+/**
+ * firmware_map_remove_entry() - Does the real work to remove a firmware
+ * memmap entry.
+ * @entry: removed entry.
+ *
+ * The caller must hold map_entries_lock, and release it properly.
+ **/
+static inline void firmware_map_remove_entry(struct firmware_map_entry *entry)
+{
+ list_del(&entry->list);
+}
+
/*
* Add memmap entry on sysfs
*/
@@ -144,6 +200,78 @@ static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
return 0;
}
+/*
+ * Remove memmap entry on sysfs
+ */
+static inline void remove_sysfs_fw_map_entry(struct firmware_map_entry *entry)
+{
+ kobject_put(&entry->kobj);
+}
+
+/*
+ * firmware_map_find_entry_in_list() - Search memmap entry in a given list.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ * @list: In which to find the entry.
+ *
+ * This function finds the memmap entry of a given memory range in a
+ * given list. The caller must hold map_entries_lock, and must not release
+ * the lock until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_in_list(u64 start, u64 end, const char *type,
+ struct list_head *list)
+{
+ struct firmware_map_entry *entry;
+
+ list_for_each_entry(entry, list, list)
+ if ((entry->start == start) && (entry->end == end) &&
+ (!strcmp(entry->type, type))) {
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*
+ * firmware_map_find_entry() - Search memmap entry in map_entries.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function finds the memmap entry of a given memory range.
+ * The caller must hold map_entries_lock, and must not release the lock
+ * until the processing of the returned entry has completed.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry(u64 start, u64 end, const char *type)
+{
+ return firmware_map_find_entry_in_list(start, end, type, &map_entries);
+}
+
+/*
+ * firmware_map_find_entry_bootmem() - Search memmap entry in map_entries_bootmem.
+ * @start: Start of the memory range.
+ * @end: End of the memory range (exclusive).
+ * @type: Type of the memory range.
+ *
+ * This function is similar to firmware_map_find_entry except that it finds the
+ * given entry in map_entries_bootmem.
+ *
+ * Return: Pointer to the entry to be found on success, or NULL on failure.
+ */
+static struct firmware_map_entry * __meminit
+firmware_map_find_entry_bootmem(u64 start, u64 end, const char *type)
+{
+ return firmware_map_find_entry_in_list(start, end, type,
+ &map_entries_bootmem);
+}
+
/**
* firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
* memory hotplug.
@@ -161,9 +289,19 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
- entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
- if (!entry)
- return -ENOMEM;
+ entry = firmware_map_find_entry_bootmem(start, end, type);
+ if (!entry) {
+ entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+ } else {
+ /* Reuse storage allocated by bootmem. */
+ spin_lock(&map_entries_bootmem_lock);
+ list_del(&entry->list);
+ spin_unlock(&map_entries_bootmem_lock);
+
+ memset(entry, 0, sizeof(*entry));
+ }
firmware_map_add_entry(start, end, type, entry);
/* create the memmap entry */
@@ -196,6 +334,36 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type)
return firmware_map_add_entry(start, end, type, entry);
}
+/**
+ * firmware_map_remove() - remove a firmware mapping entry
+ * @start: Start of the memory range.
+ * @end: End of the memory range.
+ * @type: Type of the memory range.
+ *
+ * Removes a firmware mapping entry.
+ *
+ * Returns 0 on success, or -EINVAL if no matching entry exists.
+ **/
+int __meminit firmware_map_remove(u64 start, u64 end, const char *type)
+{
+ struct firmware_map_entry *entry;
+
+ spin_lock(&map_entries_lock);
+ entry = firmware_map_find_entry(start, end - 1, type);
+ if (!entry) {
+ spin_unlock(&map_entries_lock);
+ return -EINVAL;
+ }
+
+ firmware_map_remove_entry(entry);
+ spin_unlock(&map_entries_lock);
+
+ /* remove the memmap entry */
+ remove_sysfs_fw_map_entry(entry);
+
+ return 0;
+}
+
/*
* Sysfs functions -------------------------------------------------------------
*/
@@ -217,8 +385,10 @@ static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
}
-#define to_memmap_attr(_attr) container_of(_attr, struct memmap_attribute, attr)
-#define to_memmap_entry(obj) container_of(obj, struct firmware_map_entry, kobj)
+static inline struct memmap_attribute *to_memmap_attr(struct attribute *attr)
+{
+ return container_of(attr, struct memmap_attribute, attr);
+}
static ssize_t memmap_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 682de754d63f..b89d250f56e7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -66,7 +66,7 @@ config DEBUG_GPIO
config GPIO_SYSFS
bool "/sys/class/gpio/... (sysfs interface)"
- depends on SYSFS && EXPERIMENTAL
+ depends on SYSFS
help
Say Y here to add a sysfs interface for GPIOs.
@@ -277,7 +277,7 @@ config GPIO_ICH
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
select MFD_VX855
help
@@ -599,7 +599,7 @@ config GPIO_TIMBERDALE
config GPIO_RDC321X
tristate "RDC R-321x GPIO support"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select MFD_CORE
select MFD_RDC321X
help
@@ -657,12 +657,6 @@ config GPIO_JANZ_TTL
This driver provides support for driving the pins in output
mode only. Input mode is not supported.
-config GPIO_AB8500
- bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
- depends on AB8500_CORE && BROKEN
- help
- Select this to enable the AB8500 IC GPIO driver
-
config GPIO_TPS6586X
bool "TPS6586X GPIO"
depends on MFD_TPS6586X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index c5aebd008dde..45a388c21d04 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
-obj-$(CONFIG_GPIO_AB8500) += gpio-ab8500.o
obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c
deleted file mode 100644
index 983ad425f0ac..000000000000
--- a/drivers/gpio/gpio-ab8500.c
+++ /dev/null
@@ -1,520 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2011
- *
- * Author: BIBEK BASU <bibek.basu@stericsson.com>
- * License terms: GNU General Public License (GPL) version 2
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/ab8500.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500/gpio.h>
-
-/*
- * GPIO registers offset
- * Bank: 0x10
- */
-#define AB8500_GPIO_SEL1_REG 0x00
-#define AB8500_GPIO_SEL2_REG 0x01
-#define AB8500_GPIO_SEL3_REG 0x02
-#define AB8500_GPIO_SEL4_REG 0x03
-#define AB8500_GPIO_SEL5_REG 0x04
-#define AB8500_GPIO_SEL6_REG 0x05
-
-#define AB8500_GPIO_DIR1_REG 0x10
-#define AB8500_GPIO_DIR2_REG 0x11
-#define AB8500_GPIO_DIR3_REG 0x12
-#define AB8500_GPIO_DIR4_REG 0x13
-#define AB8500_GPIO_DIR5_REG 0x14
-#define AB8500_GPIO_DIR6_REG 0x15
-
-#define AB8500_GPIO_OUT1_REG 0x20
-#define AB8500_GPIO_OUT2_REG 0x21
-#define AB8500_GPIO_OUT3_REG 0x22
-#define AB8500_GPIO_OUT4_REG 0x23
-#define AB8500_GPIO_OUT5_REG 0x24
-#define AB8500_GPIO_OUT6_REG 0x25
-
-#define AB8500_GPIO_PUD1_REG 0x30
-#define AB8500_GPIO_PUD2_REG 0x31
-#define AB8500_GPIO_PUD3_REG 0x32
-#define AB8500_GPIO_PUD4_REG 0x33
-#define AB8500_GPIO_PUD5_REG 0x34
-#define AB8500_GPIO_PUD6_REG 0x35
-
-#define AB8500_GPIO_IN1_REG 0x40
-#define AB8500_GPIO_IN2_REG 0x41
-#define AB8500_GPIO_IN3_REG 0x42
-#define AB8500_GPIO_IN4_REG 0x43
-#define AB8500_GPIO_IN5_REG 0x44
-#define AB8500_GPIO_IN6_REG 0x45
-#define AB8500_GPIO_ALTFUN_REG 0x45
-#define ALTFUN_REG_INDEX 6
-#define AB8500_NUM_GPIO 42
-#define AB8500_NUM_VIR_GPIO_IRQ 16
-
-enum ab8500_gpio_action {
- NONE,
- STARTUP,
- SHUTDOWN,
- MASK,
- UNMASK
-};
-
-struct ab8500_gpio {
- struct gpio_chip chip;
- struct ab8500 *parent;
- struct device *dev;
- struct mutex lock;
- u32 irq_base;
- enum ab8500_gpio_action irq_action;
- u16 rising;
- u16 falling;
-};
-/**
- * to_ab8500_gpio() - get the pointer to ab8500_gpio
- * @chip: Member of the structure ab8500_gpio
- */
-static inline struct ab8500_gpio *to_ab8500_gpio(struct gpio_chip *chip)
-{
- return container_of(chip, struct ab8500_gpio, chip);
-}
-
-static int ab8500_gpio_set_bits(struct gpio_chip *chip, u8 reg,
- unsigned offset, int val)
-{
- struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
- u8 pos = offset % 8;
- int ret;
-
- reg = reg + (offset / 8);
- ret = abx500_mask_and_set_register_interruptible(ab8500_gpio->dev,
- AB8500_MISC, reg, 1 << pos, val << pos);
- if (ret < 0)
- dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
- return ret;
-}
-/**
- * ab8500_gpio_get() - Get the particular GPIO value
- * @chip: Gpio device
- * @offset: GPIO number to read
- */
-static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
- u8 mask = 1 << (offset % 8);
- u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8);
- int ret;
- u8 data;
- ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
- reg, &data);
- if (ret < 0) {
- dev_err(ab8500_gpio->dev, "%s read failed\n", __func__);
- return ret;
- }
- return (data & mask) >> (offset % 8);
-}
-
-static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
-{
- struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
- int ret;
- /* Write the data */
- ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, 1);
- if (ret < 0)
- dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
-}
-
-static int ab8500_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int val)
-{
- int ret;
- /* set direction as output */
- ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 1);
- if (ret < 0)
- return ret;
- /* disable pull down */
- ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG, offset, 1);
- if (ret < 0)
- return ret;
- /* set the output as 1 or 0 */
- return ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
-
-}
-
-static int ab8500_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- /* set the register as input */
- return ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 0);
-}
-
-static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- /*
- * Only some GPIOs are interrupt capable, and they are
- * organized in discontiguous clusters:
- *
- * GPIO6 to GPIO13
- * GPIO24 and GPIO25
- * GPIO36 to GPIO41
- */
- static struct ab8500_gpio_irq_cluster {
- int start;
- int end;
- } clusters[] = {
- {.start = 6, .end = 13},
- {.start = 24, .end = 25},
- {.start = 36, .end = 41},
- };
- struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
- int base = ab8500_gpio->irq_base;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(clusters); i++) {
- struct ab8500_gpio_irq_cluster *cluster = &clusters[i];
-
- if (offset >= cluster->start && offset <= cluster->end)
- return base + offset - cluster->start;
-
- /* Advance by the number of gpios in this cluster */
- base += cluster->end - cluster->start + 1;
- }
-
- return -EINVAL;
-}
-
-static struct gpio_chip ab8500gpio_chip = {
- .label = "ab8500_gpio",
- .owner = THIS_MODULE,
- .direction_input = ab8500_gpio_direction_input,
- .get = ab8500_gpio_get,
- .direction_output = ab8500_gpio_direction_output,
- .set = ab8500_gpio_set,
- .to_irq = ab8500_gpio_to_irq,
-};
-
-static unsigned int irq_to_rising(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- int offset = irq - ab8500_gpio->irq_base;
- int new_irq = offset + AB8500_INT_GPIO6R
- + ab8500_gpio->parent->irq_base;
- return new_irq;
-}
-
-static unsigned int irq_to_falling(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- int offset = irq - ab8500_gpio->irq_base;
- int new_irq = offset + AB8500_INT_GPIO6F
- + ab8500_gpio->parent->irq_base;
- return new_irq;
-
-}
-
-static unsigned int rising_to_irq(unsigned int irq, void *dev)
-{
- struct ab8500_gpio *ab8500_gpio = dev;
- int offset = irq - AB8500_INT_GPIO6R
- - ab8500_gpio->parent->irq_base ;
- int new_irq = offset + ab8500_gpio->irq_base;
- return new_irq;
-}
-
-static unsigned int falling_to_irq(unsigned int irq, void *dev)
-{
- struct ab8500_gpio *ab8500_gpio = dev;
- int offset = irq - AB8500_INT_GPIO6F
- - ab8500_gpio->parent->irq_base ;
- int new_irq = offset + ab8500_gpio->irq_base;
- return new_irq;
-
-}
-
-/*
- * IRQ handler
- */
-
-static irqreturn_t handle_rising(int irq, void *dev)
-{
-
- handle_nested_irq(rising_to_irq(irq , dev));
- return IRQ_HANDLED;
-}
-
-static irqreturn_t handle_falling(int irq, void *dev)
-{
-
- handle_nested_irq(falling_to_irq(irq, dev));
- return IRQ_HANDLED;
-}
-
-static void ab8500_gpio_irq_lock(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- mutex_lock(&ab8500_gpio->lock);
-}
-
-static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- int offset = irq - ab8500_gpio->irq_base;
- bool rising = ab8500_gpio->rising & BIT(offset);
- bool falling = ab8500_gpio->falling & BIT(offset);
- int ret;
-
- switch (ab8500_gpio->irq_action) {
- case STARTUP:
- if (rising)
- ret = request_threaded_irq(irq_to_rising(irq),
- NULL, handle_rising,
- IRQF_TRIGGER_RISING,
- "ab8500-gpio-r", ab8500_gpio);
- if (falling)
- ret = request_threaded_irq(irq_to_falling(irq),
- NULL, handle_falling,
- IRQF_TRIGGER_FALLING,
- "ab8500-gpio-f", ab8500_gpio);
- break;
- case SHUTDOWN:
- if (rising)
- free_irq(irq_to_rising(irq), ab8500_gpio);
- if (falling)
- free_irq(irq_to_falling(irq), ab8500_gpio);
- break;
- case MASK:
- if (rising)
- disable_irq(irq_to_rising(irq));
- if (falling)
- disable_irq(irq_to_falling(irq));
- break;
- case UNMASK:
- if (rising)
- enable_irq(irq_to_rising(irq));
- if (falling)
- enable_irq(irq_to_falling(irq));
- break;
- case NONE:
- break;
- }
- ab8500_gpio->irq_action = NONE;
- ab8500_gpio->rising &= ~(BIT(offset));
- ab8500_gpio->falling &= ~(BIT(offset));
- mutex_unlock(&ab8500_gpio->lock);
-}
-
-
-static void ab8500_gpio_irq_mask(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- ab8500_gpio->irq_action = MASK;
-}
-
-static void ab8500_gpio_irq_unmask(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- ab8500_gpio->irq_action = UNMASK;
-}
-
-static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- int offset = irq - ab8500_gpio->irq_base;
-
- if (type == IRQ_TYPE_EDGE_BOTH) {
- ab8500_gpio->rising = BIT(offset);
- ab8500_gpio->falling = BIT(offset);
- } else if (type == IRQ_TYPE_EDGE_RISING) {
- ab8500_gpio->rising = BIT(offset);
- } else {
- ab8500_gpio->falling = BIT(offset);
- }
- return 0;
-}
-
-unsigned int ab8500_gpio_irq_startup(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- ab8500_gpio->irq_action = STARTUP;
- return 0;
-}
-
-void ab8500_gpio_irq_shutdown(unsigned int irq)
-{
- struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
- ab8500_gpio->irq_action = SHUTDOWN;
-}
-
-static struct irq_chip ab8500_gpio_irq_chip = {
- .name = "ab8500-gpio",
- .startup = ab8500_gpio_irq_startup,
- .shutdown = ab8500_gpio_irq_shutdown,
- .bus_lock = ab8500_gpio_irq_lock,
- .bus_sync_unlock = ab8500_gpio_irq_sync_unlock,
- .mask = ab8500_gpio_irq_mask,
- .unmask = ab8500_gpio_irq_unmask,
- .set_type = ab8500_gpio_irq_set_type,
-};
-
-static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
-{
- u32 base = ab8500_gpio->irq_base;
- int irq;
-
- for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) {
- set_irq_chip_data(irq, ab8500_gpio);
- set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip,
- handle_simple_irq);
- set_irq_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- set_irq_noprobe(irq);
-#endif
- }
-
- return 0;
-}
-
-static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio)
-{
- int base = ab8500_gpio->irq_base;
- int irq;
-
- for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ; irq++) {
-#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
-#endif
- set_irq_chip_and_handler(irq, NULL, NULL);
- set_irq_chip_data(irq, NULL);
- }
-}
-
-static int ab8500_gpio_probe(struct platform_device *pdev)
-{
- struct ab8500_platform_data *ab8500_pdata =
- dev_get_platdata(pdev->dev.parent);
- struct ab8500_gpio_platform_data *pdata;
- struct ab8500_gpio *ab8500_gpio;
- int ret;
- int i;
-
- pdata = ab8500_pdata->gpio;
- if (!pdata) {
- dev_err(&pdev->dev, "gpio platform data missing\n");
- return -ENODEV;
- }
-
- ab8500_gpio = kzalloc(sizeof(struct ab8500_gpio), GFP_KERNEL);
- if (ab8500_gpio == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
- return -ENOMEM;
- }
- ab8500_gpio->dev = &pdev->dev;
- ab8500_gpio->parent = dev_get_drvdata(pdev->dev.parent);
- ab8500_gpio->chip = ab8500gpio_chip;
- ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO;
- ab8500_gpio->chip.dev = &pdev->dev;
- ab8500_gpio->chip.base = pdata->gpio_base;
- ab8500_gpio->irq_base = pdata->irq_base;
- /* initialize the lock */
- mutex_init(&ab8500_gpio->lock);
- /*
- * AB8500 core will handle and clear the IRQ
- * configre GPIO based on config-reg value.
- * These values are for selecting the PINs as
- * GPIO or alternate function
- */
- for (i = AB8500_GPIO_SEL1_REG; i <= AB8500_GPIO_SEL6_REG; i++) {
- ret = abx500_set_register_interruptible(ab8500_gpio->dev,
- AB8500_MISC, i,
- pdata->config_reg[i]);
- if (ret < 0)
- goto out_free;
- }
- ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
- AB8500_GPIO_ALTFUN_REG,
- pdata->config_reg[ALTFUN_REG_INDEX]);
- if (ret < 0)
- goto out_free;
-
- ret = ab8500_gpio_irq_init(ab8500_gpio);
- if (ret)
- goto out_free;
- ret = gpiochip_add(&ab8500_gpio->chip);
- if (ret) {
- dev_err(&pdev->dev, "unable to add gpiochip: %d\n",
- ret);
- goto out_rem_irq;
- }
- platform_set_drvdata(pdev, ab8500_gpio);
- return 0;
-
-out_rem_irq:
- ab8500_gpio_irq_remove(ab8500_gpio);
-out_free:
- mutex_destroy(&ab8500_gpio->lock);
- kfree(ab8500_gpio);
- return ret;
-}
-
-/*
- * ab8500_gpio_remove() - remove Ab8500-gpio driver
- * @pdev : Platform device registered
- */
-static int ab8500_gpio_remove(struct platform_device *pdev)
-{
- struct ab8500_gpio *ab8500_gpio = platform_get_drvdata(pdev);
- int ret;
-
- ret = gpiochip_remove(&ab8500_gpio->chip);
- if (ret < 0) {
- dev_err(ab8500_gpio->dev, "unable to remove gpiochip: %d\n",
- ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, NULL);
- mutex_destroy(&ab8500_gpio->lock);
- kfree(ab8500_gpio);
-
- return 0;
-}
-
-static struct platform_driver ab8500_gpio_driver = {
- .driver = {
- .name = "ab8500-gpio",
- .owner = THIS_MODULE,
- },
- .probe = ab8500_gpio_probe,
- .remove = ab8500_gpio_remove,
-};
-
-static int __init ab8500_gpio_init(void)
-{
- return platform_driver_register(&ab8500_gpio_driver);
-}
-arch_initcall(ab8500_gpio_init);
-
-static void __exit ab8500_gpio_exit(void)
-{
- platform_driver_unregister(&ab8500_gpio_driver);
-}
-module_exit(ab8500_gpio_exit);
-
-MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
-MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO");
-MODULE_ALIAS("platform:ab8500-gpio");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 7d9bd94be8d2..7472182967ce 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -33,6 +33,7 @@
* interrupts.
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/irq.h>
@@ -544,12 +545,9 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.of_node = np;
spin_lock_init(&mvchip->lock);
- mvchip->membase = devm_request_and_ioremap(&pdev->dev, res);
- if (! mvchip->membase) {
- dev_err(&pdev->dev, "Cannot ioremap\n");
- kfree(mvchip->chip.label);
- return -ENOMEM;
- }
+ mvchip->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mvchip->membase))
+ return PTR_ERR(mvchip->membase);
/* The Armada XP has a second range of registers for the
* per-CPU registers */
@@ -557,16 +555,13 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (! res) {
dev_err(&pdev->dev, "Cannot get memory resource\n");
- kfree(mvchip->chip.label);
return -ENODEV;
}
- mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res);
- if (! mvchip->percpu_membase) {
- dev_err(&pdev->dev, "Cannot ioremap\n");
- kfree(mvchip->chip.label);
- return -ENOMEM;
- }
+ mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(mvchip->percpu_membase))
+ return PTR_ERR(mvchip->percpu_membase);
}
/*
@@ -625,7 +620,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
if (mvchip->irqbase < 0) {
dev_err(&pdev->dev, "no irqs\n");
- kfree(mvchip->chip.label);
return -ENOMEM;
}
@@ -633,7 +627,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->membase, handle_level_irq);
if (! gc) {
dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
- kfree(mvchip->chip.label);
return -ENOMEM;
}
@@ -668,7 +661,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
IRQ_LEVEL | IRQ_NOPROBE);
kfree(gc);
- kfree(mvchip->chip.label);
return -ENODEV;
}
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index fa2a63cad32e..45d97c46831a 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -20,6 +20,7 @@
* MA 02110-1301, USA.
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -253,12 +254,14 @@ static int mxs_gpio_probe(struct platform_device *pdev)
parent = of_get_parent(np);
base = of_iomap(parent, 0);
of_node_put(parent);
+ if (!base)
+ return -EADDRNOTAVAIL;
} else {
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_request_and_ioremap(&pdev->dev, iores);
+ base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
}
- if (!base)
- return -EADDRNOTAVAIL;
}
port->base = base;
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 01f7fe955590..b3643ff007e4 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -32,14 +32,12 @@
#include <mach/hardware.h>
#include <mach/map.h>
-#include <mach/regs-clock.h>
#include <mach/regs-gpio.h>
#include <plat/cpu.h>
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
#include <plat/gpio-cfg-helpers.h>
-#include <plat/gpio-fns.h>
#include <plat/pm.h>
int samsung_gpio_setpull_updown(struct samsung_gpio_chip *chip,
@@ -446,7 +444,7 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
};
#endif
-#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
+#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_SOC_EXYNOS5250)
static struct samsung_gpio_cfg exynos_gpio_cfg = {
.set_pull = exynos_gpio_setpull,
.get_pull = exynos_gpio_getpull,
@@ -2446,7 +2444,7 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_1[] = {
{
.chip = {
@@ -2614,7 +2612,7 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_2[] = {
{
.chip = {
@@ -2675,7 +2673,7 @@ static struct samsung_gpio_chip exynos5_gpios_2[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_3[] = {
{
.chip = {
@@ -2711,7 +2709,7 @@ static struct samsung_gpio_chip exynos5_gpios_3[] = {
};
#endif
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
static struct samsung_gpio_chip exynos5_gpios_4[] = {
{
.chip = {
@@ -3010,7 +3008,7 @@ static __init int samsung_gpiolib_init(void)
int i, nr_chips;
int group = 0;
-#ifdef CONFIG_PINCTRL_SAMSUNG
+#if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440)
/*
* This gpio driver includes support for device tree support and there
* are platforms using it. In order to maintain compatibility with those
@@ -3024,8 +3022,9 @@ static __init int samsung_gpiolib_init(void)
*/
struct device_node *pctrl_np;
static const struct of_device_id exynos_pinctrl_ids[] = {
- { .compatible = "samsung,pinctrl-exynos4210", },
- { .compatible = "samsung,pinctrl-exynos4x12", },
+ { .compatible = "samsung,exynos4210-pinctrl", },
+ { .compatible = "samsung,exynos4x12-pinctrl", },
+ { .compatible = "samsung,exynos5440-pinctrl", },
};
for_each_matching_node(pctrl_np, exynos_pinctrl_ids)
if (pctrl_np && of_device_is_available(pctrl_np))
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 5f45fc4ed5d1..7a4bf7c0d98f 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -140,11 +140,9 @@ static int spics_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
}
- spics->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!spics->base) {
- dev_err(&pdev->dev, "request and ioremap fail\n");
- return -ENOMEM;
- }
+ spics->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spics->base))
+ return PTR_ERR(spics->base);
if (of_property_read_u32(np, "st-spics,peripcfg-reg",
&spics->perip_cfg))
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 85841ee70b17..c20e05151212 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -214,11 +214,10 @@ static int xway_stp_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- chip->virt = devm_request_and_ioremap(&pdev->dev, res);
- if (!chip->virt) {
- dev_err(&pdev->dev, "failed to remap STP memory\n");
- return -ENOMEM;
- }
+ chip->virt = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(chip->virt))
+ return PTR_ERR(chip->virt);
+
chip->gc.dev = &pdev->dev;
chip->gc.label = "stp-xway";
chip->gc.direction_output = xway_stp_dir_out;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 63cb643d4b5a..414ad912232f 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -17,6 +17,7 @@
*
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -450,11 +451,9 @@ static int tegra_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!regs) {
- dev_err(&pdev->dev, "Couldn't ioremap regs\n");
- return -ENODEV;
- }
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
for (i = 0; i < tegra_gpio_bank_count; i++) {
for (j = 0; j < 4; j++) {
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index d542a141811a..a71a54a3e3f7 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -89,41 +89,6 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
EXPORT_SYMBOL(of_get_named_gpio_flags);
/**
- * of_gpio_named_count - Count GPIOs for a device
- * @np: device node to count GPIOs for
- * @propname: property name containing gpio specifier(s)
- *
- * The function returns the count of GPIOs specified for a node.
- *
- * Note that the empty GPIO specifiers counts too. For example,
- *
- * gpios = <0
- * &pio1 1 2
- * 0
- * &pio2 3 4>;
- *
- * defines four GPIOs (so this function will return 4), two of which
- * are not specified.
- */
-unsigned int of_gpio_named_count(struct device_node *np, const char* propname)
-{
- unsigned int cnt = 0;
-
- do {
- int ret;
-
- ret = of_parse_phandle_with_args(np, propname, "#gpio-cells",
- cnt, NULL);
- /* A hole in the gpios = <> counts anyway. */
- if (ret < 0 && ret != -EEXIST)
- break;
- } while (++cnt);
-
- return cnt;
-}
-EXPORT_SYMBOL(of_gpio_named_count);
-
-/**
* of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
* @np: device node of the GPIO chip
@@ -250,7 +215,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
* on the same GPIO chip.
*/
ret = gpiochip_add_pin_range(chip,
- pinctrl_dev_get_name(pctldev),
+ pinctrl_dev_get_devname(pctldev),
0, /* offset in gpiochip */
pinspec.args[0],
pinspec.args[1]);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 199fca15f270..5359ca78130f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -806,7 +806,7 @@ fail_unlock:
}
EXPORT_SYMBOL_GPL(gpio_export);
-static int match_export(struct device *dev, void *data)
+static int match_export(struct device *dev, const void *data)
{
return dev_get_drvdata(dev) == data;
}
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index a277b1257888..da4a51eae824 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -1,6 +1,6 @@
config DRM_AST
tristate "AST server chips"
- depends on DRM && PCI && EXPERIMENTAL
+ depends on DRM && PCI
select DRM_TTM
select FB_SYS_COPYAREA
select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index fc154dd75296..bf67b22723f9 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -1,6 +1,6 @@
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
- depends on DRM && PCI && EXPERIMENTAL
+ depends on DRM && PCI
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2bf9670ba29b..2aa331499f81 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -221,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
BUG_ON(!hole_node->hole_follows || node->allocated);
- if (mm->color_adjust)
- mm->color_adjust(hole_node, color, &adj_start, &adj_end);
-
if (adj_start < start)
adj_start = start;
+ if (adj_end > end)
+ adj_end = end;
+
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (alignment) {
unsigned tmp = adj_start % alignment;
@@ -506,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
- mm->scan_hit_size = 0;
+ mm->scan_hit_end = 0;
mm->scan_check_range = 0;
mm->prev_scanned_node = NULL;
}
@@ -533,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
- mm->scan_hit_size = 0;
+ mm->scan_hit_end = 0;
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
@@ -552,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
unsigned long hole_start, hole_end;
- unsigned long adj_start;
- unsigned long adj_end;
+ unsigned long adj_start, adj_end;
mm->scanned_blocks++;
@@ -570,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->node_list.next = &mm->prev_scanned_node->node_list;
mm->prev_scanned_node = node;
- hole_start = drm_mm_hole_node_start(prev_node);
- hole_end = drm_mm_hole_node_end(prev_node);
-
- adj_start = hole_start;
- adj_end = hole_end;
-
- if (mm->color_adjust)
- mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
+ adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+ adj_end = hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
if (adj_start < mm->scan_start)
@@ -586,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
adj_end = mm->scan_end;
}
+ if (mm->color_adjust)
+ mm->color_adjust(prev_node, mm->scan_color,
+ &adj_start, &adj_end);
+
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
- mm->scan_hit_size = hole_end;
-
+ mm->scan_hit_end = hole_end;
return 1;
}
@@ -626,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
node_list);
prev_node->hole_follows = node->scanned_preceeds_hole;
- INIT_LIST_HEAD(&node->node_list);
list_add(&node->node_list, &prev_node->node_list);
- /* Only need to check for containement because start&size for the
- * complete resulting free block (not just the desired part) is
- * stored. */
- if (node->start >= mm->scan_hit_start &&
- node->start + node->size
- <= mm->scan_hit_start + mm->scan_hit_size) {
- return 1;
- }
-
- return 0;
+ return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+ node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 1d1f1e5e33f0..046bcda36abe 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -24,7 +24,7 @@ config DRM_EXYNOS_DMABUF
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
- depends on DRM_EXYNOS && !FB_S3C
+ depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
help
Choose this option if you want to use Exynos FIMD for DRM.
@@ -48,7 +48,7 @@ config DRM_EXYNOS_G2D
config DRM_EXYNOS_IPP
bool "Exynos DRM IPP"
- depends on DRM_EXYNOS
+ depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
help
Choose this option if you want to use IPP feature for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index ab37437bad8a..4c5b6859c9ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -18,7 +18,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
-#define MAX_EDID 256
#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
drm_connector)
@@ -96,7 +95,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
to_exynos_connector(connector);
struct exynos_drm_manager *manager = exynos_connector->manager;
struct exynos_drm_display_ops *display_ops = manager->display_ops;
- unsigned int count;
+ struct edid *edid = NULL;
+ unsigned int count = 0;
+ int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -114,27 +115,21 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
* because lcd panel has only one mode.
*/
if (display_ops->get_edid) {
- int ret;
- void *edid;
-
- edid = kzalloc(MAX_EDID, GFP_KERNEL);
- if (!edid) {
- DRM_ERROR("failed to allocate edid\n");
- return 0;
+ edid = display_ops->get_edid(manager->dev, connector);
+ if (IS_ERR_OR_NULL(edid)) {
+ ret = PTR_ERR(edid);
+ edid = NULL;
+ DRM_ERROR("Panel operation get_edid failed %d\n", ret);
+ goto out;
}
- ret = display_ops->get_edid(manager->dev, connector,
- edid, MAX_EDID);
- if (ret < 0) {
- DRM_ERROR("failed to get edid data.\n");
- kfree(edid);
- edid = NULL;
- return 0;
+ count = drm_add_edid_modes(connector, edid);
+ if (count < 0) {
+ DRM_ERROR("Add edid modes failed %d\n", count);
+ goto out;
}
drm_mode_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
} else {
struct exynos_drm_panel_info *panel;
struct drm_display_mode *mode = drm_mode_create(connector->dev);
@@ -161,6 +156,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
count = 1;
}
+out:
+ kfree(edid);
return count;
}
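The connector change above goes together with the exynos_drm_drv.h, exynos_drm_hdmi.[ch], vidi and exynos_hdmi.c hunks below: the display_ops->get_edid() contract changes from copying into a caller-supplied buffer to returning a freshly allocated struct edid (or an ERR_PTR), which the caller feeds to drm_add_edid_modes() and then kfree()s. A hedged sketch of a provider honouring the new contract, assuming a driver that keeps a raw EDID blob around; example_ctx and the kmemdup() approach are illustrative only (the drivers below use kzalloc()+memcpy() or drm_get_edid()):

	struct example_ctx {			/* hypothetical driver context */
		struct edid *raw_edid;
	};

	static struct edid *example_get_edid(struct device *dev,
					     struct drm_connector *connector)
	{
		struct example_ctx *ctx = dev_get_drvdata(dev);	/* illustrative lookup */
		struct edid *edid;
		int len;

		if (!ctx->raw_edid)
			return ERR_PTR(-ENODEV);

		/* hand the caller its own copy; get_modes() above kfree()s it */
		len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
		edid = kmemdup(ctx->raw_edid, len, GFP_KERNEL);

		return edid ? edid : ERR_PTR(-ENOMEM);
	}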
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 9df97714b6c0..ba0a3aa78547 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -19,6 +19,7 @@
struct exynos_drm_dmabuf_attachment {
struct sg_table sgt;
enum dma_data_direction dir;
+ bool is_mapped;
};
static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
@@ -72,17 +73,10 @@ static struct sg_table *
DRM_DEBUG_PRIME("%s\n", __FILE__);
- if (WARN_ON(dir == DMA_NONE))
- return ERR_PTR(-EINVAL);
-
/* just return current sgt if already requested. */
- if (exynos_attach->dir == dir)
+ if (exynos_attach->dir == dir && exynos_attach->is_mapped)
return &exynos_attach->sgt;
- /* reattaching is not allowed. */
- if (WARN_ON(exynos_attach->dir != DMA_NONE))
- return ERR_PTR(-EBUSY);
-
buf = gem_obj->buffer;
if (!buf) {
DRM_ERROR("buffer is null.\n");
@@ -107,13 +101,17 @@ static struct sg_table *
wr = sg_next(wr);
}
- nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
- if (!nents) {
- DRM_ERROR("failed to map sgl with iommu.\n");
- sgt = ERR_PTR(-EIO);
- goto err_unlock;
+ if (dir != DMA_NONE) {
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with iommu.\n");
+ sg_free_table(sgt);
+ sgt = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
}
+ exynos_attach->is_mapped = true;
exynos_attach->dir = dir;
attach->priv = exynos_attach;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index b9e51bc09e81..4606fac7241a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -148,8 +148,8 @@ struct exynos_drm_overlay {
struct exynos_drm_display_ops {
enum exynos_drm_output_type type;
bool (*is_connected)(struct device *dev);
- int (*get_edid)(struct device *dev, struct drm_connector *connector,
- u8 *edid, int len);
+ struct edid *(*get_edid)(struct device *dev,
+ struct drm_connector *connector);
void *(*get_panel)(struct device *dev);
int (*check_timing)(struct device *dev, void *timing);
int (*power_on)(struct device *dev, int mode);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 67a83e69544b..411f69b76e84 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1785,11 +1785,9 @@ static int fimc_probe(struct platform_device *pdev)
/* resource memory */
ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
- if (!ctx->regs) {
- dev_err(dev, "failed to map registers.\n");
- return -ENXIO;
- }
+ ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+ if (IS_ERR(ctx->regs))
+ return PTR_ERR(ctx->regs);
/* resource irq */
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 9537761931ee..36493ce71f9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -913,11 +913,9 @@ static int fimd_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!ctx->regs) {
- dev_err(dev, "failed to map registers\n");
- return -ENXIO;
- }
+ ctx->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->regs))
+ return PTR_ERR(ctx->regs);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 36c3905536a6..fb2f81b8063d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -324,7 +324,7 @@ out:
g2d_userptr = NULL;
}
-dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
unsigned long userptr,
unsigned long size,
struct drm_file *filp,
@@ -1136,10 +1136,9 @@ static int g2d_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- g2d->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!g2d->regs) {
- dev_err(dev, "failed to remap I/O memory\n");
- ret = -ENXIO;
+ g2d->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(g2d->regs)) {
+ ret = PTR_ERR(g2d->regs);
goto err_put_clk;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 8140753ec9c8..7841c3b8a20e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1692,11 +1692,9 @@ static int gsc_probe(struct platform_device *pdev)
/* resource memory */
ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
- if (!ctx->regs) {
- dev_err(dev, "failed to map registers.\n");
- return -ENXIO;
- }
+ ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+ if (IS_ERR(ctx->regs))
+ return PTR_ERR(ctx->regs);
/* resource irq */
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 850e9950b7da..28644539b305 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -108,18 +108,17 @@ static bool drm_hdmi_is_connected(struct device *dev)
return false;
}
-static int drm_hdmi_get_edid(struct device *dev,
- struct drm_connector *connector, u8 *edid, int len)
+static struct edid *drm_hdmi_get_edid(struct device *dev,
+ struct drm_connector *connector)
{
struct drm_hdmi_context *ctx = to_context(dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (hdmi_ops && hdmi_ops->get_edid)
- return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid,
- len);
+ return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector);
- return 0;
+ return NULL;
}
static int drm_hdmi_check_timing(struct device *dev, void *timing)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 784a7e9a766c..d80516fc9ed7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -30,8 +30,8 @@ struct exynos_drm_hdmi_context {
struct exynos_hdmi_ops {
/* display */
bool (*is_connected)(void *ctx);
- int (*get_edid)(void *ctx, struct drm_connector *connector,
- u8 *edid, int len);
+ struct edid *(*get_edid)(void *ctx,
+ struct drm_connector *connector);
int (*check_timing)(void *ctx, void *timing);
int (*power_on)(void *ctx, int mode);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 0bda96454a02..1a556354e92f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -869,7 +869,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
}
}
-void ipp_handle_cmd_work(struct device *dev,
+static void ipp_handle_cmd_work(struct device *dev,
struct exynos_drm_ippdrv *ippdrv,
struct drm_exynos_ipp_cmd_work *cmd_work,
struct drm_exynos_ipp_cmd_node *c_node)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index e9e83ef688f0..a40b9fb60240 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -656,11 +656,9 @@ static int rotator_probe(struct platform_device *pdev)
platform_get_device_id(pdev)->driver_data;
rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
- if (!rot->regs) {
- dev_err(dev, "failed to map register\n");
- return -ENXIO;
- }
+ rot->regs = devm_ioremap_resource(dev, rot->regs_res);
+ if (IS_ERR(rot->regs))
+ return PTR_ERR(rot->regs);
rot->irq = platform_get_irq(pdev, 0);
if (rot->irq < 0) {
@@ -734,7 +732,7 @@ static int rotator_remove(struct platform_device *pdev)
return 0;
}
-struct rot_limit_table rot_limit_tbl = {
+static struct rot_limit_table rot_limit_tbl = {
.ycbcr420_2p = {
.min_w = 32,
.min_h = 32,
@@ -751,7 +749,7 @@ struct rot_limit_table rot_limit_tbl = {
},
};
-struct platform_device_id rotator_driver_ids[] = {
+static struct platform_device_id rotator_driver_ids[] = {
{
.name = "exynos-rot",
.driver_data = (unsigned long)&rot_limit_tbl,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index d0ca3c4e06c6..13ccbd4bcfaa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -98,10 +98,12 @@ static bool vidi_display_is_connected(struct device *dev)
return ctx->connected ? true : false;
}
-static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
- u8 *edid, int len)
+static struct edid *vidi_get_edid(struct device *dev,
+ struct drm_connector *connector)
{
struct vidi_context *ctx = get_vidi_context(dev);
+ struct edid *edid;
+ int edid_len;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -111,13 +113,18 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
*/
if (!ctx->raw_edid) {
DRM_DEBUG_KMS("raw_edid is null.\n");
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
}
- memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
- * EDID_LENGTH, len));
+ edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
+ edid = kzalloc(edid_len, GFP_KERNEL);
+ if (!edid) {
+ DRM_DEBUG_KMS("failed to allocate edid\n");
+ return ERR_PTR(-ENOMEM);
+ }
- return 0;
+ memcpy(edid, ctx->raw_edid, edid_len);
+ return edid;
}
static void *vidi_get_panel(struct device *dev)
@@ -514,7 +521,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct exynos_drm_manager *manager;
struct exynos_drm_display_ops *display_ops;
struct drm_exynos_vidi_connection *vidi = data;
- struct edid *raw_edid;
int edid_len;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -551,11 +557,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
}
if (vidi->connection) {
- if (!vidi->edid) {
- DRM_DEBUG_KMS("edid data is null.\n");
+ struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid;
+ if (!drm_edid_is_valid(raw_edid)) {
+ DRM_DEBUG_KMS("edid data is invalid.\n");
return -EINVAL;
}
- raw_edid = (struct edid *)(uint32_t)vidi->edid;
edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
if (!ctx->raw_edid) {
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 41ff79d8ac8e..233247505ff8 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,7 +34,6 @@
#include <linux/regulator/consumer.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
-#include <plat/gpio-cfg.h>
#include <drm/exynos_drm.h>
@@ -98,8 +97,7 @@ struct hdmi_context {
void __iomem *regs;
void *parent_ctx;
- int external_irq;
- int internal_irq;
+ int irq;
struct i2c_client *ddc_port;
struct i2c_client *hdmiphy_port;
@@ -1391,8 +1389,7 @@ static bool hdmi_is_connected(void *ctx)
return hdata->hpd;
}
-static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
- u8 *edid, int len)
+static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
{
struct edid *raw_edid;
struct hdmi_context *hdata = ctx;
@@ -1400,22 +1397,18 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
if (!hdata->ddc_port)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
- if (raw_edid) {
- hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
- memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
- * EDID_LENGTH, len));
- DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
- (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
- raw_edid->width_cm, raw_edid->height_cm);
- kfree(raw_edid);
- } else {
- return -ENODEV;
- }
+ if (!raw_edid)
+ return ERR_PTR(-ENODEV);
- return 0;
+ hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
+ DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
+ (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+ raw_edid->width_cm, raw_edid->height_cm);
+
+ return raw_edid;
}
static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
@@ -1652,16 +1645,16 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
/* resetting HDMI core */
hdmi_reg_writemask(hdata, reg, 0, HDMI_CORE_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
}
static void hdmi_conf_init(struct hdmi_context *hdata)
{
struct hdmi_infoframe infoframe;
- /* disable HPD interrupts */
+ /* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1779,7 +1772,7 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);
if (val & HDMI_PHY_STATUS_READY)
break;
- mdelay(1);
+ usleep_range(1000, 2000);
}
/* steady state not achieved */
if (tries == 0) {
@@ -1946,7 +1939,7 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);
if (val & HDMI_PHY_STATUS_READY)
break;
- mdelay(1);
+ usleep_range(1000, 2000);
}
/* steady state not achieved */
if (tries == 0) {
@@ -1998,9 +1991,9 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
/* reset hdmiphy */
hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT);
- mdelay(10);
+ usleep_range(10000, 12000);
}
static void hdmiphy_poweron(struct hdmi_context *hdata)
@@ -2048,7 +2041,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
return;
}
- mdelay(10);
+ usleep_range(10000, 12000);
/* operation mode */
operation[0] = 0x1f;
@@ -2170,6 +2163,13 @@ static void hdmi_commit(void *ctx)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ mutex_lock(&hdata->hdmi_mutex);
+ if (!hdata->powered) {
+ mutex_unlock(&hdata->hdmi_mutex);
+ return;
+ }
+ mutex_unlock(&hdata->hdmi_mutex);
+
hdmi_conf_apply(hdata);
}
@@ -2265,7 +2265,7 @@ static struct exynos_hdmi_ops hdmi_ops = {
.dpms = hdmi_dpms,
};
-static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
+static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
struct exynos_drm_hdmi_context *ctx = arg;
struct hdmi_context *hdata = ctx->ctx;
@@ -2280,31 +2280,6 @@ static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
-static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
-{
- struct exynos_drm_hdmi_context *ctx = arg;
- struct hdmi_context *hdata = ctx->ctx;
- u32 intc_flag;
-
- intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
- /* clearing flags for HPD plug/unplug */
- if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
- DRM_DEBUG_KMS("unplugged\n");
- hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
- HDMI_INTC_FLAG_HPD_UNPLUG);
- }
- if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
- DRM_DEBUG_KMS("plugged\n");
- hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
- HDMI_INTC_FLAG_HPD_PLUG);
- }
-
- if (ctx->drm_dev)
- drm_helper_hpd_irq_event(ctx->drm_dev);
-
- return IRQ_HANDLED;
-}
-
static int hdmi_resources_init(struct hdmi_context *hdata)
{
struct device *dev = hdata->dev;
@@ -2526,11 +2501,9 @@ static int hdmi_probe(struct platform_device *pdev)
return -ENOENT;
}
- hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!hdata->regs) {
- DRM_ERROR("failed to map registers\n");
- return -ENXIO;
- }
+ hdata->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hdata->regs))
+ return PTR_ERR(hdata->regs);
ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
if (ret) {
@@ -2555,39 +2528,24 @@ static int hdmi_probe(struct platform_device *pdev)
hdata->hdmiphy_port = hdmi_hdmiphy;
- hdata->external_irq = gpio_to_irq(hdata->hpd_gpio);
- if (hdata->external_irq < 0) {
- DRM_ERROR("failed to get GPIO external irq\n");
- ret = hdata->external_irq;
- goto err_hdmiphy;
- }
-
- hdata->internal_irq = platform_get_irq(pdev, 0);
- if (hdata->internal_irq < 0) {
- DRM_ERROR("failed to get platform internal irq\n");
- ret = hdata->internal_irq;
+ hdata->irq = gpio_to_irq(hdata->hpd_gpio);
+ if (hdata->irq < 0) {
+ DRM_ERROR("failed to get GPIO irq\n");
+ ret = hdata->irq;
goto err_hdmiphy;
}
hdata->hpd = gpio_get_value(hdata->hpd_gpio);
- ret = request_threaded_irq(hdata->external_irq, NULL,
- hdmi_external_irq_thread, IRQF_TRIGGER_RISING |
+ ret = request_threaded_irq(hdata->irq, NULL,
+ hdmi_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "hdmi_external", drm_hdmi_ctx);
+ "hdmi", drm_hdmi_ctx);
if (ret) {
- DRM_ERROR("failed to register hdmi external interrupt\n");
+ DRM_ERROR("failed to register hdmi interrupt\n");
goto err_hdmiphy;
}
- ret = request_threaded_irq(hdata->internal_irq, NULL,
- hdmi_internal_irq_thread, IRQF_ONESHOT,
- "hdmi_internal", drm_hdmi_ctx);
- if (ret) {
- DRM_ERROR("failed to register hdmi internal interrupt\n");
- goto err_free_irq;
- }
-
/* Attach HDMI Driver to common hdmi. */
exynos_hdmi_drv_attach(drm_hdmi_ctx);
@@ -2598,8 +2556,6 @@ static int hdmi_probe(struct platform_device *pdev)
return 0;
-err_free_irq:
- free_irq(hdata->external_irq, drm_hdmi_ctx);
err_hdmiphy:
i2c_del_driver(&hdmiphy_driver);
err_ddc:
@@ -2617,8 +2573,7 @@ static int hdmi_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
- free_irq(hdata->internal_irq, hdata);
- free_irq(hdata->external_irq, hdata);
+ free_irq(hdata->irq, hdata);
/* hdmiphy i2c driver */
@@ -2637,8 +2592,7 @@ static int hdmi_suspend(struct device *dev)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- disable_irq(hdata->internal_irq);
- disable_irq(hdata->external_irq);
+ disable_irq(hdata->irq);
hdata->hpd = false;
if (ctx->drm_dev)
@@ -2663,8 +2617,7 @@ static int hdmi_resume(struct device *dev)
hdata->hpd = gpio_get_value(hdata->hpd_gpio);
- enable_irq(hdata->external_irq);
- enable_irq(hdata->internal_irq);
+ enable_irq(hdata->irq);
if (!pm_runtime_suspended(dev)) {
DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
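Besides folding the separate internal/external HPD interrupts into the single GPIO-backed hdata->irq, the exynos_hdmi.c hunks above replace busy-waiting mdelay() calls with usleep_range() in sleepable paths (the exynos_mixer.c change below does the same). A two-line sketch of the substitution, values in microseconds and purely illustrative:

	/* before: mdelay(10);            -- spins the CPU for 10 ms          */
	usleep_range(10000, 12000);       /* sleeps 10-12 ms; the range lets
					   * the timer code coalesce wakeups  */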
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index c187ea33b748..c414584bfbae 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -600,7 +600,7 @@ static void vp_win_reset(struct mixer_context *ctx)
/* waiting until VP_SRESET_PROCESSING is 0 */
if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
break;
- mdelay(10);
+ usleep_range(10000, 12000);
}
WARN(tries == 0, "failed to reset Video Processor\n");
}
@@ -776,6 +776,13 @@ static void mixer_win_commit(void *ctx, int win)
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+ mutex_lock(&mixer_ctx->mixer_mutex);
+ if (!mixer_ctx->powered) {
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+ return;
+ }
+ mutex_unlock(&mixer_ctx->mixer_mutex);
+
if (win > 1 && mixer_ctx->vp_enabled)
vp_video_buffer(mixer_ctx, win);
else
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 42e665c7e90a..1188f0fe7e4f 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,6 +1,6 @@
config DRM_GMA500
tristate "Intel GMA5/600 KMS Framebuffer"
- depends on DRM && PCI && X86 && EXPERIMENTAL
+ depends on DRM && PCI && X86
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 51044cc55cf2..88d9ef6b5b4a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -27,6 +27,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6a11ca85eaf..32158d21c632 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <generated/utsrelease.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
@@ -641,6 +642,7 @@ static void i915_ring_error_state(struct seq_file *m,
seq_printf(m, "%s command stream:\n", ring_str(ring));
seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+ seq_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
@@ -689,10 +691,13 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
+ seq_printf(m, "Kernel: " UTS_RELEASE);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "IER: 0x%08x\n", error->ier);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+ seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
seq_printf(m, "CCID: 0x%08x\n", error->ccid);
for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -1455,7 +1460,7 @@ static const char *swizzle_string(unsigned swizzle)
case I915_BIT_6_SWIZZLE_9_10_17:
return "bit9/bit10/bit17";
case I915_BIT_6_SWIZZLE_UNKNOWN:
- return "unkown";
+ return "unknown";
}
return "bug";
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ed3059575576..12ab3bdea54d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -188,10 +188,13 @@ struct drm_i915_error_state {
u32 pgtbl_er;
u32 ier;
u32 ccid;
+ u32 derrmr;
+ u32 forcewake;
bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
+ u32 ctl[I915_NUM_RINGS];
u32 ipeir[I915_NUM_RINGS];
u32 ipehr[I915_NUM_RINGS];
u32 instdone[I915_NUM_RINGS];
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index da3c82e301b1..8febea6daa08 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1717,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
}
static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+ bool purgeable_only)
{
struct drm_i915_gem_object *obj, *next;
long count = 0;
@@ -1725,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
list_for_each_entry_safe(obj, next,
&dev_priv->mm.unbound_list,
gtt_list) {
- if (i915_gem_object_is_purgeable(obj) &&
+ if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
@@ -1736,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list,
mm_list) {
- if (i915_gem_object_is_purgeable(obj) &&
+ if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_unbind(obj) == 0 &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
@@ -1748,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return count;
}
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+ return __i915_gem_shrink(dev_priv, target, true);
+}
+
static void
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
@@ -3522,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
goto out;
}
- obj->user_pin_count++;
- obj->pin_filp = file;
- if (obj->user_pin_count == 1) {
+ if (obj->user_pin_count == 0) {
ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
@@ -4395,6 +4403,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
if (nr_to_scan > 0)
+ nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+ false);
+ if (nr_to_scan > 0)
i915_gem_shrink_all(dev_priv);
}
@@ -4402,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
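The i915_gem.c hunks above split the shrinker into a purgeable-only pass and a catch-all pass. A hedged sketch of the resulting fallback order as the shrink callback uses it; i915_gem_purge(), __i915_gem_shrink() and i915_gem_shrink_all() are the functions introduced or reused above, while the wrapper itself is illustrative:

	static long example_shrink(struct drm_i915_private *dev_priv, long target)
	{
		long freed;

		/* 1) drop purgeable (madvise DONTNEED) objects first */
		freed = i915_gem_purge(dev_priv, target);
		/* 2) then any other unpinned, unbound/inactive object */
		if (freed < target)
			freed += __i915_gem_shrink(dev_priv, target - freed, false);
		/* 3) last resort: drop everything that can be dropped */
		if (freed < target)
			i915_gem_shrink_all(dev_priv);

		return freed;
	}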
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d6a994a07393..26d08bb58218 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -539,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
total = 0;
for (i = 0; i < count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
+ u64 invalid_offset = (u64)-1;
+ int j;
user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
@@ -549,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
}
+ /* As we do not update the known relocation offsets after
+ * relocating (due to the complexities in lock handling),
+ * we need to mark them as invalid now so that we force the
+ * relocation processing next time. Just in case the target
+ * object is evicted and then rebound into its old
+ * presumed_offset before the next execbuffer - if that
+ * happened we would make the mistake of assuming that the
+ * relocations were valid.
+ */
+ for (j = 0; j < exec[i].relocation_count; j++) {
+ if (copy_to_user(&user_relocs[j].presumed_offset,
+ &invalid_offset,
+ sizeof(invalid_offset))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+ }
+
reloc_offset[i] = total;
total += exec[i].relocation_count;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2220dec3e5d9..fe843389c7b4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1157,6 +1157,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
+ error->ctl[ring->id] = I915_READ_CTL(ring);
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
@@ -1251,6 +1252,16 @@ static void i915_capture_error_state(struct drm_device *dev)
else
error->ier = I915_READ(IER);
+ if (INTEL_INFO(dev)->gen >= 6)
+ error->derrmr = I915_READ(DERRMR);
+
+ if (IS_VALLEYVIEW(dev))
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ else if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+ else if (INTEL_INFO(dev)->gen == 6)
+ error->forcewake = I915_READ(FORCEWAKE);
+
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 186ee5c85b51..59afb7eb6db6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -512,6 +512,8 @@
#define GEN7_ERR_INT 0x44040
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
+#define DERRMR 0x44050
+
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
* the enables for writing to the corresponding low bit.
@@ -531,6 +533,7 @@
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)
+# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
#define GEN6_GT_MODE 0x20d0
#define GEN6_GT_MODE_HI (1 << 9)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a9fb046b94a1..da1ad9c80bb5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8598,19 +8598,30 @@ int intel_framebuffer_init(struct drm_device *dev,
{
int ret;
- if (obj->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y) {
+ DRM_DEBUG("hardware does not support tiling Y\n");
return -EINVAL;
+ }
- if (mode_cmd->pitches[0] & 63)
+ if (mode_cmd->pitches[0] & 63) {
+ DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+ mode_cmd->pitches[0]);
return -EINVAL;
+ }
/* FIXME <= Gen4 stride limits are bit unclear */
- if (mode_cmd->pitches[0] > 32768)
+ if (mode_cmd->pitches[0] > 32768) {
+ DRM_DEBUG("pitch (%d) must be less than 32768\n",
+ mode_cmd->pitches[0]);
return -EINVAL;
+ }
if (obj->tiling_mode != I915_TILING_NONE &&
- mode_cmd->pitches[0] != obj->stride)
+ mode_cmd->pitches[0] != obj->stride) {
+ DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], obj->stride);
return -EINVAL;
+ }
/* Reject formats not supported by any plane early. */
switch (mode_cmd->pixel_format) {
@@ -8621,8 +8632,10 @@ int intel_framebuffer_init(struct drm_device *dev,
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
- if (INTEL_INFO(dev)->gen > 3)
+ if (INTEL_INFO(dev)->gen > 3) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
+ }
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
@@ -8630,18 +8643,22 @@ int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
+ }
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_INFO(dev)->gen < 5) {
+ DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
+ }
break;
default:
- DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
+ DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1b63d55318a0..fb3715b4b09d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2579,7 +2579,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp)
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *out)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq cur, vbt, spec, final;
@@ -2650,16 +2651,35 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ if (out)
+ *out = final;
+}
+
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+ struct intel_dp *intel_dp,
+ struct edp_power_seq *seq)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_on, pp_off, pp_div;
+
/* And finally store the new values in the power sequencer. */
- pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
- (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
- pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
- (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+ pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+ (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
<< PP_REFERENCE_DIVIDER_SHIFT;
- pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
+ pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
<< PANEL_POWER_CYCLE_DELAY_SHIFT);
/* Haswell doesn't have any port selection bits for the panel
@@ -2675,14 +2695,6 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
I915_WRITE(PCH_PP_DIVISOR, pp_div);
-
- DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
-
- DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
-
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(PCH_PP_ON_DELAYS),
I915_READ(PCH_PP_OFF_DELAYS),
@@ -2699,6 +2711,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = NULL;
+ struct edp_power_seq power_seq = { 0 };
enum port port = intel_dig_port->port;
const char *name = NULL;
int type;
@@ -2771,7 +2784,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
}
if (is_edp(intel_dp))
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_dp_i2c_init(intel_dp, intel_connector, name);
@@ -2798,6 +2811,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
return;
}
+ /* We now know it's not a ghost, init power sequence regs. */
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
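The intel_dp.c changes above split eDP panel power sequencing into two stages: the delays are computed early, but the PP_* registers are only written once the port has been confirmed not to be a ghost eDP connector. A sketch of the resulting init-time call order, using the two functions introduced above; the interleaving comments are interpretive, not from the patch:

	struct edp_power_seq power_seq = { 0 };

	/* stage 1: compute and cache the panel delays only */
	if (is_edp(intel_dp))
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);

	/* ... DPCD/EDID probing confirms a real panel is attached ... */

	/* stage 2: program PCH_PP_ON_DELAYS / PCH_PP_OFF_DELAYS / PCH_PP_DIVISOR */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, &power_seq);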
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b9a660a53677..17aee74258ad 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -776,14 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "ZOTAC ZBOXSD-ID12/ID13",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
- DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
- },
- },
- {
- .callback = intel_no_lvds_dmi_callback,
.ident = "Gigabyte GA-D525TUD",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e6f54ffab3ba..3280cffe50f4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -44,6 +44,14 @@
* i915.i915_enable_fbc parameter
*/
+static bool intel_crtc_active(struct drm_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ */
+ return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
+
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (to_intel_crtc(tmp_crtc)->active &&
- !to_intel_crtc(tmp_crtc)->primary_disabled &&
- tmp_crtc->fb) {
+ if (intel_crtc_active(tmp_crtc) &&
+ !to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
struct drm_crtc *crtc, *enabled = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (to_intel_crtc(crtc)->active && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
enabled = crtc;
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
+ if (!intel_crtc_active(crtc)) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
int entries;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !to_intel_crtc(crtc)->active)
+ if (!intel_crtc_active(crtc))
return false;
clock = crtc->mode.clock; /* VESA DOT Clock */
@@ -1476,7 +1483,7 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
- if (to_intel_crtc(crtc)->active && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1490,7 +1497,7 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
- if (to_intel_crtc(crtc)->active && crtc->fb) {
+ if (intel_crtc_active(crtc)) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -2044,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
+ if (!intel_crtc_active(crtc)) {
*sprite_wm = display->guard_size;
return false;
}
@@ -4243,7 +4250,8 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
}
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
@@ -4260,7 +4268,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4297,14 +4306,16 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
- /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+ /* something from same cacheline, but !FORCEWAKE */
+ POSTING_READ(ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ POSTING_READ(ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
@@ -4344,6 +4355,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
}
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
@@ -4364,7 +4377,8 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* The below doubles as a POSTING_READ */
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ POSTING_READ(FORCEWAKE_ACK_VLV);
gen6_gt_check_fifodbg(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ae253e04c391..42ff97d667d2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -505,13 +505,25 @@ static int init_render_ring(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
- if (INTEL_INFO(dev)->gen > 3) {
+ if (INTEL_INFO(dev)->gen > 3)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
- if (IS_GEN7(dev))
- I915_WRITE(GFX_MODE_GEN7,
- _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
- _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- }
+
+ /* We need to disable the AsyncFlip performance optimisations in order
+ * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+ * programmed to '1' on all products.
+ */
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+ /* Required for the hardware to program scanline values for waiting */
+ if (INTEL_INFO(dev)->gen == 6)
+ I915_WRITE(GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
if (INTEL_INFO(dev)->gen >= 5) {
ret = init_pipe_control(ring);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 827dcd4edf1c..d7b060e0a231 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -120,11 +120,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
- linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -286,11 +285,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
- fb->bits_per_pixel / 8,
- fb->pitches[0]);
+ pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
if (obj->tiling_mode != I915_TILING_NONE)
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index d63013497f66..b487cdec5ee7 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -1,6 +1,6 @@
config DRM_MGAG200
tristate "Kernel modesetting driver for MGA G200 server engines"
- depends on DRM && PCI && EXPERIMENTAL
+ depends on DRM && PCI
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index c617f0480071..8bbb58f94a19 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -66,10 +66,8 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
ret = nouveau_handle_create(nv_object(client), ~0, ~0,
nv_object(client), &client->root);
- if (ret) {
- nouveau_namedb_destroy(&client->base);
+ if (ret)
return ret;
- }
/* prevent init/fini being called, os in in charge of this */
atomic_set(&nv_object(client)->usecount, 2);
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c
index 6b0843c33877..e05c15777588 100644
--- a/drivers/gpu/drm/nouveau/core/core/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/core/falcon.c
@@ -73,8 +73,11 @@ _nouveau_falcon_init(struct nouveau_object *object)
nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
/* wait for 'uc halted' to be signalled before continuing */
- if (falcon->secret) {
- nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+ if (falcon->secret && falcon->version < 4) {
+ if (!falcon->version)
+ nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+ else
+ nv_wait(falcon, 0x180, 0x80000000, 0);
nv_wo32(falcon, 0x004, 0x00000010);
}
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index b8d2cbf8a7a7..264c2b338ac3 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -109,7 +109,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
namedb = namedb->parent;
- handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
@@ -146,6 +146,9 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
}
hprintk(handle, TRACE, "created\n");
+
+ *phandle = handle;
+
return 0;
}
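The handle.c fix above is a common out-parameter hardening: *phandle is now assigned only after the handle has been fully constructed, so the error paths cannot leave the caller holding a pointer to an object that was never successfully created. A generic sketch of the pattern with entirely hypothetical names:

	struct thing {
		int payload;			/* hypothetical contents */
	};

	static int example_thing_setup(struct thing *t)
	{
		return 0;			/* stand-in for real setup work */
	}

	static int example_thing_create(struct thing **pthing)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (!t)
			return -ENOMEM;
		if (example_thing_setup(t)) {
			kfree(t);
			return -EINVAL;
		}
		*pthing = t;			/* publish only on full success */
		return 0;
	}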
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index f74c30aa33a0..48f06378d3f9 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent,
if (ret)
return ret;
- mutex_init(&subdev->mutex);
+ __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
subdev->name = subname;
if (parent) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 0f09af135415..ca1a7d76a95b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -851,20 +851,23 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
- if (nv_device(priv)->chipset < 0x90 ||
- nv_device(priv)->chipset == 0x92 ||
- nv_device(priv)->chipset == 0xa0) {
- for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
- ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
- i += 3;
- } else {
- for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
- ctrl = nv_rd32(priv, 0x610798 + (i * 8));
- i += 3;
+ if (!(ctrl & (1 << head))) {
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+ i += 4;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+ i += 4;
+ }
}
if (!(ctrl & (1 << head)))
return false;
+ i--;
data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
if (data) {
@@ -898,20 +901,23 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
- if (nv_device(priv)->chipset < 0x90 ||
- nv_device(priv)->chipset == 0x92 ||
- nv_device(priv)->chipset == 0xa0) {
- for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
- ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
- i += 3;
- } else {
- for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
- ctrl = nv_rd32(priv, 0x610794 + (i * 8));
- i += 3;
+ if (!(ctrl & (1 << head))) {
+ if (nv_device(priv)->chipset < 0x90 ||
+ nv_device(priv)->chipset == 0x92 ||
+ nv_device(priv)->chipset == 0xa0) {
+ for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+ ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+ i += 4;
+ } else {
+ for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+ ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+ i += 4;
+ }
}
if (!(ctrl & (1 << head)))
return 0x0000;
+ i--;
data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
if (!data)
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 0193532ceac9..63acc0346ff2 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -36,6 +36,9 @@ nouveau_client(void *obj)
int nouveau_client_create_(const char *name, u64 device, const char *cfg,
const char *dbg, int, void **);
+#define nouveau_client_destroy(p) \
+ nouveau_namedb_destroy(&(p)->base)
+
int nouveau_client_init(struct nouveau_client *);
int nouveau_client_fini(struct nouveau_client *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 5982935ee23a..106bb19fdd9a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -50,10 +50,13 @@ int nouveau_object_fini(struct nouveau_object *, bool suspend);
extern struct nouveau_ofuncs nouveau_object_ofuncs;
+/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
+ * ".data". */
struct nouveau_oclass {
u32 handle;
- struct nouveau_ofuncs *ofuncs;
- struct nouveau_omthds *omthds;
+ struct nouveau_ofuncs * const ofuncs;
+ struct nouveau_omthds * const omthds;
+ struct lock_class_key lock_class_key;
};
#define nv_oclass(o) nv_object(o)->oclass
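/*
 * A minimal standalone sketch (illustrative names, not nouveau code) of the
 * pattern the hunks above rely on: lockdep keeps pointers to lock_class_key
 * objects for the lifetime of the kernel, so the key must live in static
 * storage (".data") inside a statically allocated class descriptor, and each
 * object's mutex is initialised against its class's key with __mutex_init().
 */
#include <linux/mutex.h>
#include <linux/lockdep.h>

struct example_oclass {				/* analogous to nouveau_oclass */
	struct lock_class_key lock_class_key;	/* must not be heap-allocated */
};

static struct example_oclass example_class;	/* lives in .data */

struct example_object {
	struct mutex mutex;
};

static void example_object_init(struct example_object *obj)
{
	/* objects of the same class share one lockdep class, while objects of
	 * other classes get their own, so nested locking across classes does
	 * not trigger false recursive-lock reports */
	__mutex_init(&obj->mutex, "example", &example_class.lock_class_key);
}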
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
index c345097592f2..b2f3d4d0aa49 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
@@ -38,6 +38,8 @@ enum nvbios_pll_type {
PLL_UNK42 = 0x42,
PLL_VPLL0 = 0x80,
PLL_VPLL1 = 0x81,
+ PLL_VPLL2 = 0x82,
+ PLL_VPLL3 = 0x83,
PLL_MAX = 0xff
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2917d552689b..690ed438b2ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -1534,7 +1534,6 @@ init_io(struct nvbios_init *init)
mdelay(10);
init_wr32(init, 0x614100, 0x10000018);
init_wr32(init, 0x614900, 0x10000018);
- return;
}
value = init_rdport(init, port) & mask;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index f6962c9b6c36..7c9626258a46 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
switch (info.type) {
case PLL_VPLL0:
case PLL_VPLL1:
+ case PLL_VPLL2:
+ case PLL_VPLL3:
nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
nv_wr32(priv, info.reg + 0x10, fN << 16);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index d6d16007ec1a..d62045f454b2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -86,8 +86,8 @@ nouveau_fb_preinit(struct nouveau_fb *pfb)
return ret;
}
- if (!nouveau_mm_initialised(&pfb->tags) && tags) {
- ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+ if (!nouveau_mm_initialised(&pfb->tags)) {
+ ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 487cb8c6c204..eac236ed19b2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -99,7 +99,7 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
struct nouveau_bios *bios = nouveau_bios(device);
const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 size;
+ u32 size, tags = 0;
int ret;
pfb->ram.size = nv_rd32(pfb, 0x10020c);
@@ -140,10 +140,11 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
return ret;
pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+ tags = nv_rd32(pfb, 0x100320);
break;
}
- return nv_rd32(pfb, 0x100320);
+ return tags;
}
static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 306bdf121452..7606ed15b6fa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -145,14 +145,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
mem->memtype = type;
mem->size = size;
- mutex_lock(&mm->mutex);
+ mutex_lock(&pfb->base.mutex);
do {
if (back)
ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
else
ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
if (ret) {
- mutex_unlock(&mm->mutex);
+ mutex_unlock(&pfb->base.mutex);
pfb->ram.put(pfb, &mem);
return ret;
}
@@ -160,7 +160,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
- mutex_unlock(&mm->mutex);
+ mutex_unlock(&pfb->base.mutex);
r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
mem->offset = (u64)r->offset << 12;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
index 1188227ca6aa..6565f3dbbe04 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
if (ret)
return ret;
+ mutex_lock(&imem->base.mutex);
list_add(&iobj->head, &imem->list);
+ mutex_unlock(&imem->base.mutex);
return 0;
}
void
nouveau_instobj_destroy(struct nouveau_instobj *iobj)
{
- if (iobj->head.prev)
- list_del(&iobj->head);
+ struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
+
+ mutex_lock(&subdev->mutex);
+ list_del(&iobj->head);
+ mutex_unlock(&subdev->mutex);
+
return nouveau_object_destroy(&iobj->base);
}
@@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
if (ret)
return ret;
+ mutex_lock(&imem->base.mutex);
+
list_for_each_entry(iobj, &imem->list, head) {
if (iobj->suspend) {
for (i = 0; i < iobj->size; i += 4)
@@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
}
}
+ mutex_unlock(&imem->base.mutex);
+
return 0;
}
@@ -104,17 +114,26 @@ int
nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
{
struct nouveau_instobj *iobj;
- int i;
+ int i, ret = 0;
if (suspend) {
+ mutex_lock(&imem->base.mutex);
+
list_for_each_entry(iobj, &imem->list, head) {
iobj->suspend = vmalloc(iobj->size);
- if (iobj->suspend) {
- for (i = 0; i < iobj->size; i += 4)
- iobj->suspend[i / 4] = nv_ro32(iobj, i);
- } else
- return -ENOMEM;
+ if (!iobj->suspend) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < iobj->size; i += 4)
+ iobj->suspend[i / 4] = nv_ro32(iobj, i);
}
+
+ mutex_unlock(&imem->base.mutex);
+
+ if (ret)
+ return ret;
}
return nouveau_subdev_fini(&imem->base, suspend);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 082c11b75acb..77c67fc970e6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -352,7 +352,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
u64 mm_length = (offset + length) - mm_offset;
int ret;
- vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
return -ENOMEM;
@@ -376,6 +376,8 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
return ret;
}
+ *pvm = vm;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 69d7b1d0b9d6..1699a9083a2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
*/
#include <core/engine.h>
+#include <linux/swiotlb.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ac340ba32017..e620ba8271b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -127,12 +127,26 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
struct nouveau_encoder **pnv_encoder)
{
struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
- int i;
+ struct nouveau_i2c_port *port = NULL;
+ int i, panel = -ENODEV;
+
+ /* eDP panels need powering on by us (if the VBIOS doesn't default it
+ * to on) before doing any AUX channel transactions. LVDS panel power
+ * is handled by the SOR itself, and not required for LVDS DDC.
+ */
+ if (nv_connector->type == DCB_CONNECTOR_eDP) {
+ panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+ if (panel == 0) {
+ gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+ msleep(300);
+ }
+ }
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct nouveau_i2c_port *port = NULL;
struct nouveau_encoder *nv_encoder;
struct drm_mode_object *obj;
int id;
@@ -150,11 +164,19 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
if (port && nv_probe_i2c(port, 0x50)) {
*pnv_encoder = nv_encoder;
- return port;
+ break;
}
+
+ port = NULL;
}
- return NULL;
+ /* eDP panel not detected, restore panel power GPIO to previous
+ * state to avoid confusing the SOR for other output types.
+ */
+ if (!port && panel == 0)
+ gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
+
+ return port;
}
static struct nouveau_encoder *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e4188f24fc75..508b00a2ce0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -225,15 +225,6 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
- /* power on internal panel if it's not already. the init tables of
- * some vbios default this to off for some reason, causing the
- * panel to not work after resume
- */
- if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
- gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
- msleep(300);
- }
-
/* enable polling for external displays */
drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 180a45e3b525..5e7aef23825a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -84,11 +84,16 @@ nouveau_cli_create(struct pci_dev *pdev, const char *name,
struct nouveau_cli *cli;
int ret;
+ *pcli = NULL;
ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
nouveau_debug, size, pcli);
cli = *pcli;
- if (ret)
+ if (ret) {
+ if (cli)
+ nouveau_client_destroy(&cli->base);
+ *pcli = NULL;
return ret;
+ }
mutex_init(&cli->mutex);
return 0;
@@ -240,6 +245,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
return 0;
}
+static struct lock_class_key drm_client_lock_class_key;
+
static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
@@ -251,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
if (ret)
return ret;
+ lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
dev->dev_private = drm;
drm->dev = dev;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index bedafd1c9539..cdb83acdffe2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -60,6 +60,7 @@ u32 nv10_fence_read(struct nouveau_channel *);
void nv10_fence_context_del(struct nouveau_channel *);
void nv10_fence_destroy(struct nouveau_drm *);
int nv10_fence_create(struct nouveau_drm *);
+void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 184cdf806761..39ffc07f906b 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
static inline bool is_powersaving_dpms(int mode)
{
- return (mode != DRM_MODE_DPMS_ON);
+ return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
}
static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 7ae7f97a6d4d..03017f24d593 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -162,6 +162,13 @@ nv10_fence_destroy(struct nouveau_drm *drm)
kfree(priv);
}
+void nv17_fence_resume(struct nouveau_drm *drm)
+{
+ struct nv10_fence_priv *priv = drm->fence;
+
+ nouveau_bo_wr32(priv->bo, 0, priv->sequence);
+}
+
int
nv10_fence_create(struct nouveau_drm *drm)
{
@@ -197,6 +204,7 @@ nv10_fence_create(struct nouveau_drm *drm)
if (ret == 0) {
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
priv->base.sync = nv17_fence_sync;
+ priv->base.resume = nv17_fence_resume;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index c20f2727ea0b..d889f3ac0d41 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -122,6 +122,7 @@ nv50_fence_create(struct nouveau_drm *drm)
if (ret == 0) {
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
priv->base.sync = nv17_fence_sync;
+ priv->base.resume = nv17_fence_resume;
}
if (ret)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 061fa0a28900..a2d478e8692a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
}
/* wait for the next frame */
@@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
blackout &= ~BLACKOUT_MODE_MASK;
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
+ /* wait for the MC to settle */
+ udelay(100);
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
@@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG, gb_addr_config);
- tmp = gb_addr_config & NUM_PIPES_MASK;
- tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
- EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ if ((rdev->config.evergreen.max_backends == 1) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ if ((disabled_rb_mask & 3) == 1) {
+ /* RB0 disabled, RB1 enabled */
+ tmp = 0x11111111;
+ } else {
+ /* RB1 disabled, RB0 enabled */
+ tmp = 0x00000000;
+ }
+ } else {
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+ EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ }
WREG32(GB_BACKEND_MAP, tmp);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
@@ -2401,6 +2422,12 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
if (reset_mask == 0)
return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7a445666e71f..ee4cff534f10 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
if (tiled) {
- dst_offset = ib[idx+1];
+ dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
p->idx += count + 7;
} else {
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
return -EINVAL;
}
- dst_offset = ib[idx+1];
+ dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
- dst2_offset = ib[idx+2];
+ dst2_offset = radeon_get_ib_value(p, idx+2);
dst2_offset <<= 8;
- src_offset = ib[idx+8];
- src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
- dst_offset = ib[idx+1];
+ dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
- dst2_offset = ib[idx+2];
+ dst2_offset = radeon_get_ib_value(p, idx+2);
dst2_offset <<= 8;
- src_offset = ib[idx+8];
- src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
/* detile bit */
if (idx_value & (1 << 31)) {
/* tiled src, linear dst */
- src_offset = ib[idx+1];
+ src_offset = radeon_get_ib_value(p, idx+1);
src_offset <<= 8;
ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
- dst_offset = ib[idx+7];
- dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
/* linear src, tiled dst */
- src_offset = ib[idx+7];
- src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- dst_offset = ib[idx+1];
+ dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
@@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
- dst_offset = ib[idx+1];
+ dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
- dst2_offset = ib[idx+2];
+ dst2_offset = radeon_get_ib_value(p, idx+2);
dst2_offset <<= 8;
- src_offset = ib[idx+8];
- src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
/* detile bit */
if (idx_value & (1 << 31)) {
/* tiled src, linear dst */
- src_offset = ib[idx+1];
+ src_offset = radeon_get_ib_value(p, idx+1);
src_offset <<= 8;
ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
- dst_offset = ib[idx+7];
- dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
/* linear src, tiled dst */
- src_offset = ib[idx+7];
- src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- dst_offset = ib[idx+1];
+ dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
@@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
switch (misc) {
case 0:
/* L2L, byte */
- src_offset = ib[idx+2];
- src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
src_offset + count, radeon_bo_size(src_reloc->robj));
@@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
- dst2_offset = ib[idx+2];
- dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
- src_offset = ib[idx+3];
- src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+3);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
}
} else {
/* L2L, dw */
- src_offset = ib[idx+2];
- src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
return -EINVAL;
}
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
dst_offset, radeon_bo_size(dst_reloc->robj));
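/*
 * Hedged, self-contained sketch (hypothetical names, not the radeon
 * implementation) of why the parser above reads IB dwords through
 * radeon_get_ib_value() instead of indexing ib[] directly: on AGP the
 * user-supplied indirect buffer is copied into the kernel mapping one page
 * at a time, so a raw ib[idx] read may hit a page that has not been copied
 * yet. The accessor makes sure the page is present before returning a value.
 */
#include <stdint.h>
#include <string.h>

#define PAGE_DWORDS 1024			/* 4 KiB pages, 32-bit dwords */
#define IB_PAGES    8

struct ib_chunk {
	const uint32_t *user_ib;		/* source: user-space buffer */
	uint32_t kernel_ib[IB_PAGES * PAGE_DWORDS];
	int page_copied[IB_PAGES];
};

static uint32_t get_ib_value(struct ib_chunk *c, unsigned int idx)
{
	unsigned int page = idx / PAGE_DWORDS;

	if (!c->page_copied[page]) {		/* fault the page in on first use */
		memcpy(&c->kernel_ib[page * PAGE_DWORDS],
		       &c->user_ib[page * PAGE_DWORDS],
		       PAGE_DWORDS * sizeof(uint32_t));
		c->page_copied[page] = 1;
	}
	return c->kernel_ib[idx];
}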
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 896f1cbc58a5..835992d8d067 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1216,7 +1216,7 @@ void cayman_dma_stop(struct radeon_device *rdev)
int cayman_dma_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring;
- u32 rb_cntl, dma_cntl;
+ u32 rb_cntl, dma_cntl, ib_cntl;
u32 rb_bufsz;
u32 reg_offset, wb_offset;
int i, r;
@@ -1265,7 +1265,11 @@ int cayman_dma_resume(struct radeon_device *rdev)
WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
/* enable DMA IBs */
- WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
+ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
dma_cntl = RREG32(DMA_CNTL + reg_offset);
dma_cntl &= ~CTXEMPTY_INT_ENABLE;
@@ -1409,6 +1413,12 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
if (reset_mask == 0)
return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 537e259b3837..becb03e8b32f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1378,6 +1378,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct rv515_mc_save save;
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
if (reset_mask == 0)
return 0;
@@ -1456,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 disabled_rb_mask)
{
u32 rendering_pipe_num, rb_num_width, req_rb_num;
- u32 pipe_rb_ratio, pipe_rb_remain;
+ u32 pipe_rb_ratio, pipe_rb_remain, tmp;
u32 data = 0, mask = 1 << (max_rb_num - 1);
unsigned i, j;
/* mask out the RBs that don't exist on that asic */
- disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+ tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+ /* make sure at least one RB is available */
+ if ((tmp & 0xff) != 0xff)
+ disabled_rb_mask = tmp;
rendering_pipe_num = 1 << tiling_pipe_num;
req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
@@ -2307,7 +2316,7 @@ void r600_dma_stop(struct radeon_device *rdev)
int r600_dma_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- u32 rb_cntl, dma_cntl;
+ u32 rb_cntl, dma_cntl, ib_cntl;
u32 rb_bufsz;
int r;
@@ -2347,7 +2356,11 @@ int r600_dma_resume(struct radeon_device *rdev)
WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
/* enable DMA IBs */
- WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
+ ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL, ib_cntl);
dma_cntl = RREG32(DMA_CNTL);
dma_cntl &= ~CTXEMPTY_INT_ENABLE;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 03191a56eb44..9b2512bf1a46 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2476,8 +2476,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->relocs);
for (i = 0; i < parser->nchunks; i++) {
kfree(parser->chunks[i].kdata);
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
+ if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
}
kfree(parser->chunks);
kfree(parser->chunks_array);
@@ -2561,16 +2563,16 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
+ *cs_reloc = NULL;
if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
- *cs_reloc = NULL;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
idx = p->dma_reloc_idx;
- if (idx >= relocs_chunk->length_dw) {
+ if (idx >= p->nrelocs) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
+ idx, p->nrelocs);
return -EINVAL;
}
*cs_reloc = p->relocs_ptr[idx];
@@ -2621,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
if (tiled) {
- dst_offset = ib[idx+1];
+ dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
p->idx += count + 5;
} else {
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2656,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
/* detile bit */
if (idx_value & (1 << 31)) {
/* tiled src, linear dst */
- src_offset = ib[idx+1];
+ src_offset = radeon_get_ib_value(p, idx+1);
src_offset <<= 8;
ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
- dst_offset = ib[idx+5];
- dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+5);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
/* linear src, tiled dst */
- src_offset = ib[idx+5];
- src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+5);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- dst_offset = ib[idx+1];
+ dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
p->idx += 7;
} else {
if (p->family >= CHIP_RV770) {
- src_offset = ib[idx+2];
- src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2689,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
p->idx += 5;
} else {
- src_offset = ib[idx+2];
- src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2722,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 34e52304a525..a08f657329a0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -324,7 +324,6 @@ struct radeon_bo {
struct list_head list;
/* Protected by tbo.reserved */
u32 placements[3];
- u32 busy_placements[3];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@@ -654,6 +653,8 @@ struct radeon_ring {
u32 ptr_reg_mask;
u32 nop;
u32 idx;
+ u64 last_semaphore_signal_addr;
+ u64 last_semaphore_wait_addr;
};
/*
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 9056fafb00ea..0b202c07fe50 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
@@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+ .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &si_vm_set_page,
},
.ring = {
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 33a56a09ff10..3e403bdda58f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2470,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1),
ATOM_DEVICE_CRT1_SUPPORT);
}
+ /* RV100 board with external TMDS bit mis-set.
+ * Actually uses internal TMDS, clear the bit.
+ */
+ if (dev->pdev->device == 0x5159 &&
+ dev->pdev->subsystem_vendor == 0x1014 &&
+ dev->pdev->subsystem_device == 0x029A) {
+ tmp &= ~(1 << 4);
+ }
if ((tmp >> 4) & 0x1) {
devices |= ATOM_DEVICE_DFP2_SUPPORT;
radeon_add_legacy_encoder(dev,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 396baba0141a..5407459e56d2 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -279,13 +279,15 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
- if ((p->rdev->flags & RADEON_IS_AGP)) {
+ if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
- kfree(p->chunks[i].kpage[0]);
- kfree(p->chunks[i].kpage[1]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+ p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
+ p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
return -ENOMEM;
}
}
@@ -583,7 +585,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
int i;
int size = PAGE_SIZE;
- bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
+ bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
+ false : true;
for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index ad6df625e8b8..0d67674b64b1 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -241,7 +241,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
y = 0;
}
- if (ASIC_IS_AVIVO(rdev)) {
+ /* fixed on DCE6 and newer */
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
int i = 0;
struct drm_crtc *crtc_p;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index edfc54e41842..0d6562bb0c93 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -429,7 +429,8 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
- if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+ if (efi_enabled(EFI_BOOT) &&
+ rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
return false;
/* first check CRTCs */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1da2386d7cf7..05c96fa0b051 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1115,14 +1115,16 @@ radeon_user_framebuffer_create(struct drm_device *dev,
}
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL)
+ if (radeon_fb == NULL) {
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(-ENOMEM);
+ }
ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
if (ret) {
kfree(radeon_fb);
drm_gem_object_unreference_unlocked(obj);
- return NULL;
+ return ERR_PTR(ret);
}
return &radeon_fb->base;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index dff6cf77f953..d9bf96ee299a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -69,9 +69,10 @@
* 2.26.0 - r600-eg: fix htile size computation
* 2.27.0 - r600-SI: Add CS ioctl support for async DMA
* 2.28.0 - r600-eg: Add MEM_WRITE packet support
+ * 2.29.0 - R500 FP16 color clear registers
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 28
+#define KMS_DRIVER_MINOR 29
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index f5ba2241dacc..62cd512f5c8d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
enum drm_connector_status found = connector_status_disconnected;
bool color = true;
+ /* just don't bother on RN50; those chips are often connected to remote
+ * console hardware and load detection often fails on them. So to make
+ * everyone happy, report the encoder as always connected.
+ */
+ if (ASIC_IS_RN50(rdev)) {
+ return connector_status_connected;
+ }
+
/* save the regs we need */
vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 883c95d8d90f..d3aface2d12d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -84,6 +84,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placement.fpfn = 0;
rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
+ rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
@@ -104,14 +105,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
if (!c)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
-
- c = 0;
- rbo->placement.busy_placement = rbo->busy_placements;
- if (rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
- } else {
- rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
- }
rbo->placement.num_busy_placement = c;
}
@@ -357,6 +350,7 @@ int radeon_bo_list_validate(struct list_head *head)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
+ u32 domain;
int r;
r = ttm_eu_reserve_buffers(head);
@@ -366,9 +360,17 @@ int radeon_bo_list_validate(struct list_head *head)
list_for_each_entry(lobj, head, tv.head) {
bo = lobj->bo;
if (!bo->pin_count) {
+ domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+
+ retry:
+ radeon_ttm_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement,
true, false);
if (unlikely(r)) {
+ if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
+ domain |= RADEON_GEM_DOMAIN_GTT;
+ goto retry;
+ }
return r;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 141f2b6a9cf2..cd72062d5a91 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
{
int r;
+ /* make sure we aren't trying to allocate more space than there is on the ring */
+ if (ndw > (ring->ring_size / 4))
+ return -ENOMEM;
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
@@ -784,6 +787,8 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
}
seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
+ seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
+ seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
/* print 8 dw before current rptr as often it's the last executed
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 97f3ece81cd2..8dcc20f53d73 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
/* we assume caller has already allocated space on waiters ring */
radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+ /* for debugging lockup only, used by sysfs debug files */
+ rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
+ rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1d8ff2f850ba..93f760e27a92 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -38,6 +38,7 @@
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/swiotlb.h>
#include "radeon_reg.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 0f656b111c15..a072fa8c46b0 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -1,5 +1,6 @@
cayman 0x9400
0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
0x000084FC CP_STRMOUT_CNTL
0x000085F0 CP_COHER_CNTL
0x000085F4 CP_COHER_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 911a8fbd32bb..78d5e99d759d 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -324,6 +324,8 @@ rv515 0x6d40
0x46AC US_OUT_FMT_2
0x46B0 US_OUT_FMT_3
0x46B4 US_W_FMT
+0x46C0 RB3D_COLOR_CLEAR_VALUE_AR
+0x46C4 RB3D_COLOR_CLEAR_VALUE_GB
0x4BC0 FG_FOG_BLEND
0x4BC4 FG_FOG_FACTOR
0x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 2bb6d0e84b3d..435ed3551364 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
WREG32(R600_CITF_CNTL, blackout);
}
}
+ /* wait for the MC to settle */
+ udelay(100);
}
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 3240a3d64f30..ae8b48205a6c 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2215,6 +2215,12 @@ static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
if (reset_mask == 0)
return 0;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 656b2e3334a6..b6679b36700f 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -12,8 +12,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-
-#include <mach/clk.h>
+#include <linux/clk/tegra.h>
#include "drm.h"
#include "dc.h"
@@ -764,11 +763,9 @@ static int tegra_dc_probe(struct platform_device *pdev)
return -ENXIO;
}
- dc->regs = devm_request_and_ioremap(&pdev->dev, regs);
- if (!dc->regs) {
- dev_err(&pdev->dev, "failed to remap registers\n");
- return -ENXIO;
- }
+ dc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(dc->regs))
+ return PTR_ERR(dc->regs);
dc->irq = platform_get_irq(pdev, 0);
if (dc->irq < 0) {
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 3a503c9e4686..d980dc75788c 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -11,7 +11,6 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <mach/clk.h>
#include <linux/dma-mapping.h>
#include <asm/dma-iommu.h>
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index e060c7e6434d..d4f3fb9f0c29 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -14,8 +14,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-
-#include <mach/clk.h>
+#include <linux/clk/tegra.h>
#include "hdmi.h"
#include "drm.h"
@@ -1259,9 +1258,9 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
if (!regs)
return -ENXIO;
- hdmi->regs = devm_request_and_ioremap(&pdev->dev, regs);
- if (!hdmi->regs)
- return -EADDRNOTAVAIL;
+ hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
err = platform_get_irq(pdev, 0);
if (err < 0)
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
index 5d17b113a6fc..92e25a7e00ea 100644
--- a/drivers/gpu/drm/tegra/host1x.c
+++ b/drivers/gpu/drm/tegra/host1x.c
@@ -139,9 +139,9 @@ static int tegra_host1x_probe(struct platform_device *pdev)
host1x->irq = err;
- host1x->regs = devm_request_and_ioremap(&pdev->dev, regs);
- if (!host1x->regs) {
- err = -EADDRNOTAVAIL;
+ host1x->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host1x->regs)) {
+ err = PTR_ERR(host1x->regs);
goto err;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 33d20be87db5..52b20b12c83a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -434,6 +434,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
bo->mem = tmp_mem;
bdev->driver->move_notify(bo, mem);
bo->mem = *mem;
+ *mem = tmp_mem;
}
goto out_err;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d73d6e3e17b2..8be35c809c7b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -344,8 +344,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
if (ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
- if (ret)
+ if (ret) {
+ /* if we fail here don't nuke the mm node
+ * as the bo still owns it */
+ old_copy.mm_node = NULL;
goto out1;
+ }
}
add = 0;
@@ -371,8 +375,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
prot);
} else
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
- if (ret)
+ if (ret) {
+ /* failing here, means keep old copy as-is */
+ old_copy.mm_node = NULL;
goto out1;
+ }
}
mb();
out2:
@@ -422,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
- fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+ fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
return -ENOMEM;
@@ -441,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->vm_node = NULL;
atomic_set(&fbo->cpu_writers, 0);
- fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ spin_lock(&bdev->fence_lock);
+ if (bo->sync_obj)
+ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ else
+ fbo->sync_obj = NULL;
+ spin_unlock(&bdev->fence_lock);
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
@@ -654,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*/
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-
- /* ttm_buffer_object_transfer accesses bo->sync_obj */
- ret = ttm_buffer_object_transfer(bo, &ghost_obj);
spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
+ ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
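/*
 * A generic, standalone illustration (not TTM code) of the ownership rule the
 * two error paths above apply: the local copy shares a pointer with the
 * caller's object, so when the move fails before ownership has transferred,
 * the copy's pointer is cleared first and the copy's teardown then has
 * nothing to free that still belongs to the caller.
 */
#include <stdlib.h>

struct mem_reg {
	void *mm_node;			/* owned by exactly one mem_reg */
};

static void mem_reg_put(struct mem_reg *m)
{
	free(m->mm_node);		/* frees only what this copy owns */
	m->mm_node = NULL;
}

static int move_buffer(struct mem_reg *old_mem, int copy_ok)
{
	struct mem_reg old_copy = *old_mem;	/* shallow copy: shared mm_node */

	if (!copy_ok) {
		/* failure before handover: the caller keeps ownership */
		old_copy.mm_node = NULL;
		mem_reg_put(&old_copy);		/* must not free the caller's node */
		return -1;
	}
	old_mem->mm_node = NULL;		/* success: ownership has moved */
	mem_reg_put(&old_copy);
	return 0;
}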
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 56e0bf31d425..6222af19f456 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -1,6 +1,6 @@
config DRM_UDL
tristate "DisplayLink"
- depends on DRM && EXPERIMENTAL
+ depends on DRM
depends on USB_ARCH_HAS_HCD
select DRM_USB
select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 512f44add89f..fe5cdbcf2636 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -22,13 +22,17 @@
static u8 *udl_get_edid(struct udl_device *udl)
{
u8 *block;
- char rbuf[3];
+ char *rbuf;
int ret, i;
block = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (block == NULL)
return NULL;
+ rbuf = kmalloc(2, GFP_KERNEL);
+ if (rbuf == NULL)
+ goto error;
+
for (i = 0; i < EDID_LENGTH; i++) {
ret = usb_control_msg(udl->ddev->usbdev,
usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
@@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
HZ);
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
- i--;
goto error;
}
block[i] = rbuf[1];
}
+ kfree(rbuf);
return block;
error:
kfree(block);
+ kfree(rbuf);
return NULL;
}
@@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
edid = (struct edid *)udl_get_edid(udl);
+ /*
+ * We only read the main block, but if the monitor reports extension
+ * blocks then the drm edid code expects them to be present, so patch
+ * the extension count to 0.
+ */
+ edid->checksum += edid->extensions;
+ edid->extensions = 0;
+
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
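/*
 * A small standalone sketch of the checksum bookkeeping in the hunk above
 * (not the udl driver code itself): an EDID base block is valid when its 128
 * bytes sum to 0 modulo 256, and the extension count and checksum are the
 * last two bytes. Zeroing the extension count therefore requires adding the
 * old count back into the checksum so the block still validates.
 */
#include <stdint.h>

#define EDID_LENGTH 128

static int edid_block_valid(const uint8_t block[EDID_LENGTH])
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < EDID_LENGTH; i++)
		sum += block[i];
	return sum == 0;
}

static void edid_drop_extensions(uint8_t block[EDID_LENGTH])
{
	block[127] += block[126];	/* checksum += extensions */
	block[126] = 0;			/* extensions = 0 */
	/* edid_block_valid(block) is preserved: the byte sum is unchanged */
}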
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index e7d6a13ec6a6..5f07d85c4189 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -320,7 +320,7 @@ config HID_LOGITECH_DJ
Say Y if you want support for Logitech Unifying receivers and devices.
Unifying receivers are capable of pairing up to 6 Logitech compliant
devices to the same receiver. Without this driver it will be handled by
- generic USB_HID driver and all incomming events will be multiplexed
+ generic USB_HID driver and all incoming events will be multiplexed
into a single mouse and a single keyboard device.
config LOGITECH_FF
@@ -596,6 +596,12 @@ config HID_SPEEDLINK
---help---
Support for Speedlink Vicious and Divine Cezanne mouse.
+config HID_STEELSERIES
+ tristate "Steelseries SRW-S1 steering wheel support"
+ depends on USB_HID
+ ---help---
+ Support for Steelseries SRW-S1 steering wheel
+
config HID_SUNPLUS
tristate "Sunplus wireless desktop"
depends on USB_HID
@@ -655,6 +661,16 @@ config HID_TOPSEED
Say Y if you have a TopSeed Cyberlink or BTC Emprex or Conceptronic
CLLRCMCE remote control.
+config HID_THINGM
+ tristate "ThingM blink(1) USB RGB LED"
+ depends on USB_HID
+ depends on LEDS_CLASS
+ ---help---
+ Support for the ThingM blink(1) USB RGB LED. This driver registers a
+ Linux LED class instance, plus additional sysfs attributes to control
+ RGB colors, fade time and playing. The device is exposed through hidraw
+ to access other functions.
+
config HID_THRUSTMASTER
tristate "ThrustMaster devices support"
depends on USB_HID
@@ -719,7 +735,7 @@ config HID_ZYDACRON
config HID_SENSOR_HUB
tristate "HID Sensors framework support"
- depends on USB_HID
+ depends on USB_HID && GENERIC_HARDIRQS
select MFD_CORE
default n
	---help---
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index b62215716b2f..72d1b0bc0a97 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -101,8 +101,10 @@ obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
+obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
+obj-$(CONFIG_HID_THINGM) += hid-thingm.o
obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 0a239885e67c..7c5507e94820 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -146,17 +146,6 @@ static struct hid_driver a4_driver = {
.probe = a4_probe,
.remove = a4_remove,
};
+module_hid_driver(a4_driver);
-static int __init a4_init(void)
-{
- return hid_register_driver(&a4_driver);
-}
-
-static void __exit a4_exit(void)
-{
- hid_unregister_driver(&a4_driver);
-}
-
-module_init(a4_init);
-module_exit(a4_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index d0f7662aacca..320a958d4139 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -555,23 +555,6 @@ static struct hid_driver apple_driver = {
.input_mapping = apple_input_mapping,
.input_mapped = apple_input_mapped,
};
+module_hid_driver(apple_driver);
-static int __init apple_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&apple_driver);
- if (ret)
- pr_err("can't register apple driver\n");
-
- return ret;
-}
-
-static void __exit apple_exit(void)
-{
- hid_unregister_driver(&apple_driver);
-}
-
-module_init(apple_init);
-module_exit(apple_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-aureal.c b/drivers/hid/hid-aureal.c
index 7968187ddf7b..340ba9d394a0 100644
--- a/drivers/hid/hid-aureal.c
+++ b/drivers/hid/hid-aureal.c
@@ -37,17 +37,6 @@ static struct hid_driver aureal_driver = {
.id_table = aureal_devices,
.report_fixup = aureal_report_fixup,
};
+module_hid_driver(aureal_driver);
-static int __init aureal_init(void)
-{
- return hid_register_driver(&aureal_driver);
-}
-
-static void __exit aureal_exit(void)
-{
- hid_unregister_driver(&aureal_driver);
-}
-
-module_init(aureal_init);
-module_exit(aureal_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index 5be858dd9a15..62f0cee032ba 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -192,19 +192,7 @@ static struct hid_driver ax_driver = {
.probe = ax_probe,
.remove = ax_remove,
};
-
-static int __init ax_init(void)
-{
- return hid_register_driver(&ax_driver);
-}
-
-static void __exit ax_exit(void)
-{
- hid_unregister_driver(&ax_driver);
-}
-
-module_init(ax_init);
-module_exit(ax_exit);
+module_hid_driver(ax_driver);
MODULE_AUTHOR("Sergei Kolzun");
MODULE_DESCRIPTION("Force feedback support for ACRUX game controllers");
diff --git a/drivers/hid/hid-belkin.c b/drivers/hid/hid-belkin.c
index a1a5a12c3a6b..cc4cf138bef5 100644
--- a/drivers/hid/hid-belkin.c
+++ b/drivers/hid/hid-belkin.c
@@ -86,17 +86,6 @@ static struct hid_driver belkin_driver = {
.input_mapping = belkin_input_mapping,
.probe = belkin_probe,
};
+module_hid_driver(belkin_driver);
-static int __init belkin_init(void)
-{
- return hid_register_driver(&belkin_driver);
-}
-
-static void __exit belkin_exit(void)
-{
- hid_unregister_driver(&belkin_driver);
-}
-
-module_init(belkin_init);
-module_exit(belkin_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index af034d3d9256..1bdcccc54a1d 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -69,17 +69,6 @@ static struct hid_driver ch_driver = {
.report_fixup = ch_report_fixup,
.input_mapping = ch_input_mapping,
};
+module_hid_driver(ch_driver);
-static int __init ch_init(void)
-{
- return hid_register_driver(&ch_driver);
-}
-
-static void __exit ch_exit(void)
-{
- hid_unregister_driver(&ch_driver);
-}
-
-module_init(ch_init);
-module_exit(ch_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index a2abb8e15727..b613d5a79684 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -70,17 +70,6 @@ static struct hid_driver ch_driver = {
.id_table = ch_devices,
.input_mapping = ch_input_mapping,
};
+module_hid_driver(ch_driver);
-static int __init ch_init(void)
-{
- return hid_register_driver(&ch_driver);
-}
-
-static void __exit ch_exit(void)
-{
- hid_unregister_driver(&ch_driver);
-}
-
-module_init(ch_init);
-module_exit(ch_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index ccca9ac34dca..512b01c04ea7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -729,7 +729,7 @@ static int hid_scan_report(struct hid_device *hid)
item.type == HID_ITEM_TYPE_MAIN &&
item.tag == HID_MAIN_ITEM_TAG_BEGIN_COLLECTION &&
(item_udata(&item) & 0xff) == HID_COLLECTION_PHYSICAL &&
- hid->bus == BUS_USB)
+ (hid->bus == BUS_USB || hid->bus == BUS_I2C))
hid->group = HID_GROUP_SENSOR_HUB;
}
@@ -1195,6 +1195,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
{
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
+ struct hid_driver *hdrv;
unsigned int a;
int rsize, csize = size;
u8 *cdata = data;
@@ -1231,6 +1232,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
if (hid->claimed != HID_CLAIMED_HIDRAW) {
for (a = 0; a < report->maxfield; a++)
hid_input_field(hid, report->field[a], cdata, interrupt);
+ hdrv = hid->driver;
+ if (hdrv && hdrv->report)
+ hdrv->report(hid, report);
}
if (hid->claimed & HID_CLAIMED_INPUT)
@@ -1599,6 +1603,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
@@ -1697,7 +1702,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
@@ -2229,6 +2236,14 @@ bool hid_ignore(struct hid_device *hdev)
hdev->type != HID_TYPE_USBMOUSE)
return true;
break;
+ case USB_VENDOR_ID_VELLEMAN:
+ /* These are not HID devices. They are handled by comedi. */
+ if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
+ (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
+ return true;
+ break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
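
The hid_report_raw_event() hunk above wires up a new per-driver report() callback: once every field of a report has been parsed via hid_input_field(), the bound driver gets a single call with the complete report. A hedged sketch of how a driver hooks it (the names here are illustrative, not from this patch):

/* Assumed prototype in struct hid_driver (include/linux/hid.h):
 *   void (*report)(struct hid_device *hdev, struct hid_report *report);
 */
static void foo_report(struct hid_device *hdev, struct hid_report *report)
{
	/* All field->value[] slots of this report are populated here,
	 * so the whole frame can be processed in one pass. */
}

static struct hid_driver foo_driver = {
	.name   = "foo",
	/* .id_table etc. omitted for brevity */
	.report = foo_report,
};

hid-multitouch, later in this series, is the first user of this hook (mt_report()).
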
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 3e159a50dac7..c4ef3bc726e3 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -144,17 +144,6 @@ static struct hid_driver cp_driver = {
.event = cp_event,
.probe = cp_probe,
};
+module_hid_driver(cp_driver);
-static int __init cp_init(void)
-{
- return hid_register_driver(&cp_driver);
-}
-
-static void __exit cp_exit(void)
-{
- hid_unregister_driver(&cp_driver);
-}
-
-module_init(cp_init);
-module_exit(cp_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index e832f44ae383..0fe8f65ef01a 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -297,17 +297,6 @@ static struct hid_driver dr_driver = {
.report_fixup = dr_report_fixup,
.probe = dr_probe,
};
+module_hid_driver(dr_driver);
-static int __init dr_init(void)
-{
- return hid_register_driver(&dr_driver);
-}
-
-static void __exit dr_exit(void)
-{
- hid_unregister_driver(&dr_driver);
-}
-
-module_init(dr_init);
-module_exit(dr_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 79d0c61e7214..d0bd13b62dc2 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -41,17 +41,6 @@ static struct hid_driver elecom_driver = {
.id_table = elecom_devices,
.report_fixup = elecom_report_fixup
};
+module_hid_driver(elecom_driver);
-static int __init elecom_init(void)
-{
- return hid_register_driver(&elecom_driver);
-}
-
-static void __exit elecom_exit(void)
-{
- hid_unregister_driver(&elecom_driver);
-}
-
-module_init(elecom_init);
-module_exit(elecom_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index 2630d483d262..2e093ab99b43 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -150,18 +150,7 @@ static struct hid_driver ems_driver = {
.id_table = ems_devices,
.probe = ems_probe,
};
+module_hid_driver(ems_driver);
-static int ems_init(void)
-{
- return hid_register_driver(&ems_driver);
-}
-
-static void ems_exit(void)
-{
- hid_unregister_driver(&ems_driver);
-}
-
-module_init(ems_init);
-module_exit(ems_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ezkey.c b/drivers/hid/hid-ezkey.c
index 6540af2871a7..212ac6be2451 100644
--- a/drivers/hid/hid-ezkey.c
+++ b/drivers/hid/hid-ezkey.c
@@ -76,17 +76,6 @@ static struct hid_driver ez_driver = {
.input_mapping = ez_input_mapping,
.event = ez_event,
};
+module_hid_driver(ez_driver);
-static int __init ez_init(void)
-{
- return hid_register_driver(&ez_driver);
-}
-
-static void __exit ez_exit(void)
-{
- hid_unregister_driver(&ez_driver);
-}
-
-module_init(ez_init);
-module_exit(ez_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
index f1e1bcf67427..04d2e6aca778 100644
--- a/drivers/hid/hid-gaff.c
+++ b/drivers/hid/hid-gaff.c
@@ -176,17 +176,6 @@ static struct hid_driver ga_driver = {
.id_table = ga_devices,
.probe = ga_probe,
};
+module_hid_driver(ga_driver);
-static int __init ga_init(void)
-{
- return hid_register_driver(&ga_driver);
-}
-
-static void __exit ga_exit(void)
-{
- hid_unregister_driver(&ga_driver);
-}
-
-module_init(ga_init);
-module_exit(ga_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
index a8b3148e03a2..e288a4a06fe8 100644
--- a/drivers/hid/hid-generic.c
+++ b/drivers/hid/hid-generic.c
@@ -34,19 +34,7 @@ static struct hid_driver hid_generic = {
.name = "hid-generic",
.id_table = hid_table,
};
-
-static int __init hid_init(void)
-{
- return hid_register_driver(&hid_generic);
-}
-
-static void __exit hid_exit(void)
-{
- hid_unregister_driver(&hid_generic);
-}
-
-module_init(hid_init);
-module_exit(hid_exit);
+module_hid_driver(hid_generic);
MODULE_AUTHOR("Henrik Rydberg");
MODULE_DESCRIPTION("HID generic driver");
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 4442c30ef531..288d61c9748e 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -88,17 +88,6 @@ static struct hid_driver gyration_driver = {
.input_mapping = gyration_input_mapping,
.event = gyration_event,
};
+module_hid_driver(gyration_driver);
-static int __init gyration_init(void)
-{
- return hid_register_driver(&gyration_driver);
-}
-
-static void __exit gyration_exit(void)
-{
- hid_unregister_driver(&gyration_driver);
-}
-
-module_init(gyration_init);
-module_exit(gyration_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index e0a5d1739fc3..6e1a4a4fc0c1 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -167,17 +167,6 @@ static struct hid_driver holtek_kbd_driver = {
.report_fixup = holtek_kbd_report_fixup,
.probe = holtek_kbd_probe
};
+module_hid_driver(holtek_kbd_driver);
-static int __init holtek_kbd_init(void)
-{
- return hid_register_driver(&holtek_kbd_driver);
-}
-
-static void __exit holtek_kbd_exit(void)
-{
- hid_unregister_driver(&holtek_kbd_driver);
-}
-
-module_exit(holtek_kbd_exit);
-module_init(holtek_kbd_init);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index ff295e60059b..f34d1186a3e1 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -224,17 +224,4 @@ static struct hid_driver holtek_driver = {
.id_table = holtek_devices,
.probe = holtek_probe,
};
-
-static int __init holtek_init(void)
-{
- return hid_register_driver(&holtek_driver);
-}
-
-static void __exit holtek_exit(void)
-{
- hid_unregister_driver(&holtek_driver);
-}
-
-module_init(holtek_init);
-module_exit(holtek_exit);
-
+module_hid_driver(holtek_driver);
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 3d62781b8993..aa3fec0d9dc6 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -568,8 +568,7 @@ static int mousevsc_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Mouse guid */
- { VMBUS_DEVICE(0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c,
- 0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A) },
+ { HV_MOUSE_GUID, },
{ },
};
diff --git a/drivers/hid/hid-icade.c b/drivers/hid/hid-icade.c
index 1d6565e37ba3..09dcc04595f3 100644
--- a/drivers/hid/hid-icade.c
+++ b/drivers/hid/hid-icade.c
@@ -235,25 +235,8 @@ static struct hid_driver icade_driver = {
.input_mapped = icade_input_mapped,
.input_mapping = icade_input_mapping,
};
+module_hid_driver(icade_driver);
-static int __init icade_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&icade_driver);
- if (ret)
- pr_err("can't register icade driver\n");
-
- return ret;
-}
-
-static void __exit icade_exit(void)
-{
- hid_unregister_driver(&icade_driver);
-}
-
-module_init(icade_init);
-module_exit(icade_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
MODULE_DESCRIPTION("ION iCade input driver");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f39be08c1358..92e47e5c9564 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -306,6 +306,9 @@
#define USB_VENDOR_ID_EZKEY 0x0518
#define USB_DEVICE_ID_BTC_8193 0x0002
+#define USB_VENDOR_ID_FORMOSA 0x147a
+#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e
+
#define USB_VENDOR_ID_FREESCALE 0x15A2
#define USB_DEVICE_ID_FREESCALE_MX28 0x004F
@@ -442,6 +445,9 @@
#define USB_VENDOR_ID_JESS 0x0c45
#define USB_DEVICE_ID_JESS_YUREX 0x1010
+#define USB_VENDOR_ID_JESS2 0x0f30
+#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
+
#define USB_VENDOR_ID_KBGEAR 0x084e
#define USB_DEVICE_ID_KBGEAR_JAMSTUDIO 0x1001
@@ -522,8 +528,8 @@
#define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283
#define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286
#define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940 0xc287
-#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
#define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293
+#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295
#define USB_DEVICE_ID_LOGITECH_DFP_WHEEL 0xc298
#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299
@@ -597,6 +603,9 @@
#define USB_VENDOR_ID_NEC 0x073e
#define USB_DEVICE_ID_NEC_USB_GAME_PAD 0x0301
+#define USB_VENDOR_ID_NEXIO 0x1870
+#define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d
+
#define USB_VENDOR_ID_NEXTWINDOW 0x1926
#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
@@ -709,6 +718,7 @@
#define USB_VENDOR_ID_SONY 0x054c
#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
+#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
@@ -726,6 +736,9 @@
#define USB_VENDOR_ID_STANTUM_SITRONIX 0x1403
#define USB_DEVICE_ID_MTP_SITRONIX 0x5001
+#define USB_VENDOR_ID_STEELSERIES 0x1038
+#define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410
+
#define USB_VENDOR_ID_SUN 0x0430
#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
@@ -747,6 +760,9 @@
#define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010
#define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013
+#define USB_VENDOR_ID_THINGM 0x27b8
+#define USB_DEVICE_ID_BLINK1 0x01ed
+
#define USB_VENDOR_ID_THRUSTMASTER 0x044f
#define USB_VENDOR_ID_TIVO 0x150a
@@ -794,6 +810,12 @@
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19
+#define USB_VENDOR_ID_VELLEMAN 0x10cf
+#define USB_DEVICE_ID_VELLEMAN_K8055_FIRST 0x5500
+#define USB_DEVICE_ID_VELLEMAN_K8055_LAST 0x5503
+#define USB_DEVICE_ID_VELLEMAN_K8061_FIRST 0x8061
+#define USB_DEVICE_ID_VELLEMAN_K8061_LAST 0x8068
+
#define USB_VENDOR_ID_VERNIER 0x08f7
#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002
diff --git a/drivers/hid/hid-kensington.c b/drivers/hid/hid-kensington.c
index a5b4016e9bd7..fe9a99dd8d08 100644
--- a/drivers/hid/hid-kensington.c
+++ b/drivers/hid/hid-kensington.c
@@ -47,17 +47,6 @@ static struct hid_driver ks_driver = {
.id_table = ks_devices,
.input_mapping = ks_input_mapping,
};
+module_hid_driver(ks_driver);
-static int __init ks_init(void)
-{
- return hid_register_driver(&ks_driver);
-}
-
-static void __exit ks_exit(void)
-{
- hid_unregister_driver(&ks_driver);
-}
-
-module_init(ks_init);
-module_exit(ks_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-keytouch.c b/drivers/hid/hid-keytouch.c
index 07cd825f6f01..3074671b7d6a 100644
--- a/drivers/hid/hid-keytouch.c
+++ b/drivers/hid/hid-keytouch.c
@@ -49,18 +49,7 @@ static struct hid_driver keytouch_driver = {
.id_table = keytouch_devices,
.report_fixup = keytouch_report_fixup,
};
+module_hid_driver(keytouch_driver);
-static int __init keytouch_init(void)
-{
- return hid_register_driver(&keytouch_driver);
-}
-
-static void __exit keytouch_exit(void)
-{
- hid_unregister_driver(&keytouch_driver);
-}
-
-module_init(keytouch_init);
-module_exit(keytouch_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jiri Kosina");
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index b4f0d8216fd0..ef72daecfa16 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -419,17 +419,6 @@ static struct hid_driver kye_driver = {
.probe = kye_probe,
.report_fixup = kye_report_fixup,
};
+module_hid_driver(kye_driver);
-static int __init kye_init(void)
-{
- return hid_register_driver(&kye_driver);
-}
-
-static void __exit kye_exit(void)
-{
- hid_unregister_driver(&kye_driver);
-}
-
-module_init(kye_init);
-module_exit(kye_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lcpower.c b/drivers/hid/hid-lcpower.c
index 22bc14abdfa3..6424cfdb7737 100644
--- a/drivers/hid/hid-lcpower.c
+++ b/drivers/hid/hid-lcpower.c
@@ -54,17 +54,6 @@ static struct hid_driver ts_driver = {
.id_table = ts_devices,
.input_mapping = ts_input_mapping,
};
+module_hid_driver(ts_driver);
-static int __init ts_init(void)
-{
- return hid_register_driver(&ts_driver);
-}
-
-static void __exit ts_exit(void)
-{
- hid_unregister_driver(&ts_driver);
-}
-
-module_init(ts_init);
-module_exit(ts_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
index cea016e94f43..956c3b135f64 100644
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -468,18 +468,6 @@ static struct hid_driver tpkbd_driver = {
.probe = tpkbd_probe,
.remove = tpkbd_remove,
};
-
-static int __init tpkbd_init(void)
-{
- return hid_register_driver(&tpkbd_driver);
-}
-
-static void __exit tpkbd_exit(void)
-{
- hid_unregister_driver(&tpkbd_driver);
-}
-
-module_init(tpkbd_init);
-module_exit(tpkbd_exit);
+module_hid_driver(tpkbd_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index a2f8e88b9fa2..6f12ecd36c88 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -21,8 +21,10 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <linux/usb.h>
#include <linux/wait.h>
+#include "usbhid/usbhid.h"
#include "hid-ids.h"
#include "hid-lg.h"
@@ -40,17 +42,86 @@
#define LG_FF3 0x1000
#define LG_FF4 0x2000
-/* Size of the original descriptor of the Driving Force Pro wheel */
+/* Size of the original descriptors of the Driving Force (and Pro) wheels */
+#define DF_RDESC_ORIG_SIZE 130
#define DFP_RDESC_ORIG_SIZE 97
+#define MOMO_RDESC_ORIG_SIZE 87
-/* Fixed report descriptor for Logitech Driving Force Pro wheel controller
+/* Fixed report descriptors for Logitech Driving Force (and Pro)
+ * wheel controllers
*
- * The original descriptor hides the separate throttle and brake axes in
+ * The original descriptors hide the separate throttle and brake axes in
* a custom vendor usage page, providing only a combined value as
* GenericDesktop.Y.
- * This descriptor removes the combined Y axis and instead reports
+ * These descriptors remove the combined Y axis and instead report
* separate throttle (Y) and brake (RZ).
*/
+static __u8 df_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystik), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0A, /* Report Size (10), */
+0x14, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x34, /* Physical Minimum (0), */
+0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x0C, /* Report Count (12), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage (Buttons), */
+0x19, 0x01, /* Usage Minimum (1), */
+0x29, 0x0c, /* Usage Maximum (12), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x02, /* Report Count (2), */
+0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */
+0x09, 0x01, /* Usage (?: 1), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x81, 0x02, /* Input (Variable), */
+0x25, 0x07, /* Logical Maximum (7), */
+0x46, 0x3B, 0x01, /* Physical Maximum (315), */
+0x75, 0x04, /* Report Size (4), */
+0x65, 0x14, /* Unit (Degrees), */
+0x09, 0x39, /* Usage (Hat Switch), */
+0x81, 0x42, /* Input (Variable, Null State), */
+0x75, 0x01, /* Report Size (1), */
+0x95, 0x04, /* Report Count (4), */
+0x65, 0x00, /* Unit (none), */
+0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */
+0x09, 0x01, /* Usage (?: 1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x08, /* Report Size (8), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x35, /* Usage (Rz), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x95, 0x07, /* Report Count (7), */
+0x75, 0x08, /* Report Size (8), */
+0x09, 0x03, /* Usage (?: 3), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
static __u8 dfp_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystik), */
@@ -99,6 +170,51 @@ static __u8 dfp_rdesc_fixed[] = {
0xC0 /* End Collection */
};
+static __u8 momo_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystik), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0A, /* Report Size (10), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x35, 0x00, /* Physical Minimum (0), */
+0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x08, /* Report Count (8), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x08, /* Usage Maximum (08h), */
+0x81, 0x02, /* Input (Variable), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x75, 0x0E, /* Report Size (14), */
+0x95, 0x01, /* Report Count (1), */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x09, 0x00, /* Usage (00h), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x75, 0x08, /* Report Size (8), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x32, /* Usage (Z), */
+0x81, 0x02, /* Input (Variable), */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x09, 0x02, /* Usage (02h), */
+0x95, 0x07, /* Report Count (7), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
/*
* Certain Logitech keyboards send in report #3 keys which are far
@@ -109,6 +225,8 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
+ struct usb_device_descriptor *udesc;
+ __u16 bcdDevice, rev_maj, rev_min;
if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
rdesc[84] == 0x8c && rdesc[85] == 0x02) {
@@ -124,17 +242,39 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
"fixing up rel/abs in Logitech report descriptor\n");
rdesc[33] = rdesc[50] = 0x02;
}
- if ((drv_data->quirks & LG_FF4) && *rsize >= 101 &&
- rdesc[41] == 0x95 && rdesc[42] == 0x0B &&
- rdesc[47] == 0x05 && rdesc[48] == 0x09) {
- hid_info(hdev, "fixing up Logitech Speed Force Wireless button descriptor\n");
- rdesc[41] = 0x05;
- rdesc[42] = 0x09;
- rdesc[47] = 0x95;
- rdesc[48] = 0x0B;
- }
switch (hdev->product) {
+
+ /* Several wheels report as this id when operating in emulation mode. */
+ case USB_DEVICE_ID_LOGITECH_WHEEL:
+ udesc = &(hid_to_usb_dev(hdev)->descriptor);
+ if (!udesc) {
+ hid_err(hdev, "NULL USB device descriptor\n");
+ break;
+ }
+ bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ rev_maj = bcdDevice >> 8;
+ rev_min = bcdDevice & 0xff;
+
+ /* Update the report descriptor for only the Driving Force wheel */
+ if (rev_maj == 1 && rev_min == 2 &&
+ *rsize == DF_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Driving Force report descriptor\n");
+ rdesc = df_rdesc_fixed;
+ *rsize = sizeof(df_rdesc_fixed);
+ }
+ break;
+
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
+ if (*rsize == MOMO_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Momo Force (Red) report descriptor\n");
+ rdesc = momo_rdesc_fixed;
+ *rsize = sizeof(momo_rdesc_fixed);
+ }
+ break;
+
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
if (*rsize == DFP_RDESC_ORIG_SIZE) {
hid_info(hdev,
@@ -143,6 +283,17 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(dfp_rdesc_fixed);
}
break;
+
+ case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
+ if (*rsize >= 101 && rdesc[41] == 0x95 && rdesc[42] == 0x0B &&
+ rdesc[47] == 0x05 && rdesc[48] == 0x09) {
+ hid_info(hdev, "fixing up Logitech Speed Force Wireless report descriptor\n");
+ rdesc[41] = 0x05;
+ rdesc[42] = 0x09;
+ rdesc[47] = 0x95;
+ rdesc[48] = 0x0B;
+ }
+ break;
}
return rdesc;
@@ -328,6 +479,26 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi,
usage->type == EV_REL || usage->type == EV_ABS))
clear_bit(usage->code, *bit);
+ /* Ensure that Logitech wheels are not given a default fuzz/flat value */
+ if (usage->type == EV_ABS && (usage->code == ABS_X ||
+ usage->code == ABS_Y || usage->code == ABS_Z ||
+ usage->code == ABS_RZ)) {
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LOGITECH_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
+ case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+ field->application = HID_GD_MULTIAXIS;
+ break;
+ default:
+ break;
+ }
+ }
+
return 0;
}
@@ -465,7 +636,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO),
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL),
- .driver_data = LG_FF4 },
+ .driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
@@ -503,17 +674,6 @@ static struct hid_driver lg_driver = {
.probe = lg_probe,
.remove = lg_remove,
};
+module_hid_driver(lg_driver);
-static int __init lg_init(void)
-{
- return hid_register_driver(&lg_driver);
-}
-
-static void __exit lg_exit(void)
-{
- hid_unregister_driver(&lg_driver);
-}
-
-module_init(lg_init);
-module_exit(lg_exit);
MODULE_LICENSE("GPL");
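
The wheel-specific cases above work because hid-core lets report_fixup() hand back a completely different descriptor buffer. Roughly, as a sketch of the core path (not part of this patch):

/* In hid_open_report(), approximately: */
if (device->driver->report_fixup)
	start = device->driver->report_fixup(device, start, &size);

so returning df_rdesc_fixed, momo_rdesc_fixed or dfp_rdesc_fixed with *rsize updated makes the fixed descriptor the one that actually gets parsed, which is how the combined Y axis is replaced by separate throttle and brake axes.
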
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index d7947c701f30..65a6ec8d3742 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -43,11 +43,6 @@
#define G27_REV_MAJ 0x12
#define G27_REV_MIN 0x38
-#define DFP_X_MIN 0
-#define DFP_X_MAX 16383
-#define DFP_PEDAL_MIN 0
-#define DFP_PEDAL_MAX 255
-
#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
@@ -598,18 +593,6 @@ int lg4ff_init(struct hid_device *hid)
return error;
dbg_hid("sysfs interface created\n");
- /* Set default axes parameters */
- switch (lg4ff_devices[i].product_id) {
- case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
- dbg_hid("Setting axes parameters for Driving Force Pro\n");
- input_set_abs_params(dev, ABS_X, DFP_X_MIN, DFP_X_MAX, 0, 0);
- input_set_abs_params(dev, ABS_Y, DFP_PEDAL_MIN, DFP_PEDAL_MAX, 0, 0);
- input_set_abs_params(dev, ABS_RZ, DFP_PEDAL_MIN, DFP_PEDAL_MAX, 0, 0);
- break;
- default:
- break;
- }
-
/* Set the maximum range to start with */
entry->range = entry->max_range;
if (entry->set_range != NULL)
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 25ddf3e3aec6..f7f113ba083e 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -569,23 +569,6 @@ static struct hid_driver magicmouse_driver = {
.raw_event = magicmouse_raw_event,
.input_mapping = magicmouse_input_mapping,
};
+module_hid_driver(magicmouse_driver);
-static int __init magicmouse_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&magicmouse_driver);
- if (ret)
- pr_err("can't register magicmouse driver\n");
-
- return ret;
-}
-
-static void __exit magicmouse_exit(void)
-{
- hid_unregister_driver(&magicmouse_driver);
-}
-
-module_init(magicmouse_init);
-module_exit(magicmouse_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 6fcd466d0825..29d27f65a118 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -221,17 +221,6 @@ static struct hid_driver ms_driver = {
.event = ms_event,
.probe = ms_probe,
};
+module_hid_driver(ms_driver);
-static int __init ms_init(void)
-{
- return hid_register_driver(&ms_driver);
-}
-
-static void __exit ms_exit(void)
-{
- hid_unregister_driver(&ms_driver);
-}
-
-module_init(ms_init);
-module_exit(ms_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
index cd3643e06fa6..9e14c00eb1b6 100644
--- a/drivers/hid/hid-monterey.c
+++ b/drivers/hid/hid-monterey.c
@@ -63,17 +63,6 @@ static struct hid_driver mr_driver = {
.report_fixup = mr_report_fixup,
.input_mapping = mr_input_mapping,
};
+module_hid_driver(mr_driver);
-static int __init mr_init(void)
-{
- return hid_register_driver(&mr_driver);
-}
-
-static void __exit mr_exit(void)
-{
- hid_unregister_driver(&mr_driver);
-}
-
-module_init(mr_init);
-module_exit(mr_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 61543c02ea0b..7a1ebb867cf4 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -54,6 +54,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_NO_AREA (1 << 9)
#define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
#define MT_QUIRK_HOVERING (1 << 11)
+#define MT_QUIRK_CONTACT_CNT_ACCURATE (1 << 12)
struct mt_slot {
__s32 x, y, cx, cy, p, w, h;
@@ -83,8 +84,11 @@ struct mt_device {
struct mt_class mtclass; /* our mt device class */
struct mt_fields *fields; /* temporary placeholder for storing the
multitouch fields */
+ int cc_index; /* contact count field index in the report */
+ int cc_value_index; /* contact count value index in the field */
unsigned last_field_index; /* last field index of the report */
unsigned last_slot_field; /* the last field of a slot */
+ unsigned mt_report_id; /* the report ID of the multitouch device */
__s8 inputmode; /* InputMode HID feature, -1 if non-existent */
__s8 inputmode_index; /* InputMode HID feature index in the report */
__s8 maxcontact_report_id; /* Maximum Contact Number HID feature,
@@ -111,6 +115,9 @@ struct mt_device {
#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0007
#define MT_CLS_DUAL_NSMU_CONTACTID 0x0008
#define MT_CLS_INRANGE_CONTACTNUMBER 0x0009
+#define MT_CLS_NSMU 0x000a
+#define MT_CLS_DUAL_CONTACT_NUMBER 0x0010
+#define MT_CLS_DUAL_CONTACT_ID 0x0011
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -144,6 +151,9 @@ static int cypress_compute_slot(struct mt_device *td)
static struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE },
+ { .name = MT_CLS_NSMU,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP },
{ .name = MT_CLS_SERIAL,
.quirks = MT_QUIRK_ALWAYS_VALID},
@@ -170,6 +180,16 @@ static struct mt_class mt_classes[] = {
{ .name = MT_CLS_INRANGE_CONTACTNUMBER,
.quirks = MT_QUIRK_VALID_IS_INRANGE |
MT_QUIRK_SLOT_IS_CONTACTNUMBER },
+ { .name = MT_CLS_DUAL_CONTACT_NUMBER,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+ .maxcontacts = 2 },
+ { .name = MT_CLS_DUAL_CONTACT_ID,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_SLOT_IS_CONTACTID,
+ .maxcontacts = 2 },
/*
* vendor specific classes
@@ -250,6 +270,9 @@ static ssize_t mt_set_quirks(struct device *dev,
td->mtclass.quirks = val;
+ if (td->cc_index < 0)
+ td->mtclass.quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
+
return count;
}
@@ -301,6 +324,7 @@ static void mt_feature_mapping(struct hid_device *hdev,
*quirks |= MT_QUIRK_ALWAYS_VALID;
*quirks |= MT_QUIRK_IGNORE_DUPLICATES;
*quirks |= MT_QUIRK_HOVERING;
+ *quirks |= MT_QUIRK_CONTACT_CNT_ACCURATE;
*quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
*quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
*quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE;
@@ -428,6 +452,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
mt_store_field(usage, td, hi);
td->last_field_index = field->index;
td->touches_by_report++;
+ td->mt_report_id = field->report->id;
return 1;
case HID_DG_WIDTH:
hid_map_usage(hi, usage, bit, max,
@@ -459,6 +484,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->last_field_index = field->index;
return 1;
case HID_DG_CONTACTCOUNT:
+ td->cc_index = field->index;
+ td->cc_value_index = usage->usage_index;
td->last_field_index = field->index;
return 1;
case HID_DG_CONTACTMAX:
@@ -523,6 +550,10 @@ static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
*/
static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
{
+ if ((td->mtclass.quirks & MT_QUIRK_CONTACT_CNT_ACCURATE) &&
+ td->num_received >= td->num_expected)
+ return;
+
if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
int slotnum = mt_compute_slot(td, input);
struct mt_slot *s = &td->curdata;
@@ -578,6 +609,16 @@ static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
static int mt_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
+ /* we will handle the hidinput part later, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
struct mt_device *td = hid_get_drvdata(hid);
__s32 quirks = td->mtclass.quirks;
@@ -623,20 +664,13 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
td->curdata.h = value;
break;
case HID_DG_CONTACTCOUNT:
- /*
- * Includes multi-packet support where subsequent
- * packets are sent with zero contactcount.
- */
- if (value)
- td->num_expected = value;
break;
case HID_DG_TOUCH:
/* do nothing */
break;
default:
- /* fallback to the generic hidinput handling */
- return 0;
+ return;
}
if (usage->usage_index + 1 == field->report_count) {
@@ -650,12 +684,43 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
}
}
+}
- /* we have handled the hidinput part, now remains hiddev */
- if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
- hid->hiddev_hid_event(hid, field, usage, value);
+static void mt_report(struct hid_device *hid, struct hid_report *report)
+{
+ struct mt_device *td = hid_get_drvdata(hid);
+ struct hid_field *field;
+ unsigned count;
+ int r, n;
- return 1;
+ if (report->id != td->mt_report_id)
+ return;
+
+ if (!(hid->claimed & HID_CLAIMED_INPUT))
+ return;
+
+ /*
+ * Includes multi-packet support where subsequent
+ * packets are sent with zero contactcount.
+ */
+ if (td->cc_index >= 0) {
+ struct hid_field *field = report->field[td->cc_index];
+ int value = field->value[td->cc_value_index];
+ if (value)
+ td->num_expected = value;
+ }
+
+ for (r = 0; r < report->maxfield; r++) {
+ field = report->field[r];
+ count = field->report_count;
+
+ if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+ continue;
+
+ for (n = 0; n < count; n++)
+ mt_process_mt_event(hid, field, &field->usage[n],
+ field->value[n]);
+ }
}
static void mt_set_input_mode(struct hid_device *hdev)
@@ -711,6 +776,7 @@ static void mt_post_parse_default_settings(struct mt_device *td)
quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE;
+ quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}
td->mtclass.quirks = quirks;
@@ -719,11 +785,15 @@ static void mt_post_parse_default_settings(struct mt_device *td)
static void mt_post_parse(struct mt_device *td)
{
struct mt_fields *f = td->fields;
+ struct mt_class *cls = &td->mtclass;
if (td->touches_by_report > 0) {
int field_count_per_touch = f->length / td->touches_by_report;
td->last_slot_field = f->usages[field_count_per_touch - 1];
}
+
+ if (td->cc_index < 0)
+ cls->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
}
static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
@@ -781,6 +851,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
td->mtclass = *mtclass;
td->inputmode = -1;
td->maxcontact_report_id = -1;
+ td->cc_index = -1;
hid_set_drvdata(hdev, td);
td->fields = kzalloc(sizeof(struct mt_fields), GFP_KERNEL);
@@ -875,7 +946,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_3M3266) },
/* ActionStar panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
USB_DEVICE_ID_ACTIONSTAR_1011) },
@@ -888,14 +959,14 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_ATMEL_MXT_DIGITIZER) },
/* Baanto multitouch devices */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_BAANTO,
USB_DEVICE_ID_BAANTO_MT_190W2) },
/* Cando panels */
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
- { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
+ { .driver_data = MT_CLS_DUAL_CONTACT_NUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
@@ -906,12 +977,12 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
/* Chunghwa Telecom touch panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
/* CVTouch panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
USB_DEVICE_ID_CVTOUCH_SCREEN) },
@@ -982,7 +1053,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4) },
/* Elo TouchSystems IntelliTouch Plus panel */
- { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
+ { .driver_data = MT_CLS_DUAL_CONTACT_ID,
MT_USB_DEVICE(USB_VENDOR_ID_ELO,
USB_DEVICE_ID_ELO_TS2515) },
@@ -1000,12 +1071,12 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) },
/* Gametel game controller */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_BT_DEVICE(USB_VENDOR_ID_FRUCTEL,
USB_DEVICE_ID_GAMETEL_MT_MODE) },
/* GoodTouch panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
USB_DEVICE_ID_GOODTOUCH_000f) },
@@ -1023,7 +1094,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_IDEACOM_IDC6651) },
/* Ilitek dual touch panel */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
USB_DEVICE_ID_ILITEK_MULTITOUCH) },
@@ -1056,6 +1127,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_TURBOX,
USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
+ /* Nexio panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ MT_USB_DEVICE(USB_VENDOR_ID_NEXIO,
+ USB_DEVICE_ID_NEXIO_MULTITOUCH_420)},
+
/* Panasonic panels */
{ .driver_data = MT_CLS_PANASONIC,
MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
@@ -1065,7 +1141,7 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_PANABOARD_UBT880) },
/* Novatek Panel */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
USB_DEVICE_ID_NOVATEK_PCT) },
@@ -1111,7 +1187,7 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_CONFIDENCE,
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
USB_DEVICE_ID_MTP_STM)},
- { .driver_data = MT_CLS_CONFIDENCE,
+ { .driver_data = MT_CLS_DEFAULT,
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
USB_DEVICE_ID_MTP_SITRONIX)},
@@ -1121,48 +1197,48 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_TOPSEED2_PERIPAD_701) },
/* Touch International panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_TOUCH_INTL,
USB_DEVICE_ID_TOUCH_INTL_MULTI_TOUCH) },
/* Unitec panels */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
USB_DEVICE_ID_UNITEC_USB_TOUCH_0709) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
/* XAT */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XAT,
USB_DEVICE_ID_XAT_CSR) },
/* Xiroku */
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_SPX) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_MPX) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_SPX1) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_MPX1) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR1) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_SPX2) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_MPX2) },
- { .driver_data = MT_CLS_DEFAULT,
+ { .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR2) },
@@ -1193,21 +1269,10 @@ static struct hid_driver mt_driver = {
.feature_mapping = mt_feature_mapping,
.usage_table = mt_grabbed_usages,
.event = mt_event,
+ .report = mt_report,
#ifdef CONFIG_PM
.reset_resume = mt_reset_resume,
.resume = mt_resume,
#endif
};
-
-static int __init mt_init(void)
-{
- return hid_register_driver(&mt_driver);
-}
-
-static void __exit mt_exit(void)
-{
- hid_unregister_driver(&mt_driver);
-}
-
-module_init(mt_init);
-module_exit(mt_exit);
+module_hid_driver(mt_driver);
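
Two details of the hid-multitouch rework above are easy to miss. First, mt_event() now returns 1 unconditionally; as I read hid_process_event() in hid-core, a non-zero return tells the core the event was consumed, so the generic hidinput handling is skipped (which is also why mt_event() forwards to hiddev itself before returning). All input reporting then happens from the new mt_report() callback once the full frame is available. Second, the contact count is read once per report from the field cached at mapping time, rather than per-usage; a condensed sketch mirroring the code above:

/* Sketch only, mirrors mt_report() in the hunk above. */
if (td->cc_index >= 0) {
	struct hid_field *cc_field = report->field[td->cc_index];
	__s32 contact_count = cc_field->value[td->cc_value_index];

	if (contact_count)	/* zero means "continuation packet" */
		td->num_expected = contact_count;
}
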
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 86a969f63292..7757e82416e7 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -858,12 +858,43 @@ not_claimed_input:
return 1;
}
+static void ntrig_input_configured(struct hid_device *hid,
+ struct hid_input *hidinput)
+
+{
+ struct input_dev *input = hidinput->input;
+
+ if (hidinput->report->maxfield < 1)
+ return;
+
+ switch (hidinput->report->field[0]->application) {
+ case HID_DG_PEN:
+ input->name = "N-Trig Pen";
+ break;
+ case HID_DG_TOUCHSCREEN:
+ /* These keys are redundant for fingers, clear them
+ * to prevent incorrect identification */
+ __clear_bit(BTN_TOOL_PEN, input->keybit);
+ __clear_bit(BTN_TOOL_FINGER, input->keybit);
+ __clear_bit(BTN_0, input->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+ /*
+ * The physical touchscreen (single touch)
+ * input has a value for physical, whereas
+ * the multitouch only has logical input
+ * fields.
+ */
+ input->name = (hidinput->report->field[0]->physical) ?
+ "N-Trig Touchscreen" :
+ "N-Trig MultiTouch";
+ break;
+ }
+}
+
static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
struct ntrig_data *nd;
- struct hid_input *hidinput;
- struct input_dev *input;
struct hid_report *report;
if (id->driver_data)
@@ -901,38 +932,6 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
-
- list_for_each_entry(hidinput, &hdev->inputs, list) {
- if (hidinput->report->maxfield < 1)
- continue;
-
- input = hidinput->input;
- switch (hidinput->report->field[0]->application) {
- case HID_DG_PEN:
- input->name = "N-Trig Pen";
- break;
- case HID_DG_TOUCHSCREEN:
- /* These keys are redundant for fingers, clear them
- * to prevent incorrect identification */
- __clear_bit(BTN_TOOL_PEN, input->keybit);
- __clear_bit(BTN_TOOL_FINGER, input->keybit);
- __clear_bit(BTN_0, input->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
- /*
- * The physical touchscreen (single touch)
- * input has a value for physical, whereas
- * the multitouch only has logical input
- * fields.
- */
- input->name =
- (hidinput->report->field[0]
- ->physical) ?
- "N-Trig Touchscreen" :
- "N-Trig MultiTouch";
- break;
- }
- }
-
/* This is needed for devices with more recent firmware versions */
report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x0a];
if (report) {
@@ -1023,20 +1022,10 @@ static struct hid_driver ntrig_driver = {
.remove = ntrig_remove,
.input_mapping = ntrig_input_mapping,
.input_mapped = ntrig_input_mapped,
+ .input_configured = ntrig_input_configured,
.usage_table = ntrig_grabbed_usages,
.event = ntrig_event,
};
+module_hid_driver(ntrig_driver);
-static int __init ntrig_init(void)
-{
- return hid_register_driver(&ntrig_driver);
-}
-
-static void __exit ntrig_exit(void)
-{
- hid_unregister_driver(&ntrig_driver);
-}
-
-module_init(ntrig_init);
-module_exit(ntrig_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 0ffa1d2d64f0..6620f15fec22 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -50,17 +50,6 @@ static struct hid_driver ortek_driver = {
.id_table = ortek_devices,
.report_fixup = ortek_report_fixup
};
+module_hid_driver(ortek_driver);
-static int __init ortek_init(void)
-{
- return hid_register_driver(&ortek_driver);
-}
-
-static void __exit ortek_exit(void)
-{
- hid_unregister_driver(&ortek_driver);
-}
-
-module_init(ortek_init);
-module_exit(ortek_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
index 4c521de4e7e6..736b2502df4f 100644
--- a/drivers/hid/hid-petalynx.c
+++ b/drivers/hid/hid-petalynx.c
@@ -103,17 +103,6 @@ static struct hid_driver pl_driver = {
.input_mapping = pl_input_mapping,
.probe = pl_probe,
};
+module_hid_driver(pl_driver);
-static int __init pl_init(void)
-{
- return hid_register_driver(&pl_driver);
-}
-
-static void __exit pl_exit(void)
-{
- hid_unregister_driver(&pl_driver);
-}
-
-module_init(pl_init);
-module_exit(pl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index 86df26e58aba..31cd93fc3d4b 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -672,18 +672,7 @@ static struct hid_driver picolcd_driver = {
.reset_resume = picolcd_reset_resume,
#endif
};
+module_hid_driver(picolcd_driver);
-static int __init picolcd_init(void)
-{
- return hid_register_driver(&picolcd_driver);
-}
-
-static void __exit picolcd_exit(void)
-{
- hid_unregister_driver(&picolcd_driver);
-}
-
-module_init(picolcd_init);
-module_exit(picolcd_exit);
MODULE_DESCRIPTION("Minibox graphics PicoLCD Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 47ed74c46b6b..b0199d27787b 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -14,6 +14,8 @@
* 0e8f:0003 "GASIA USB Gamepad"
* - another version of the König gamepad
*
+ * 0f30:0111 "Saitek Color Rumble Pad"
+ *
* Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com>
*/
@@ -51,6 +53,7 @@
struct plff_device {
struct hid_report *report;
+ s32 maxval;
s32 *strong;
s32 *weak;
};
@@ -66,8 +69,8 @@ static int hid_plff_play(struct input_dev *dev, void *data,
right = effect->u.rumble.weak_magnitude;
debug("called with 0x%04x 0x%04x", left, right);
- left = left * 0x7f / 0xffff;
- right = right * 0x7f / 0xffff;
+ left = left * plff->maxval / 0xffff;
+ right = right * plff->maxval / 0xffff;
*plff->strong = left;
*plff->weak = right;
@@ -87,6 +90,7 @@ static int plff_init(struct hid_device *hid)
struct list_head *report_ptr = report_list;
struct input_dev *dev;
int error;
+ s32 maxval;
s32 *strong;
s32 *weak;
@@ -123,6 +127,7 @@ static int plff_init(struct hid_device *hid)
return -ENODEV;
}
+ maxval = 0x7f;
if (report->field[0]->report_count >= 4) {
report->field[0]->value[0] = 0x00;
report->field[0]->value[1] = 0x00;
@@ -135,6 +140,8 @@ static int plff_init(struct hid_device *hid)
report->field[1]->value[0] = 0x00;
strong = &report->field[2]->value[0];
weak = &report->field[3]->value[0];
+ if (hid->vendor == USB_VENDOR_ID_JESS2)
+ maxval = 0xff;
debug("detected 4-field device");
} else {
hid_err(hid, "not enough fields or values\n");
@@ -158,6 +165,7 @@ static int plff_init(struct hid_device *hid)
plff->report = report;
plff->strong = strong;
plff->weak = weak;
+ plff->maxval = maxval;
*strong = 0x00;
*weak = 0x00;
@@ -207,6 +215,7 @@ static const struct hid_device_id pl_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR),
.driver_data = 1 }, /* Twin USB Joystick */
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003), },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD), },
{ }
};
MODULE_DEVICE_TABLE(hid, pl_devices);
@@ -216,17 +225,6 @@ static struct hid_driver pl_driver = {
.id_table = pl_devices,
.probe = pl_probe,
};
+module_hid_driver(pl_driver);
-static int __init pl_init(void)
-{
- return hid_register_driver(&pl_driver);
-}
-
-static void __exit pl_exit(void)
-{
- hid_unregister_driver(&pl_driver);
-}
-
-module_init(pl_init);
-module_exit(pl_exit);
MODULE_LICENSE("GPL");
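
The hid-pl.c change above scales rumble magnitudes against a per-device maximum instead of the hard-coded 0x7f. As a quick worked example: for the newly added JESS2 pad, maxval is 0xff, so a full-strength request of 0xffff maps to 0xffff * 0xff / 0xffff = 0xff (255), whereas the old code would have capped it at 0x7f (127), roughly half of what an 8-bit full-range report field can express.
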
diff --git a/drivers/hid/hid-primax.c b/drivers/hid/hid-primax.c
index c15adb0c98a1..3a1c3c4c50dc 100644
--- a/drivers/hid/hid-primax.c
+++ b/drivers/hid/hid-primax.c
@@ -75,18 +75,7 @@ static struct hid_driver px_driver = {
.id_table = px_devices,
.raw_event = px_raw_event,
};
+module_hid_driver(px_driver);
-static int __init px_init(void)
-{
- return hid_register_driver(&px_driver);
-}
-
-static void __exit px_exit(void)
-{
- hid_unregister_driver(&px_driver);
-}
-
-module_init(px_init);
-module_exit(px_exit);
MODULE_AUTHOR("Terry Lambert <tlambert@google.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index ec8ca3336315..4e1c4bcbdc03 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -889,23 +889,6 @@ static struct hid_driver pk_driver = {
.probe = pk_probe,
.remove = pk_remove,
};
+module_hid_driver(pk_driver);
-static int pk_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&pk_driver);
- if (ret)
- pr_err("can't register prodikeys driver\n");
-
- return ret;
-}
-
-static void pk_exit(void)
-{
- hid_unregister_driver(&pk_driver);
-}
-
-module_init(pk_init);
-module_exit(pk_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ps3remote.c b/drivers/hid/hid-ps3remote.c
index 03811e539d71..f1239d3c5b14 100644
--- a/drivers/hid/hid-ps3remote.c
+++ b/drivers/hid/hid-ps3remote.c
@@ -198,18 +198,7 @@ static struct hid_driver ps3remote_driver = {
.report_fixup = ps3remote_fixup,
.input_mapping = ps3remote_mapping,
};
+module_hid_driver(ps3remote_driver);
-static int __init ps3remote_init(void)
-{
- return hid_register_driver(&ps3remote_driver);
-}
-
-static void __exit ps3remote_exit(void)
-{
- hid_unregister_driver(&ps3remote_driver);
-}
-
-module_init(ps3remote_init);
-module_exit(ps3remote_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Dillow <dave@thedillows.org>, Antonio Ospite <ospite@studenti.unina.it>");
diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
index 5084fb4b7e91..6adc0fa08d96 100644
--- a/drivers/hid/hid-roccat-lua.c
+++ b/drivers/hid/hid-roccat-lua.c
@@ -208,19 +208,7 @@ static struct hid_driver lua_driver = {
.probe = lua_probe,
.remove = lua_remove
};
-
-static int __init lua_init(void)
-{
- return hid_register_driver(&lua_driver);
-}
-
-static void __exit lua_exit(void)
-{
- hid_unregister_driver(&lua_driver);
-}
-
-module_init(lua_init);
-module_exit(lua_exit);
+module_hid_driver(lua_driver);
MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Lua driver");
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 45aea77bb611..37961c7e397d 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -54,17 +54,6 @@ static struct hid_driver saitek_driver = {
.id_table = saitek_devices,
.report_fixup = saitek_report_fixup
};
+module_hid_driver(saitek_driver);
-static int __init saitek_init(void)
-{
- return hid_register_driver(&saitek_driver);
-}
-
-static void __exit saitek_exit(void)
-{
- hid_unregister_driver(&saitek_driver);
-}
-
-module_init(saitek_init);
-module_exit(saitek_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index a5821d317229..7cbb067d4a9e 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -196,17 +196,6 @@ static struct hid_driver samsung_driver = {
.input_mapping = samsung_input_mapping,
.probe = samsung_probe,
};
+module_hid_driver(samsung_driver);
-static int __init samsung_init(void)
-{
- return hid_register_driver(&samsung_driver);
-}
-
-static void __exit samsung_exit(void)
-{
- hid_unregister_driver(&samsung_driver);
-}
-
-module_init(samsung_init);
-module_exit(samsung_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 0bc58bd8d4f5..6679788bf75a 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -605,16 +605,12 @@ static void sensor_hub_remove(struct hid_device *hdev)
}
static const struct hid_device_id sensor_hub_devices[] = {
- { HID_DEVICE(BUS_USB, HID_GROUP_SENSOR_HUB, HID_ANY_ID, HID_ANY_ID) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
+ HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, sensor_hub_devices);
-static const struct hid_usage_id sensor_hub_grabbed_usages[] = {
- { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
- { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1 }
-};
-
static struct hid_driver sensor_hub_driver = {
.name = "hid-sensor-hub",
.id_table = sensor_hub_devices,
@@ -627,19 +623,7 @@ static struct hid_driver sensor_hub_driver = {
.reset_resume = sensor_hub_reset_resume,
#endif
};
-
-static int __init sensor_hub_init(void)
-{
- return hid_register_driver(&sensor_hub_driver);
-}
-
-static void __exit sensor_hub_exit(void)
-{
- hid_unregister_driver(&sensor_hub_driver);
-}
-
-module_init(sensor_hub_init);
-module_exit(sensor_hub_exit);
+module_hid_driver(sensor_hub_driver);
MODULE_DESCRIPTION("HID Sensor Hub driver");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@intel.com>");
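
The sensor-hub match table now uses HID_BUS_ANY so the same driver binds regardless of transport, pairing with the hid-core change earlier in this series that lets I2C-attached devices be grouped as HID_GROUP_SENSOR_HUB. For reference, HID_BUS_ANY is assumed here to be the wildcard bus value from include/linux/hid.h, roughly:

#define HID_BUS_ANY	0xffff	/* assumed wildcard: matches any hdev->bus */

so { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID, HID_ANY_ID) } matches USB and I2C sensor hubs alike.
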
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
index 42257acfeb73..28f774003f03 100644
--- a/drivers/hid/hid-sjoy.c
+++ b/drivers/hid/hid-sjoy.c
@@ -177,19 +177,8 @@ static struct hid_driver sjoy_driver = {
.id_table = sjoy_devices,
.probe = sjoy_probe,
};
+module_hid_driver(sjoy_driver);
-static int __init sjoy_init(void)
-{
- return hid_register_driver(&sjoy_driver);
-}
-
-static void __exit sjoy_exit(void)
-{
- hid_unregister_driver(&sjoy_driver);
-}
-
-module_init(sjoy_init);
-module_exit(sjoy_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jussi Kivilinna");
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 7f33ebf299c2..312098e4af4f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -33,6 +33,28 @@ static const u8 sixaxis_rdesc_fixup[] = {
0x03, 0x46, 0xFF, 0x03, 0x09, 0x01, 0x81, 0x02
};
+static const u8 sixaxis_rdesc_fixup2[] = {
+ 0x05, 0x01, 0x09, 0x04, 0xa1, 0x01, 0xa1, 0x02,
+ 0x85, 0x01, 0x75, 0x08, 0x95, 0x01, 0x15, 0x00,
+ 0x26, 0xff, 0x00, 0x81, 0x03, 0x75, 0x01, 0x95,
+ 0x13, 0x15, 0x00, 0x25, 0x01, 0x35, 0x00, 0x45,
+ 0x01, 0x05, 0x09, 0x19, 0x01, 0x29, 0x13, 0x81,
+ 0x02, 0x75, 0x01, 0x95, 0x0d, 0x06, 0x00, 0xff,
+ 0x81, 0x03, 0x15, 0x00, 0x26, 0xff, 0x00, 0x05,
+ 0x01, 0x09, 0x01, 0xa1, 0x00, 0x75, 0x08, 0x95,
+ 0x04, 0x35, 0x00, 0x46, 0xff, 0x00, 0x09, 0x30,
+ 0x09, 0x31, 0x09, 0x32, 0x09, 0x35, 0x81, 0x02,
+ 0xc0, 0x05, 0x01, 0x95, 0x13, 0x09, 0x01, 0x81,
+ 0x02, 0x95, 0x0c, 0x81, 0x01, 0x75, 0x10, 0x95,
+ 0x04, 0x26, 0xff, 0x03, 0x46, 0xff, 0x03, 0x09,
+ 0x01, 0x81, 0x02, 0xc0, 0xa1, 0x02, 0x85, 0x02,
+ 0x75, 0x08, 0x95, 0x30, 0x09, 0x01, 0xb1, 0x02,
+ 0xc0, 0xa1, 0x02, 0x85, 0xee, 0x75, 0x08, 0x95,
+ 0x30, 0x09, 0x01, 0xb1, 0x02, 0xc0, 0xa1, 0x02,
+ 0x85, 0xef, 0x75, 0x08, 0x95, 0x30, 0x09, 0x01,
+ 0xb1, 0x02, 0xc0, 0xc0,
+};
+
struct sony_sc {
unsigned long quirks;
};
@@ -43,9 +65,19 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
{
struct sony_sc *sc = hid_get_drvdata(hdev);
- if ((sc->quirks & VAIO_RDESC_CONSTANT) &&
- *rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) {
- hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n");
+ /*
+ * Some Sony RF receivers wrongly declare the mouse pointer as a
+ * a constant non-data variable.
+ */
+ if ((sc->quirks & VAIO_RDESC_CONSTANT) && *rsize >= 56 &&
+ /* usage page: generic desktop controls */
+ /* rdesc[0] == 0x05 && rdesc[1] == 0x01 && */
+ /* usage: mouse */
+ rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+ /* input (usage page for x,y axes): constant, variable, relative */
+ rdesc[54] == 0x81 && rdesc[55] == 0x07) {
+ hid_info(hdev, "Fixing up Sony RF Receiver report descriptor\n");
+ /* input: data, variable, relative */
rdesc[55] = 0x06;
}
@@ -56,6 +88,12 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Fixing up Sony Sixaxis report descriptor\n");
memcpy((void *)&rdesc[83], (void *)&sixaxis_rdesc_fixup,
sizeof(sixaxis_rdesc_fixup));
+ } else if (sc->quirks & SIXAXIS_CONTROLLER_USB &&
+ *rsize > sizeof(sixaxis_rdesc_fixup2)) {
+ hid_info(hdev, "Sony Sixaxis clone detected. Using original report descriptor (size: %d clone; %d new)\n",
+ *rsize, (int)sizeof(sixaxis_rdesc_fixup2));
+ *rsize = sizeof(sixaxis_rdesc_fixup2);
+ memcpy(rdesc, &sixaxis_rdesc_fixup2, *rsize);
}
return rdesc;
}
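For context on the byte values patched above: the data byte of a HID Input main item is a bit field, and the comments in the hunk are decoding it. A short reference sketch (flag names as used in include/linux/hid.h; treat the exact macro names as an assumption):

/*
 * Input item data byte, low bits (HID 1.11, section 6.2.2.5):
 *   bit 0: 0 = Data,     1 = Constant   (HID_MAIN_ITEM_CONSTANT)
 *   bit 1: 0 = Array,    1 = Variable   (HID_MAIN_ITEM_VARIABLE)
 *   bit 2: 0 = Absolute, 1 = Relative   (HID_MAIN_ITEM_RELATIVE)
 *
 * So rdesc[55] == 0x07 declares the pointer axes as Constant|Variable|Relative
 * (a pointer that can never report data), and rewriting it to 0x06 turns that
 * into Data|Variable|Relative, which is what a mouse should report.
 */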
@@ -217,6 +255,8 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = SIXAXIS_CONTROLLER_BT },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE),
.driver_data = VAIO_RDESC_CONSTANT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
+ .driver_data = VAIO_RDESC_CONSTANT },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
@@ -229,17 +269,6 @@ static struct hid_driver sony_driver = {
.report_fixup = sony_report_fixup,
.raw_event = sony_raw_event
};
+module_hid_driver(sony_driver);
-static int __init sony_init(void)
-{
- return hid_register_driver(&sony_driver);
-}
-
-static void __exit sony_exit(void)
-{
- hid_unregister_driver(&sony_driver);
-}
-
-module_init(sony_init);
-module_exit(sony_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
index 602013741718..e94371a059cb 100644
--- a/drivers/hid/hid-speedlink.c
+++ b/drivers/hid/hid-speedlink.c
@@ -73,17 +73,6 @@ static struct hid_driver speedlink_driver = {
.input_mapping = speedlink_input_mapping,
.event = speedlink_event,
};
+module_hid_driver(speedlink_driver);
-static int __init speedlink_init(void)
-{
- return hid_register_driver(&speedlink_driver);
-}
-
-static void __exit speedlink_exit(void)
-{
- hid_unregister_driver(&speedlink_driver);
-}
-
-module_init(speedlink_init);
-module_exit(speedlink_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
new file mode 100644
index 000000000000..2ed995cda44a
--- /dev/null
+++ b/drivers/hid/hid-steelseries.c
@@ -0,0 +1,393 @@
+/*
+ * HID driver for Steelseries SRW-S1
+ *
+ * Copyright (c) 2013 Simon Wood
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#define SRWS1_NUMBER_LEDS 15
+struct steelseries_srws1_data {
+ __u16 led_state;
+ /* the last element is used for setting all leds simultaneously */
+ struct led_classdev *led[SRWS1_NUMBER_LEDS + 1];
+};
+#endif
+
+/* Fixed report descriptor for Steelseries SRW-S1 wheel controller
+ *
+ * The original descriptor hides the sensitivity and assists dials
+ * behind a custom vendor usage page. This fixed descriptor makes them
+ * appear in the 'Generic Desktop' usage.
+ */
+
+static __u8 steelseries_srws1_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop) */
+0x09, 0x08, /* Usage (MultiAxis), Changed */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x05, 0x01, /* Changed Usage Page (Desktop), */
+0x09, 0x30, /* Changed Usage (X), */
+0x16, 0xF8, 0xF8, /* Logical Minimum (-1800), */
+0x26, 0x08, 0x07, /* Logical Maximum (1800), */
+0x65, 0x14, /* Unit (Degrees), */
+0x55, 0x0F, /* Unit Exponent (15), */
+0x75, 0x10, /* Report Size (16), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x31, /* Changed Usage (Y), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x75, 0x0C, /* Report Size (12), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x32, /* Changed Usage (Z), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+0x75, 0x0C, /* Report Size (12), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x39, /* Usage (Hat Switch), */
+0x25, 0x07, /* Logical Maximum (7), */
+0x35, 0x00, /* Physical Minimum (0), */
+0x46, 0x3B, 0x01, /* Physical Maximum (315), */
+0x65, 0x14, /* Unit (Degrees), */
+0x75, 0x04, /* Report Size (4), */
+0x95, 0x01, /* Report Count (1), */
+0x81, 0x02, /* Input (Variable), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x65, 0x00, /* Unit, */
+0x75, 0x01, /* Report Size (1), */
+0x95, 0x03, /* Report Count (3), */
+0x81, 0x01, /* Input (Constant), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x11, /* Usage Maximum (11h), */
+0x95, 0x11, /* Report Count (17), */
+0x81, 0x02, /* Input (Variable), */
+ /* ---- Dial patch starts here ---- */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x33, /* Usage (RX), */
+0x75, 0x04, /* Report Size (4), */
+0x95, 0x02, /* Report Count (2), */
+0x15, 0x00, /* Logical Minimum (0), */
+0x25, 0x0b, /* Logical Maximum (11), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x35, /* Usage (RZ), */
+0x75, 0x04, /* Report Size (4), */
+0x95, 0x01, /* Report Count (1), */
+0x25, 0x03, /* Logical Maximum (3), */
+0x81, 0x02, /* Input (Variable), */
+ /* ---- Dial patch ends here ---- */
+0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+0x09, 0x01, /* Usage (01h), */
+0x75, 0x04, /* Changed Report Size (4), */
+0x95, 0x0D, /* Changed Report Count (13), */
+0x81, 0x02, /* Input (Variable), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x09, 0x02, /* Usage (02h), */
+0x75, 0x08, /* Report Size (8), */
+0x95, 0x10, /* Report Count (16), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds)
+{
+ struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ __s32 *value = report->field[0]->value;
+
+ value[0] = 0x40;
+ value[1] = leds & 0xFF;
+ value[2] = leds >> 8;
+ value[3] = 0x00;
+ value[4] = 0x00;
+ value[5] = 0x00;
+ value[6] = 0x00;
+ value[7] = 0x00;
+ value[8] = 0x00;
+ value[9] = 0x00;
+ value[10] = 0x00;
+ value[11] = 0x00;
+ value[12] = 0x00;
+ value[13] = 0x00;
+ value[14] = 0x00;
+ value[15] = 0x00;
+
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+
+ /* Note: LED change does not show on device until the device is read/polled */
+}
+
+static void steelseries_srws1_led_all_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data = hid_get_drvdata(hid);
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return;
+ }
+
+ if (value == LED_OFF)
+ drv_data->led_state = 0;
+ else
+ drv_data->led_state = (1 << (SRWS1_NUMBER_LEDS + 1)) - 1;
+
+ steelseries_srws1_set_leds(hid, drv_data->led_state);
+}
+
+static enum led_brightness steelseries_srws1_led_all_get_brightness(struct led_classdev *led_cdev)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data;
+
+ drv_data = hid_get_drvdata(hid);
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return LED_OFF;
+ }
+
+ return (drv_data->led_state >> SRWS1_NUMBER_LEDS) ? LED_FULL : LED_OFF;
+}
+
+static void steelseries_srws1_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data = hid_get_drvdata(hid);
+ int i, state = 0;
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return;
+ }
+
+ for (i = 0; i < SRWS1_NUMBER_LEDS; i++) {
+ if (led_cdev != drv_data->led[i])
+ continue;
+
+ state = (drv_data->led_state >> i) & 1;
+ if (value == LED_OFF && state) {
+ drv_data->led_state &= ~(1 << i);
+ steelseries_srws1_set_leds(hid, drv_data->led_state);
+ } else if (value != LED_OFF && !state) {
+ drv_data->led_state |= 1 << i;
+ steelseries_srws1_set_leds(hid, drv_data->led_state);
+ }
+ break;
+ }
+}
+
+static enum led_brightness steelseries_srws1_led_get_brightness(struct led_classdev *led_cdev)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hid = container_of(dev, struct hid_device, dev);
+ struct steelseries_srws1_data *drv_data;
+ int i, value = 0;
+
+ drv_data = hid_get_drvdata(hid);
+
+ if (!drv_data) {
+ hid_err(hid, "Device data not found.");
+ return LED_OFF;
+ }
+
+ for (i = 0; i < SRWS1_NUMBER_LEDS; i++)
+ if (led_cdev == drv_data->led[i]) {
+ value = (drv_data->led_state >> i) & 1;
+ break;
+ }
+
+ return value ? LED_FULL : LED_OFF;
+}
+
+static int steelseries_srws1_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret, i;
+ struct led_classdev *led;
+ size_t name_sz;
+ char *name;
+
+ struct steelseries_srws1_data *drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+
+ if (drv_data == NULL) {
+ hid_err(hdev, "can't alloc SRW-S1 memory\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, drv_data);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ goto err_free;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ goto err_free;
+ }
+
+ /* register led subsystem */
+ drv_data->led_state = 0;
+ for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++)
+ drv_data->led[i] = NULL;
+
+ steelseries_srws1_set_leds(hdev, 0);
+
+ name_sz = strlen(hdev->uniq) + 16;
+
+ /* 'ALL', for setting all LEDs simultaneously */
+ led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ if (!led) {
+ hid_err(hdev, "can't allocate memory for LED ALL\n");
+ goto err_led;
+ }
+
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz, "SRWS1::%s::RPMALL", hdev->uniq);
+ led->name = name;
+ led->brightness = 0;
+ led->max_brightness = 1;
+ led->brightness_get = steelseries_srws1_led_all_get_brightness;
+ led->brightness_set = steelseries_srws1_led_all_set_brightness;
+
+ drv_data->led[SRWS1_NUMBER_LEDS] = led;
+ ret = led_classdev_register(&hdev->dev, led);
+ if (ret)
+ goto err_led;
+
+ /* Each individual LED */
+ for (i = 0; i < SRWS1_NUMBER_LEDS; i++) {
+ led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ if (!led) {
+ hid_err(hdev, "can't allocate memory for LED %d\n", i);
+ goto err_led;
+ }
+
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz, "SRWS1::%s::RPM%d", hdev->uniq, i+1);
+ led->name = name;
+ led->brightness = 0;
+ led->max_brightness = 1;
+ led->brightness_get = steelseries_srws1_led_get_brightness;
+ led->brightness_set = steelseries_srws1_led_set_brightness;
+
+ drv_data->led[i] = led;
+ ret = led_classdev_register(&hdev->dev, led);
+
+ if (ret) {
+ hid_err(hdev, "failed to register LED %d. Aborting.\n", i);
+err_led:
+ /* Deregister all LEDs (if any) */
+ for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
+ led = drv_data->led[i];
+ drv_data->led[i] = NULL;
+ if (!led)
+ continue;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+ goto out; /* but let the driver continue without LEDs */
+ }
+ }
+out:
+ return 0;
+err_free:
+ kfree(drv_data);
+ return ret;
+}
+
+static void steelseries_srws1_remove(struct hid_device *hdev)
+{
+ int i;
+ struct led_classdev *led;
+
+ struct steelseries_srws1_data *drv_data = hid_get_drvdata(hdev);
+
+ if (drv_data) {
+ /* Deregister LEDs (if any) */
+ for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
+ led = drv_data->led[i];
+ drv_data->led[i] = NULL;
+ if (!led)
+ continue;
+ led_classdev_unregister(led);
+ kfree(led);
+ }
+
+ }
+
+ hid_hw_stop(hdev);
+ kfree(drv_data);
+ return;
+}
+#endif
+
+static __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize >= 115 && rdesc[11] == 0x02 && rdesc[13] == 0xc8
+ && rdesc[29] == 0xbb && rdesc[40] == 0xc5) {
+ hid_info(hdev, "Fixing up Steelseries SRW-S1 report descriptor\n");
+ rdesc = steelseries_srws1_rdesc_fixed;
+ *rsize = sizeof(steelseries_srws1_rdesc_fixed);
+ }
+ return rdesc;
+}
+
+static const struct hid_device_id steelseries_srws1_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices);
+
+static struct hid_driver steelseries_srws1_driver = {
+ .name = "steelseries_srws1",
+ .id_table = steelseries_srws1_devices,
+#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+ .probe = steelseries_srws1_probe,
+ .remove = steelseries_srws1_remove,
+#endif
+ .report_fixup = steelseries_srws1_report_fixup
+};
+
+static int __init steelseries_srws1_init(void)
+{
+ return hid_register_driver(&steelseries_srws1_driver);
+}
+
+static void __exit steelseries_srws1_exit(void)
+{
+ hid_unregister_driver(&steelseries_srws1_driver);
+}
+
+module_init(steelseries_srws1_init);
+module_exit(steelseries_srws1_exit);
+MODULE_LICENSE("GPL");
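The led_state word used throughout the new driver packs one bit per RPM LED (bit 0 = RPM1 ... bit 14 = RPM15), and the same word shifted by SRWS1_NUMBER_LEDS doubles as the state of the 'RPMALL' pseudo-LED. A small standalone sketch of that bookkeeping (illustrative names, not part of the driver):

#include <stdbool.h>
#include <stdint.h>

#define SRWS1_NUMBER_LEDS 15

/* Set or clear a single LED bit; report whether the packed state changed,
 * so the caller only sends an output report when something is different. */
static bool srws1_update_led(uint16_t *state, unsigned int led, bool on)
{
	uint16_t old = *state;

	if (on)
		*state |= (uint16_t)(1u << led);
	else
		*state &= (uint16_t)~(1u << led);

	return *state != old;
}

For example, srws1_update_led(&led_state, 3, true) lights RPM4 and returns true only if it was previously off, mirroring the check-then-write logic in steelseries_srws1_led_set_brightness().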
diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
index 45b4b066a262..87fc91e1c8de 100644
--- a/drivers/hid/hid-sunplus.c
+++ b/drivers/hid/hid-sunplus.c
@@ -63,17 +63,6 @@ static struct hid_driver sp_driver = {
.report_fixup = sp_report_fixup,
.input_mapping = sp_input_mapping,
};
+module_hid_driver(sp_driver);
-static int __init sp_init(void)
-{
- return hid_register_driver(&sp_driver);
-}
-
-static void __exit sp_exit(void)
-{
- hid_unregister_driver(&sp_driver);
-}
-
-module_init(sp_init);
-module_exit(sp_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c
new file mode 100644
index 000000000000..2055a52e9a20
--- /dev/null
+++ b/drivers/hid/hid-thingm.c
@@ -0,0 +1,272 @@
+/*
+ * ThingM blink(1) USB RGB LED driver
+ *
+ * Copyright 2013 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/hid.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+
+#define BLINK1_CMD_SIZE 9
+
+#define blink1_rgb_to_r(rgb) ((rgb & 0xFF0000) >> 16)
+#define blink1_rgb_to_g(rgb) ((rgb & 0x00FF00) >> 8)
+#define blink1_rgb_to_b(rgb) ((rgb & 0x0000FF) >> 0)
+
+/**
+ * struct blink1_data - blink(1) device specific data
+ * @hdev: HID device.
+ * @led_cdev: LED class instance.
+ * @rgb: 8-bit per channel RGB notation.
+ * @fade: fade time in hundredths of a second.
+ * @brightness: brightness coefficient.
+ * @play: play/pause in-memory patterns.
+ */
+struct blink1_data {
+ struct hid_device *hdev;
+ struct led_classdev led_cdev;
+ u32 rgb;
+ u16 fade;
+ u8 brightness;
+ bool play;
+};
+
+static int blink1_send_command(struct blink1_data *data,
+ u8 buf[BLINK1_CMD_SIZE])
+{
+ int ret;
+
+ hid_dbg(data->hdev, "command: %d%c%.2x%.2x%.2x%.2x%.2x%.2x%.2x\n",
+ buf[0], buf[1], buf[2], buf[3], buf[4],
+ buf[5], buf[6], buf[7], buf[8]);
+
+ ret = data->hdev->hid_output_raw_report(data->hdev, buf,
+ BLINK1_CMD_SIZE, HID_FEATURE_REPORT);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int blink1_update_color(struct blink1_data *data)
+{
+ u8 buf[BLINK1_CMD_SIZE] = { 1, 'n', 0, 0, 0, 0, 0, 0, 0 };
+
+ if (data->brightness) {
+ unsigned int coef = DIV_ROUND_CLOSEST(255, data->brightness);
+
+ buf[2] = DIV_ROUND_CLOSEST(blink1_rgb_to_r(data->rgb), coef);
+ buf[3] = DIV_ROUND_CLOSEST(blink1_rgb_to_g(data->rgb), coef);
+ buf[4] = DIV_ROUND_CLOSEST(blink1_rgb_to_b(data->rgb), coef);
+ }
+
+ if (data->fade) {
+ buf[1] = 'c';
+ buf[5] = (data->fade & 0xFF00) >> 8;
+ buf[6] = (data->fade & 0x00FF);
+ }
+
+ return blink1_send_command(data, buf);
+}
+
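A worked example of the DIV_ROUND_CLOSEST() scaling in blink1_update_color() above, reproduced as a tiny userspace check (the macro below mirrors the kernel's behaviour for positive values; this is not the driver itself):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int brightness = 128;            /* LED class brightness, 0..255 */
	unsigned int rgb = 0xFF8040;              /* stored colour */
	unsigned int coef = DIV_ROUND_CLOSEST(255u, brightness);  /* == 2 */

	/* prints "r=128 g=64 b=32": each channel is roughly halved at half brightness */
	printf("r=%u g=%u b=%u\n",
	       DIV_ROUND_CLOSEST((rgb >> 16) & 0xFF, coef),
	       DIV_ROUND_CLOSEST((rgb >> 8) & 0xFF, coef),
	       DIV_ROUND_CLOSEST(rgb & 0xFF, coef));
	return 0;
}

At brightness 0 the driver simply leaves the colour bytes zeroed, i.e. the LED is turned off.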
+static void blink1_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct blink1_data *data = dev_get_drvdata(led_cdev->dev->parent);
+
+ data->brightness = brightness;
+ if (blink1_update_color(data))
+ hid_err(data->hdev, "failed to update color\n");
+}
+
+static enum led_brightness blink1_led_get(struct led_classdev *led_cdev)
+{
+ struct blink1_data *data = dev_get_drvdata(led_cdev->dev->parent);
+
+ return data->brightness;
+}
+
+static ssize_t blink1_show_rgb(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%.6X\n", data->rgb);
+}
+
+static ssize_t blink1_store_rgb(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+ long unsigned int rgb;
+ int ret;
+
+ ret = kstrtoul(buf, 16, &rgb);
+ if (ret)
+ return ret;
+
+ /* RGB triplet notation is 24-bit hexadecimal */
+ if (rgb > 0xFFFFFF)
+ return -EINVAL;
+
+ data->rgb = rgb;
+ ret = blink1_update_color(data);
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR(rgb, S_IRUGO | S_IWUSR, blink1_show_rgb, blink1_store_rgb);
+
+static ssize_t blink1_show_fade(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%d\n", data->fade * 10);
+}
+
+static ssize_t blink1_store_fade(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+ long unsigned int fade;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &fade);
+ if (ret)
+ return ret;
+
+ /* blink(1) accepts 16-bit fade time, number of 10ms ticks */
+ fade = DIV_ROUND_CLOSEST(fade, 10);
+ if (fade > 65535)
+ return -EINVAL;
+
+ data->fade = fade;
+
+ return count;
+}
+
+static DEVICE_ATTR(fade, S_IRUGO | S_IWUSR,
+ blink1_show_fade, blink1_store_fade);
+
+static ssize_t blink1_show_play(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%d\n", data->play);
+}
+
+static ssize_t blink1_store_play(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct blink1_data *data = dev_get_drvdata(dev->parent);
+ u8 cmd[BLINK1_CMD_SIZE] = { 1, 'p', 0, 0, 0, 0, 0, 0, 0 };
+ long unsigned int play;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &play);
+ if (ret)
+ return ret;
+
+ data->play = !!play;
+ cmd[2] = data->play;
+ ret = blink1_send_command(data, cmd);
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR(play, S_IRUGO | S_IWUSR,
+ blink1_show_play, blink1_store_play);
+
+static const struct attribute_group blink1_sysfs_group = {
+ .attrs = (struct attribute *[]) {
+ &dev_attr_rgb.attr,
+ &dev_attr_fade.attr,
+ &dev_attr_play.attr,
+ NULL
+ },
+};
+
+static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct blink1_data *data;
+ struct led_classdev *led;
+ char led_name[13];
+ int ret;
+
+ data = devm_kzalloc(&hdev->dev, sizeof(struct blink1_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ hid_set_drvdata(hdev, data);
+ data->hdev = hdev;
+ data->rgb = 0xFFFFFF; /* set a default white color */
+
+ ret = hid_parse(hdev);
+ if (ret)
+ goto error;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret)
+ goto error;
+
+ /* blink(1) serial numbers range is 0x1A001000 to 0x1A002FFF */
+ led = &data->led_cdev;
+ snprintf(led_name, sizeof(led_name), "blink1::%s", hdev->uniq + 4);
+ led->name = led_name;
+ led->brightness_set = blink1_led_set;
+ led->brightness_get = blink1_led_get;
+ ret = led_classdev_register(&hdev->dev, led);
+ if (ret)
+ goto stop;
+
+ ret = sysfs_create_group(&led->dev->kobj, &blink1_sysfs_group);
+ if (ret)
+ goto remove_led;
+
+ return 0;
+
+remove_led:
+ led_classdev_unregister(led);
+stop:
+ hid_hw_stop(hdev);
+error:
+ return ret;
+}
+
+static void thingm_remove(struct hid_device *hdev)
+{
+ struct blink1_data *data = hid_get_drvdata(hdev);
+ struct led_classdev *led = &data->led_cdev;
+
+ sysfs_remove_group(&led->dev->kobj, &blink1_sysfs_group);
+ led_classdev_unregister(led);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id thingm_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, thingm_table);
+
+static struct hid_driver thingm_driver = {
+ .name = "thingm",
+ .probe = thingm_probe,
+ .remove = thingm_remove,
+ .id_table = thingm_table,
+};
+
+module_hid_driver(thingm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vivien Didelot <vivien.didelot@savoirfairelinux.com>");
+MODULE_DESCRIPTION("ThingM blink(1) USB RGB LED driver");
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index 9f85f827607f..d790d8d71f7f 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -73,18 +73,7 @@ static struct hid_driver tivo_driver = {
.id_table = tivo_devices,
.input_mapping = tivo_input_mapping,
};
+module_hid_driver(tivo_driver);
-static int __init tivo_init(void)
-{
- return hid_register_driver(&tivo_driver);
-}
-
-static void __exit tivo_exit(void)
-{
- hid_unregister_driver(&tivo_driver);
-}
-
-module_init(tivo_init);
-module_exit(tivo_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 83a933b9c2e9..e4fcf3f702a5 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -261,17 +261,6 @@ static struct hid_driver tm_driver = {
.id_table = tm_devices,
.probe = tm_probe,
};
+module_hid_driver(tm_driver);
-static int __init tm_init(void)
-{
- return hid_register_driver(&tm_driver);
-}
-
-static void __exit tm_exit(void)
-{
- hid_unregister_driver(&tm_driver);
-}
-
-module_init(tm_init);
-module_exit(tm_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 613ff7b1d746..8a5b843e9dd6 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -76,17 +76,6 @@ static struct hid_driver ts_driver = {
.id_table = ts_devices,
.input_mapping = ts_input_mapping,
};
+module_hid_driver(ts_driver);
-static int __init ts_init(void)
-{
- return hid_register_driver(&ts_driver);
-}
-
-static void __exit ts_exit(void)
-{
- hid_unregister_driver(&ts_driver);
-}
-
-module_init(ts_init);
-module_exit(ts_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c
index f23456b1fd4b..c08c36443f83 100644
--- a/drivers/hid/hid-twinhan.c
+++ b/drivers/hid/hid-twinhan.c
@@ -131,17 +131,6 @@ static struct hid_driver twinhan_driver = {
.id_table = twinhan_devices,
.input_mapping = twinhan_input_mapping,
};
+module_hid_driver(twinhan_driver);
-static int __init twinhan_init(void)
-{
- return hid_register_driver(&twinhan_driver);
-}
-
-static void __exit twinhan_exit(void)
-{
- hid_unregister_driver(&twinhan_driver);
-}
-
-module_init(twinhan_init);
-module_exit(twinhan_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 2e56a1fd2375..fb8b516ff0ed 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -650,17 +650,6 @@ static struct hid_driver uclogic_driver = {
.id_table = uclogic_devices,
.report_fixup = uclogic_report_fixup,
};
+module_hid_driver(uclogic_driver);
-static int __init uclogic_init(void)
-{
- return hid_register_driver(&uclogic_driver);
-}
-
-static void __exit uclogic_exit(void)
-{
- hid_unregister_driver(&uclogic_driver);
-}
-
-module_init(uclogic_init);
-module_exit(uclogic_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 2f60da9ed066..a4a8bb0da688 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -953,23 +953,7 @@ static struct hid_driver wacom_driver = {
.raw_event = wacom_raw_event,
.input_mapped = wacom_input_mapped,
};
+module_hid_driver(wacom_driver);
-static int __init wacom_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&wacom_driver);
- if (ret)
- pr_err("can't register wacom driver\n");
- return ret;
-}
-
-static void __exit wacom_exit(void)
-{
- hid_unregister_driver(&wacom_driver);
-}
-
-module_init(wacom_init);
-module_exit(wacom_exit);
MODULE_DESCRIPTION("Driver for Wacom Graphire Bluetooth and Wacom Intuos4 WL");
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-waltop.c b/drivers/hid/hid-waltop.c
index bb536ab5941e..059931d7b392 100644
--- a/drivers/hid/hid-waltop.c
+++ b/drivers/hid/hid-waltop.c
@@ -779,17 +779,6 @@ static struct hid_driver waltop_driver = {
.report_fixup = waltop_report_fixup,
.raw_event = waltop_raw_event,
};
+module_hid_driver(waltop_driver);
-static int __init waltop_init(void)
-{
- return hid_register_driver(&waltop_driver);
-}
-
-static void __exit waltop_exit(void)
-{
- hid_unregister_driver(&waltop_driver);
-}
-
-module_init(waltop_init);
-module_exit(waltop_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 84e2fbec5fbb..0fb8ab93db68 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1294,25 +1294,8 @@ static struct hid_driver wiimote_hid_driver = {
.remove = wiimote_hid_remove,
.raw_event = wiimote_hid_event,
};
+module_hid_driver(wiimote_hid_driver);
-static int __init wiimote_init(void)
-{
- int ret;
-
- ret = hid_register_driver(&wiimote_hid_driver);
- if (ret)
- pr_err("Can't register wiimote hid driver\n");
-
- return ret;
-}
-
-static void __exit wiimote_exit(void)
-{
- hid_unregister_driver(&wiimote_hid_driver);
-}
-
-module_init(wiimote_init);
-module_exit(wiimote_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION(WIIMOTE_NAME " Device Driver");
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
index eec329197c16..90124ffaa2a5 100644
--- a/drivers/hid/hid-wiimote-debug.c
+++ b/drivers/hid/hid-wiimote-debug.c
@@ -31,7 +31,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
unsigned long flags;
ssize_t ret;
char buf[16];
- __u16 size;
+ __u16 size = 0;
if (s == 0)
return -EINVAL;
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
index 38ae87772e96..0472191d4a72 100644
--- a/drivers/hid/hid-wiimote-ext.c
+++ b/drivers/hid/hid-wiimote-ext.c
@@ -403,14 +403,14 @@ static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload)
if (ext->motionp) {
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04));
+ wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x04));
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08));
+ wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x08));
} else {
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01));
+ wiiext_keymap[WIIEXT_KEY_Z], !(payload[5] & 0x01));
input_report_key(ext->input,
- wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02));
+ wiiext_keymap[WIIEXT_KEY_C], !(payload[5] & 0x02));
}
input_sync(ext->input);
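The Nunchuk reports its C and Z buttons active-low, so a single negation is the correct translation to the input layer; the old double negation inverted the sense. Worked out for the non-MotionPlus format used in the second branch above:

/*
 * payload[5] = 0xFC: bit 0 (Z) and bit 1 (C) are clear, i.e. both buttons held.
 *   new code:  !(0xFC & 0x01) == 1,  !(0xFC & 0x02) == 1  -> both reported pressed
 *   old code: !!(0xFC & 0x01) == 0, !!(0xFC & 0x02) == 0  -> both reported released
 */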
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index f6ba81df71bd..af66452592e9 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -152,17 +152,6 @@ static struct hid_driver zp_driver = {
.id_table = zp_devices,
.probe = zp_probe,
};
+module_hid_driver(zp_driver);
-static int __init zp_init(void)
-{
- return hid_register_driver(&zp_driver);
-}
-
-static void __exit zp_exit(void)
-{
- hid_unregister_driver(&zp_driver);
-}
-
-module_init(zp_init);
-module_exit(zp_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
index 1ad85f2257b4..e4cddeccd6b5 100644
--- a/drivers/hid/hid-zydacron.c
+++ b/drivers/hid/hid-zydacron.c
@@ -219,17 +219,6 @@ static struct hid_driver zc_driver = {
.probe = zc_probe,
.remove = zc_remove,
};
+module_hid_driver(zc_driver);
-static int __init zc_init(void)
-{
- return hid_register_driver(&zc_driver);
-}
-
-static void __exit zc_exit(void)
-{
- hid_unregister_driver(&zc_driver);
-}
-
-module_init(zc_init);
-module_exit(zc_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 413a73187d33..f3bbbce8353b 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -581,6 +581,7 @@ int __init hidraw_init(void)
if (result < 0)
goto error_class;
+ printk(KERN_INFO "hidraw: raw HID events driver (C) Jiri Kosina\n");
out:
return result;
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 12e4fdc810bf..ec7930217a6d 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -34,6 +34,7 @@
#include <linux/kernel.h>
#include <linux/hid.h>
#include <linux/mutex.h>
+#include <linux/acpi.h>
#include <linux/i2c/i2c-hid.h>
@@ -139,6 +140,8 @@ struct i2c_hid {
unsigned long flags; /* device flags */
wait_queue_head_t wait; /* For waiting the interrupt */
+
+ struct i2c_hid_platform_data pdata;
};
static int __i2c_hid_command(struct i2c_client *client,
@@ -540,13 +543,24 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
{
struct i2c_client *client = hid->driver_data;
int report_id = buf[0];
+ int ret;
if (report_type == HID_INPUT_REPORT)
return -EINVAL;
- return i2c_hid_set_report(client,
+ if (report_id) {
+ buf++;
+ count--;
+ }
+
+ ret = i2c_hid_set_report(client,
report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
report_id, buf, count);
+
+ if (report_id && ret >= 0)
+ ret++; /* add report_id to the number of transferred bytes */
+
+ return ret;
}
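The hunk above strips the leading report-ID byte from numbered reports before the bus transfer and then re-adds it to the returned count, so callers still see the full length they passed in. A generic sketch of that strip-and-readjust pattern (hypothetical_bus_write() is a made-up stand-in assumed to return the number of payload bytes written, or a negative error):

#include <stddef.h>
#include <stdint.h>

int hypothetical_bus_write(int report_id, const uint8_t *buf, size_t count);

static int send_numbered_report(const uint8_t *buf, size_t count)
{
	int report_id = buf[0];
	int ret;

	if (report_id) {
		/* numbered report: the ID travels in the request, not the payload */
		buf++;
		count--;
	}

	ret = hypothetical_bus_write(report_id, buf, count);
	if (report_id && ret >= 0)
		ret++;		/* account for the report-ID byte we stripped */

	return ret;
}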
static int i2c_hid_parse(struct hid_device *hid)
@@ -810,6 +824,70 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
return 0;
}
+#ifdef CONFIG_ACPI
+static int i2c_hid_acpi_pdata(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ static u8 i2c_hid_guid[] = {
+ 0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
+ 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
+ };
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object params[4], *obj;
+ struct acpi_object_list input;
+ struct acpi_device *adev;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(&client->dev);
+ if (!handle || acpi_bus_get_device(handle, &adev))
+ return -ENODEV;
+
+ input.count = ARRAY_SIZE(params);
+ input.pointer = params;
+
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = sizeof(i2c_hid_guid);
+ params[0].buffer.pointer = i2c_hid_guid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = 1;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = 1; /* HID function */
+ params[3].type = ACPI_TYPE_INTEGER;
+ params[3].integer.value = 0;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) {
+ dev_err(&client->dev, "device _DSM execution failed\n");
+ return -ENODEV;
+ }
+
+ obj = (union acpi_object *)buf.pointer;
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ dev_err(&client->dev, "device _DSM returned invalid type: %d\n",
+ obj->type);
+ kfree(buf.pointer);
+ return -EINVAL;
+ }
+
+ pdata->hid_descriptor_address = obj->integer.value;
+
+ kfree(buf.pointer);
+ return 0;
+}
+
+static const struct acpi_device_id i2c_hid_acpi_match[] = {
+ {"ACPI0C50", 0 },
+ {"PNP0C50", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
+#else
+static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+
static int i2c_hid_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
@@ -821,11 +899,6 @@ static int i2c_hid_probe(struct i2c_client *client,
dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
- if (!platform_data) {
- dev_err(&client->dev, "HID register address not provided\n");
- return -EINVAL;
- }
-
if (!client->irq) {
dev_err(&client->dev,
"HID over i2c has not been provided an Int IRQ\n");
@@ -836,11 +909,22 @@ static int i2c_hid_probe(struct i2c_client *client,
if (!ihid)
return -ENOMEM;
+ if (!platform_data) {
+ ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
+ if (ret) {
+ dev_err(&client->dev,
+ "HID register address not provided\n");
+ goto err;
+ }
+ } else {
+ ihid->pdata = *platform_data;
+ }
+
i2c_set_clientdata(client, ihid);
ihid->client = client;
- hidRegister = platform_data->hid_descriptor_address;
+ hidRegister = ihid->pdata.hid_descriptor_address;
ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
init_waitqueue_head(&ihid->wait);
@@ -873,6 +957,7 @@ static int i2c_hid_probe(struct i2c_client *client,
hid->hid_get_raw_report = i2c_hid_get_raw_report;
hid->hid_output_raw_report = i2c_hid_output_raw_report;
hid->dev.parent = &client->dev;
+ ACPI_HANDLE_SET(&hid->dev, ACPI_HANDLE(&client->dev));
hid->bus = BUS_I2C;
hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
@@ -964,6 +1049,7 @@ static struct i2c_driver i2c_hid_driver = {
.name = "i2c_hid",
.owner = THIS_MODULE,
.pm = &i2c_hid_pm,
+ .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
},
.probe = i2c_hid_probe,
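With this change, boards without ACPI keep supplying the HID descriptor register through platform data attached to their I2C board info, exactly as before. A hedged example of what such a board file entry might look like (device address, IRQ and the pdata value are made up; the "hid" name is assumed to match the driver's i2c_device_id table):

#include <linux/i2c.h>
#include <linux/i2c/i2c-hid.h>

static struct i2c_hid_platform_data example_hid_pdata = {
	.hid_descriptor_address = 0x0001,	/* register holding the HID descriptor */
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("hid", 0x2c),
		.platform_data	= &example_hid_pdata,
		.irq		= 42,
	},
};

This would be registered from board init with i2c_register_board_info(bus_number, example_i2c_devs, ARRAY_SIZE(example_i2c_devs)).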
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 714cd8cc9579..fc307e0422af 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -11,6 +11,7 @@
*/
#include <linux/atomic.h>
+#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
@@ -276,6 +277,94 @@ static struct hid_ll_driver uhid_hid_driver = {
.parse = uhid_hid_parse,
};
+#ifdef CONFIG_COMPAT
+
+/* Apparently we haven't stepped on these rakes enough times yet. */
+struct uhid_create_req_compat {
+ __u8 name[128];
+ __u8 phys[64];
+ __u8 uniq[64];
+
+ compat_uptr_t rd_data;
+ __u16 rd_size;
+
+ __u16 bus;
+ __u32 vendor;
+ __u32 product;
+ __u32 version;
+ __u32 country;
+} __attribute__((__packed__));
+
+static int uhid_event_from_user(const char __user *buffer, size_t len,
+ struct uhid_event *event)
+{
+ if (is_compat_task()) {
+ u32 type;
+
+ if (get_user(type, buffer))
+ return -EFAULT;
+
+ if (type == UHID_CREATE) {
+ /*
+ * This is our messed up request with compat pointer.
+ * It is largish (more than 256 bytes) so we better
+ * allocate it from the heap.
+ */
+ struct uhid_create_req_compat *compat;
+
+ compat = kmalloc(sizeof(*compat), GFP_KERNEL);
+ if (!compat)
+ return -ENOMEM;
+
+ buffer += sizeof(type);
+ len -= sizeof(type);
+ if (copy_from_user(compat, buffer,
+ min(len, sizeof(*compat)))) {
+ kfree(compat);
+ return -EFAULT;
+ }
+
+ /* Shuffle the data over to proper structure */
+ event->type = type;
+
+ memcpy(event->u.create.name, compat->name,
+ sizeof(compat->name));
+ memcpy(event->u.create.phys, compat->phys,
+ sizeof(compat->phys));
+ memcpy(event->u.create.uniq, compat->uniq,
+ sizeof(compat->uniq));
+
+ event->u.create.rd_data = compat_ptr(compat->rd_data);
+ event->u.create.rd_size = compat->rd_size;
+
+ event->u.create.bus = compat->bus;
+ event->u.create.vendor = compat->vendor;
+ event->u.create.product = compat->product;
+ event->u.create.version = compat->version;
+ event->u.create.country = compat->country;
+
+ kfree(compat);
+ return 0;
+ }
+ /* All others can be copied directly */
+ }
+
+ if (copy_from_user(event, buffer, min(len, sizeof(*event))))
+ return -EFAULT;
+
+ return 0;
+}
+#else
+static int uhid_event_from_user(const char __user *buffer, size_t len,
+ struct uhid_event *event)
+{
+ if (copy_from_user(event, buffer, min(len, sizeof(*event))))
+ return -EFAULT;
+
+ return 0;
+}
+#endif
+
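Background for the shim above: rd_data is a pointer inside struct uhid_create_req, so its size (and the offset of everything after it) shifts between 32-bit and 64-bit userspace, which is why the request cannot simply be copy_from_user()'d into the native structure. A reduced illustration of the mismatch (field subsets only, not the real definitions):

/* 64-bit kernel view */
struct req_native { __u8 name[128]; __u8 *rd_data; __u16 rd_size; };  /* pointer: 8 bytes */

/* what a 32-bit caller actually wrote (packed, as above) */
struct req_compat { __u8 name[128]; __u32 rd_data; __u16 rd_size; };  /* pointer: 4 bytes */

/* offsetof(rd_size) and the total size differ, so every field after the
 * pointer would be misread without the explicit field-by-field copy above. */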
static int uhid_dev_create(struct uhid_device *uhid,
const struct uhid_event *ev)
{
@@ -498,10 +587,10 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
len = min(count, sizeof(uhid->input_buf));
- if (copy_from_user(&uhid->input_buf, buffer, len)) {
- ret = -EFAULT;
+
+ ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
+ if (ret)
goto unlock;
- }
switch (uhid->input_buf.type) {
case UHID_CREATE:
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index ac9e35228254..e0e6abf1cd3b 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index b38ef6d8d049..64630f15f181 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -2,7 +2,7 @@ menu "Microsoft Hyper-V guest support"
config HYPERV
tristate "Microsoft Hyper-V client drivers"
- depends on X86 && ACPI && PCI
+ depends on X86 && ACPI && PCI && X86_LOCAL_APIC
help
Select this option to run Linux as a Hyper-V client operating
system.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 773a2f25a8f0..0b122f8c7005 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -55,7 +55,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
[channel->monitor_grp].pending);
} else {
- vmbus_set_event(channel->offermsg.child_relid);
+ vmbus_set_event(channel);
}
}
@@ -181,7 +181,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
PAGE_SHIFT;
- open_msg->server_contextarea_gpadlhandle = 0;
+ open_msg->target_vp = newchannel->target_vp;
if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
@@ -564,6 +564,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
int ret;
+ bool signal = false;
/* Setup the descriptor */
@@ -580,9 +581,9 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -606,6 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
+ bool signal = false;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
@@ -641,9 +643,9 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -665,6 +667,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct scatterlist bufferlist[3];
u64 aligned_data = 0;
+ bool signal = false;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -703,9 +706,9 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
sg_set_buf(&bufferlist[2], &aligned_data,
packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
- if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+ if (ret == 0 && signal)
vmbus_setevent(channel);
return ret;
@@ -732,6 +735,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
u32 packetlen;
u32 userlen;
int ret;
+ bool signal = false;
*buffer_actual_len = 0;
*requestid = 0;
@@ -758,8 +762,10 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
/* Copy over the packet to the user buffer */
ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
- (desc.offset8 << 3));
+ (desc.offset8 << 3), &signal);
+ if (signal)
+ vmbus_setevent(channel);
return 0;
}
@@ -774,8 +780,8 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
{
struct vmpacket_descriptor desc;
u32 packetlen;
- u32 userlen;
int ret;
+ bool signal = false;
*buffer_actual_len = 0;
*requestid = 0;
@@ -788,7 +794,6 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
packetlen = desc.len8 << 3;
- userlen = packetlen - (desc.offset8 << 3);
*buffer_actual_len = packetlen;
@@ -802,7 +807,11 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
*requestid = desc.trans_id;
/* Copy over the entire packet to the user buffer */
- ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
+ ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
+ &signal);
+
+ if (signal)
+ vmbus_setevent(channel);
return 0;
}
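All the send and receive paths in this file now follow the same shape: the ring-buffer operation reports, through the new bool, whether the host side actually needs a kick, and vmbus_setevent() is only called in that case, instead of keying off the interrupt mask directly. Reduced to its skeleton (using the signatures visible in this diff):

	bool signal = false;
	int ret;

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
	if (ret == 0 && signal)
		vmbus_setevent(channel);	/* the host is only signalled when the ring state requires it */

	return ret;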
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f84c5cff8d4..53a8600162a5 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -257,6 +257,70 @@ static void vmbus_process_offer(struct work_struct *work)
}
}
+enum {
+ IDE = 0,
+ SCSI,
+ NIC,
+ MAX_PERF_CHN,
+};
+
+/*
+ * This is an array of device_ids (device types) that are performance critical.
+ * We attempt to distribute the interrupt load for these devices across
+ * all available CPUs.
+ */
+static const struct hv_vmbus_device_id hp_devs[] = {
+ /* IDE */
+ { HV_IDE_GUID, },
+ /* Storage - SCSI */
+ { HV_SCSI_GUID, },
+ /* Network */
+ { HV_NIC_GUID, },
+};
+
+
+/*
+ * We use this state to statically distribute the channel interrupt load.
+ */
+static u32 next_vp;
+
+/*
+ * Starting with Win8, we can statically distribute the incoming
+ * channel interrupt load by binding a channel to VCPU. We
+ * implement here a simple round robin scheme for distributing
+ * the interrupt load.
+ * We will bind channels that are not performance critical to cpu 0 and
+ * performance critical channels (IDE, SCSI and Network) will be uniformly
+ * distributed across all available CPUs.
+ */
+static u32 get_vp_index(uuid_le *type_guid)
+{
+ u32 cur_cpu;
+ int i;
+ bool perf_chn = false;
+ u32 max_cpus = num_online_cpus();
+
+ for (i = IDE; i < MAX_PERF_CHN; i++) {
+ if (!memcmp(type_guid->b, hp_devs[i].guid,
+ sizeof(uuid_le))) {
+ perf_chn = true;
+ break;
+ }
+ }
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
+ /*
+ * Prior to win8, all channel interrupts are
+ * delivered on cpu 0.
+ * Also if the channel is not a performance critical
+ * channel, bind it to cpu 0.
+ */
+ return 0;
+ }
+ cur_cpu = (++next_vp % max_cpus);
+ return hv_context.vp_index[cur_cpu];
+}
+
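A worked example of the distribution above, assuming a Win8-or-later host with 4 online CPUs: the first performance-critical channel (IDE, SCSI or network) gets next_vp = 1 and lands on CPU 1, the following ones on CPUs 2, 3, 0, 1, ... while every other channel, and every channel on a pre-Win8 host, stays on CPU 0. The core of that policy in isolation (illustrative only):

static unsigned int next_vp_example;

static unsigned int pick_cpu(bool perf_chn, unsigned int max_cpus)
{
	if (!perf_chn)
		return 0;			/* non-critical channels stay on CPU 0 */

	return ++next_vp_example % max_cpus;	/* round-robin the rest across CPUs */
}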
/*
* vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
*
@@ -275,6 +339,35 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
return;
}
+ /*
+ * By default we setup state to enable batched
+ * reading. A specific service can choose to
+ * disable this prior to opening the channel.
+ */
+ newchannel->batched_reading = true;
+
+ /*
+ * Setup state for signalling the host.
+ */
+ newchannel->sig_event = (struct hv_input_signal_event *)
+ (ALIGN((unsigned long)
+ &newchannel->sig_buf,
+ HV_HYPERCALL_PARAM_ALIGN));
+
+ newchannel->sig_event->connectionid.asu32 = 0;
+ newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
+ newchannel->sig_event->flag_number = 0;
+ newchannel->sig_event->rsvdz = 0;
+
+ if (vmbus_proto_version != VERSION_WS2008) {
+ newchannel->is_dedicated_interrupt =
+ (offer->is_dedicated_interrupt != 0);
+ newchannel->sig_event->connectionid.u.id =
+ offer->connection_id;
+ }
+
+ newchannel->target_vp = get_vp_index(&offer->offer.if_type);
+
memcpy(&newchannel->offermsg, offer,
sizeof(struct vmbus_channel_offer_channel));
newchannel->monitor_grp = (u8)offer->monitorid / 32;
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 650c9f0b6642..253a74ba245c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
@@ -40,15 +41,99 @@ struct vmbus_connection vmbus_connection = {
};
/*
+ * Negotiated protocol version with the host.
+ */
+__u32 vmbus_proto_version;
+EXPORT_SYMBOL_GPL(vmbus_proto_version);
+
+static __u32 vmbus_get_next_version(__u32 current_version)
+{
+ switch (current_version) {
+ case (VERSION_WIN7):
+ return VERSION_WS2008;
+
+ case (VERSION_WIN8):
+ return VERSION_WIN7;
+
+ case (VERSION_WS2008):
+ default:
+ return VERSION_INVAL;
+ }
+}
+
+static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+ __u32 version)
+{
+ int ret = 0;
+ struct vmbus_channel_initiate_contact *msg;
+ unsigned long flags;
+ int t;
+
+ init_completion(&msginfo->waitevent);
+
+ msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
+
+ msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
+ msg->vmbus_version_requested = version;
+ msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
+ msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
+ msg->monitor_page2 = virt_to_phys(
+ (void *)((unsigned long)vmbus_connection.monitor_pages +
+ PAGE_SIZE));
+
+ /*
+ * Add to list before we send the request since we may
+ * receive the response before returning from this routine
+ */
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&msginfo->msglistentry,
+ &vmbus_connection.chn_msg_list);
+
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ ret = vmbus_post_msg(msg,
+ sizeof(struct vmbus_channel_initiate_contact));
+ if (ret != 0) {
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+ flags);
+ return ret;
+ }
+
+ /* Wait for the connection response */
+ t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+ if (t == 0) {
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
+ flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+ flags);
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ /* Check if successful */
+ if (msginfo->response.version_response.version_supported) {
+ vmbus_connection.conn_state = CONNECTED;
+ } else {
+ return -ECONNREFUSED;
+ }
+
+ return ret;
+}
+
+/*
* vmbus_connect - Sends a connect request on the partition service connection
*/
int vmbus_connect(void)
{
int ret = 0;
- int t;
struct vmbus_channel_msginfo *msginfo = NULL;
- struct vmbus_channel_initiate_contact *msg;
- unsigned long flags;
+ __u32 version;
/* Initialize the vmbus connection */
vmbus_connection.conn_state = CONNECTING;
@@ -99,69 +184,38 @@ int vmbus_connect(void)
goto cleanup;
}
- init_completion(&msginfo->waitevent);
-
- msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
-
- msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
- msg->vmbus_version_requested = VMBUS_REVISION_NUMBER;
- msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
- msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
- msg->monitor_page2 = virt_to_phys(
- (void *)((unsigned long)vmbus_connection.monitor_pages +
- PAGE_SIZE));
-
/*
- * Add to list before we send the request since we may
- * receive the response before returning from this routine
+ * Negotiate a compatible VMBUS version number with the
+ * host. We start with the highest number we can support
+ * and work our way down until we negotiate a compatible
+ * version.
*/
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_add_tail(&msginfo->msglistentry,
- &vmbus_connection.chn_msg_list);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ version = VERSION_CURRENT;
- ret = vmbus_post_msg(msg,
- sizeof(struct vmbus_channel_initiate_contact));
- if (ret != 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- goto cleanup;
- }
+ do {
+ ret = vmbus_negotiate_version(msginfo, version);
+ if (ret == 0)
+ break;
- /* Wait for the connection response */
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- if (t == 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
- flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ version = vmbus_get_next_version(version);
+ } while (version != VERSION_INVAL);
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- /* Check if successful */
- if (msginfo->response.version_response.version_supported) {
- vmbus_connection.conn_state = CONNECTED;
- } else {
- pr_err("Unable to connect, "
- "Version %d not supported by Hyper-V\n",
- VMBUS_REVISION_NUMBER);
- ret = -ECONNREFUSED;
+ if (version == VERSION_INVAL)
goto cleanup;
- }
+
+ vmbus_proto_version = version;
+ pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
+ host_info_eax, host_info_ebx >> 16,
+ host_info_ebx & 0xFFFF, host_info_ecx,
+ host_info_edx >> 24, host_info_edx & 0xFFFFFF,
+ version >> 16, version & 0xFFFF);
kfree(msginfo);
return 0;
cleanup:
+ pr_err("Unable to connect to host\n");
vmbus_connection.conn_state = DISCONNECTED;
if (vmbus_connection.work_queue)
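The negotiation loop above walks down the version ladder until the host accepts one (VERSION_CURRENT resolves to VERSION_WIN8 at this point in the series; treat the exact constants as an assumption). A worked trace:

/*
 * Win8 host:    VERSION_WIN8 accepted on the first pass.
 * Win7 host:    VERSION_WIN8 refused -> VERSION_WIN7 accepted.
 * WS2008 host:  VERSION_WIN8 refused -> VERSION_WIN7 refused -> VERSION_WS2008 accepted.
 * Unknown host: all three refused, vmbus_get_next_version(VERSION_WS2008) == VERSION_INVAL,
 *               so vmbus_connect() drops to cleanup and fails.
 * Whichever version was accepted ends up in vmbus_proto_version for later feature checks.
 */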
@@ -212,6 +266,9 @@ static void process_chn_event(u32 relid)
{
struct vmbus_channel *channel;
unsigned long flags;
+ void *arg;
+ bool read_state;
+ u32 bytes_to_read;
/*
* Find the channel based on this relid and invokes the
@@ -234,10 +291,29 @@ static void process_chn_event(u32 relid)
*/
spin_lock_irqsave(&channel->inbound_lock, flags);
- if (channel->onchannel_callback != NULL)
- channel->onchannel_callback(channel->channel_callback_context);
- else
+ if (channel->onchannel_callback != NULL) {
+ arg = channel->channel_callback_context;
+ read_state = channel->batched_reading;
+ /*
+ * This callback reads the messages sent by the host.
+ * We can optimize host to guest signaling by ensuring:
+ * 1. While reading the channel, we disable interrupts from
+ * host.
+ * 2. Ensure that we process all posted messages from the host
+ * before returning from this callback.
+ * 3. Once we return, enable signaling from the host. Once this
+ * state is set we check to see if additional packets are
+ * available to read. In this case we repeat the process.
+ */
+
+ do {
+ hv_begin_read(&channel->inbound);
+ channel->onchannel_callback(arg);
+ bytes_to_read = hv_end_read(&channel->inbound);
+ } while (read_state && (bytes_to_read != 0));
+ } else {
pr_err("no channel callback for relid - %u\n", relid);
+ }
spin_unlock_irqrestore(&channel->inbound_lock, flags);
}
@@ -248,10 +324,32 @@ static void process_chn_event(u32 relid)
void vmbus_on_event(unsigned long data)
{
u32 dword;
- u32 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+ u32 maxdword;
int bit;
u32 relid;
- u32 *recv_int_page = vmbus_connection.recv_int_page;
+ u32 *recv_int_page = NULL;
+ void *page_addr;
+ int cpu = smp_processor_id();
+ union hv_synic_event_flags *event;
+
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7)) {
+ maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+ recv_int_page = vmbus_connection.recv_int_page;
+ } else {
+ /*
+ * When the host is win8 and beyond, the event page
+ * can be directly checked to get the id of the channel
+ * that has the interrupt pending.
+ */
+ maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
+ page_addr = hv_context.synic_event_page[cpu];
+ event = (union hv_synic_event_flags *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ recv_int_page = event->flags32;
+ }
+
+
/* Check events */
if (!recv_int_page)
@@ -307,12 +405,16 @@ int vmbus_post_msg(void *buffer, size_t buflen)
/*
* vmbus_set_event - Send an event notification to the parent
*/
-int vmbus_set_event(u32 child_relid)
+int vmbus_set_event(struct vmbus_channel *channel)
{
- /* Each u32 represents 32 channels */
- sync_set_bit(child_relid & 31,
- (unsigned long *)vmbus_connection.send_int_page +
- (child_relid >> 5));
+ u32 child_relid = channel->offermsg.child_relid;
+
+ if (!channel->is_dedicated_interrupt) {
+ /* Each u32 represents 32 channels */
+ sync_set_bit(child_relid & 31,
+ (unsigned long *)vmbus_connection.send_int_page +
+ (child_relid >> 5));
+ }
- return hv_signal_event();
+ return hv_signal_event(channel->sig_event);
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 3648f8f0f368..1c5481da6e4a 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -27,6 +27,7 @@
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
+#include <linux/interrupt.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"
@@ -34,13 +35,16 @@
struct hv_context hv_context = {
.synic_initialized = false,
.hypercall_page = NULL,
- .signal_event_param = NULL,
- .signal_event_buffer = NULL,
};
/*
* query_hypervisor_info - Get version info of the windows hypervisor
*/
+unsigned int host_info_eax;
+unsigned int host_info_ebx;
+unsigned int host_info_ecx;
+unsigned int host_info_edx;
+
static int query_hypervisor_info(void)
{
unsigned int eax;
@@ -70,13 +74,10 @@ static int query_hypervisor_info(void)
edx = 0;
op = HVCPUID_VERSION;
cpuid(op, &eax, &ebx, &ecx, &edx);
- pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
- eax,
- ebx >> 16,
- ebx & 0xFFFF,
- ecx,
- edx >> 24,
- edx & 0xFFFFFF);
+ host_info_eax = eax;
+ host_info_ebx = ebx;
+ host_info_ecx = ecx;
+ host_info_edx = edx;
}
return max_leaf;
}
@@ -137,6 +138,10 @@ int hv_init(void)
memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
memset(hv_context.synic_message_page, 0,
sizeof(void *) * NR_CPUS);
+ memset(hv_context.vp_index, 0,
+ sizeof(int) * NR_CPUS);
+ memset(hv_context.event_dpc, 0,
+ sizeof(void *) * NR_CPUS);
max_leaf = query_hypervisor_info();
@@ -168,24 +173,6 @@ int hv_init(void)
hv_context.hypercall_page = virtaddr;
- /* Setup the global signal event param for the signal event hypercall */
- hv_context.signal_event_buffer =
- kmalloc(sizeof(struct hv_input_signal_event_buffer),
- GFP_KERNEL);
- if (!hv_context.signal_event_buffer)
- goto cleanup;
-
- hv_context.signal_event_param =
- (struct hv_input_signal_event *)
- (ALIGN((unsigned long)
- hv_context.signal_event_buffer,
- HV_HYPERCALL_PARAM_ALIGN));
- hv_context.signal_event_param->connectionid.asu32 = 0;
- hv_context.signal_event_param->connectionid.u.id =
- VMBUS_EVENT_CONNECTION_ID;
- hv_context.signal_event_param->flag_number = 0;
- hv_context.signal_event_param->rsvdz = 0;
-
return 0;
cleanup:
@@ -213,10 +200,6 @@ void hv_cleanup(void)
/* Reset our OS id */
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
- kfree(hv_context.signal_event_buffer);
- hv_context.signal_event_buffer = NULL;
- hv_context.signal_event_param = NULL;
-
if (hv_context.hypercall_page) {
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -273,13 +256,12 @@ int hv_post_message(union hv_connection_id connection_id,
*
* This involves a hypercall.
*/
-u16 hv_signal_event(void)
+u16 hv_signal_event(void *con_id)
{
u16 status;
- status = do_hypercall(HVCALL_SIGNAL_EVENT,
- hv_context.signal_event_param,
- NULL) & 0xFFFF;
+ status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF);
+
return status;
}
@@ -297,6 +279,7 @@ void hv_synic_init(void *irqarg)
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
union hv_synic_scontrol sctrl;
+ u64 vp_index;
u32 irq_vector = *((u32 *)(irqarg));
int cpu = smp_processor_id();
@@ -307,6 +290,15 @@ void hv_synic_init(void *irqarg)
/* Check the version */
rdmsrl(HV_X64_MSR_SVERSION, version);
+ hv_context.event_dpc[cpu] = (struct tasklet_struct *)
+ kmalloc(sizeof(struct tasklet_struct),
+ GFP_ATOMIC);
+ if (hv_context.event_dpc[cpu] == NULL) {
+ pr_err("Unable to allocate event dpc\n");
+ goto cleanup;
+ }
+ tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+
hv_context.synic_message_page[cpu] =
(void *)get_zeroed_page(GFP_ATOMIC);
@@ -345,7 +337,7 @@ void hv_synic_init(void *irqarg)
shared_sint.as_uint64 = 0;
shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
shared_sint.masked = false;
- shared_sint.auto_eoi = false;
+ shared_sint.auto_eoi = true;
wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
@@ -356,6 +348,14 @@ void hv_synic_init(void *irqarg)
wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
hv_context.synic_initialized = true;
+
+ /*
+ * Set up the mapping between Hyper-V's notion
+ * of cpuid and Linux's notion of cpuid.
+ * This array will be indexed using Linux cpuid.
+ */
+ rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
+ hv_context.vp_index[cpu] = (u32)vp_index;
return;
cleanup:
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index f6c0011a0337..37873213e24f 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -29,7 +29,6 @@
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
-#include <linux/mman.h>
#include <linux/percpu_counter.h>
#include <linux/hyperv.h>
@@ -403,7 +402,7 @@ struct dm_info_header {
*/
struct dm_info_msg {
- struct dm_info_header header;
+ struct dm_header hdr;
__u32 reserved;
__u32 info_size;
__u8 info[];
@@ -415,10 +414,17 @@ struct dm_info_msg {
static bool hot_add;
static bool do_hot_add;
+/*
+ * Delay reporting memory pressure by
+ * the specified number of seconds.
+ */
+static uint pressure_report_delay = 30;
module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
+module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
static int dm_ring_size = (5 * PAGE_SIZE);
@@ -503,16 +509,48 @@ static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg)
static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
- switch (msg->header.type) {
+ struct dm_info_header *info_hdr;
+
+ info_hdr = (struct dm_info_header *)msg->info;
+
+ switch (info_hdr->type) {
case INFO_TYPE_MAX_PAGE_CNT:
pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
- pr_info("Data Size is %d\n", msg->header.data_size);
+ pr_info("Data Size is %d\n", info_hdr->data_size);
break;
default:
- pr_info("Received Unknown type: %d\n", msg->header.type);
+ pr_info("Received Unknown type: %d\n", info_hdr->type);
}
}
+unsigned long compute_balloon_floor(void)
+{
+ unsigned long min_pages;
+#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+ /* Simple continuous piecewise linear function:
+ * max MiB -> min MiB gradient
+ * 0 0
+ * 16 16
+ * 32 24
+ * 128 72 (1/2)
+ * 512 168 (1/4)
+ * 2048 360 (1/8)
+ * 8192 552 (1/32)
+ * 32768 1320
+ * 131072 4392
+ */
+ if (totalram_pages < MB2PAGES(128))
+ min_pages = MB2PAGES(8) + (totalram_pages >> 1);
+ else if (totalram_pages < MB2PAGES(512))
+ min_pages = MB2PAGES(40) + (totalram_pages >> 2);
+ else if (totalram_pages < MB2PAGES(2048))
+ min_pages = MB2PAGES(104) + (totalram_pages >> 3);
+ else
+ min_pages = MB2PAGES(296) + (totalram_pages >> 5);
+#undef MB2PAGES
+ return min_pages;
+}
+
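As a quick sanity check on the piecewise mapping above, the following stand-alone sketch (illustrative only, assuming 4 KiB pages so PAGE_SHIFT == 12) reproduces the comment table: 128 MiB of RAM yields a 72 MiB floor, 512 MiB yields 168 MiB, 2048 MiB yields 360 MiB, and 8192 MiB yields 552 MiB.

#include <stdio.h>

#define PAGE_SHIFT 12                        /* assumption: 4 KiB pages */
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))

/* Same piecewise floor computation as compute_balloon_floor() above. */
static unsigned long floor_pages(unsigned long totalram_pages)
{
        if (totalram_pages < MB2PAGES(128))
                return MB2PAGES(8) + (totalram_pages >> 1);
        else if (totalram_pages < MB2PAGES(512))
                return MB2PAGES(40) + (totalram_pages >> 2);
        else if (totalram_pages < MB2PAGES(2048))
                return MB2PAGES(104) + (totalram_pages >> 3);
        else
                return MB2PAGES(296) + (totalram_pages >> 5);
}

int main(void)
{
        unsigned long mb[] = { 128, 512, 2048, 8192 };
        int i;

        for (i = 0; i < 4; i++)
                printf("%lu MiB -> floor %lu MiB\n", mb[i],
                       floor_pages(MB2PAGES(mb[i])) >> (20 - PAGE_SHIFT));
        return 0;
}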
/*
 * Post our status, as it relates to memory pressure, to the
* host. Host expects the guests to post this status
@@ -526,15 +564,30 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
static void post_status(struct hv_dynmem_device *dm)
{
struct dm_status status;
+ struct sysinfo val;
-
+ if (pressure_report_delay > 0) {
+ --pressure_report_delay;
+ return;
+ }
+ si_meminfo(&val);
memset(&status, 0, sizeof(struct dm_status));
status.hdr.type = DM_STATUS_REPORT;
status.hdr.size = sizeof(struct dm_status);
status.hdr.trans_id = atomic_inc_return(&trans_id);
-
- status.num_committed = vm_memory_committed();
+ /*
+ * The host expects the guest to report free memory.
+ * Further, the host expects the pressure information to
+ * include the ballooned out pages.
+ * For a given amount of memory that we are managing, we
+ * need to compute a floor below which we should not balloon.
+ * Compute this and add it to the pressure report.
+ */
+ status.num_avail = val.freeram;
+ status.num_committed = vm_memory_committed() +
+ dm->num_pages_ballooned +
+ compute_balloon_floor();
vmbus_sendpacket(dm->dev->channel, &status,
sizeof(struct dm_status),
@@ -543,8 +596,6 @@ static void post_status(struct hv_dynmem_device *dm)
}
-
-
static void free_balloon_pages(struct hv_dynmem_device *dm,
union dm_mem_page_range *range_array)
{
@@ -879,7 +930,7 @@ static int balloon_probe(struct hv_device *dev,
balloon_onchannelcallback, dev);
if (ret)
- return ret;
+ goto probe_error0;
dm_device.dev = dev;
dm_device.state = DM_INITIALIZING;
@@ -891,7 +942,7 @@ static int balloon_probe(struct hv_device *dev,
kthread_run(dm_thread_func, &dm_device, "hv_balloon");
if (IS_ERR(dm_device.thread)) {
ret = PTR_ERR(dm_device.thread);
- goto probe_error0;
+ goto probe_error1;
}
hv_set_drvdata(dev, &dm_device);
@@ -914,12 +965,12 @@ static int balloon_probe(struct hv_device *dev,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret)
- goto probe_error1;
+ goto probe_error2;
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
- goto probe_error1;
+ goto probe_error2;
}
/*
@@ -928,7 +979,7 @@ static int balloon_probe(struct hv_device *dev,
*/
if (dm_device.state == DM_INIT_ERROR) {
ret = -ETIMEDOUT;
- goto probe_error1;
+ goto probe_error2;
}
/*
* Now submit our capabilities to the host.
@@ -961,12 +1012,12 @@ static int balloon_probe(struct hv_device *dev,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret)
- goto probe_error1;
+ goto probe_error2;
t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
- goto probe_error1;
+ goto probe_error2;
}
/*
@@ -975,18 +1026,20 @@ static int balloon_probe(struct hv_device *dev,
*/
if (dm_device.state == DM_INIT_ERROR) {
ret = -ETIMEDOUT;
- goto probe_error1;
+ goto probe_error2;
}
dm_device.state = DM_INITIALIZED;
return 0;
-probe_error1:
+probe_error2:
kthread_stop(dm_device.thread);
-probe_error0:
+probe_error1:
vmbus_close(dev->channel);
+probe_error0:
+ kfree(send_buffer);
return ret;
}
@@ -999,6 +1052,7 @@ static int balloon_remove(struct hv_device *dev)
vmbus_close(dev->channel);
kthread_stop(dm->thread);
+ kfree(send_buffer);
return 0;
}
@@ -1006,9 +1060,7 @@ static int balloon_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Dynamic Memory Class ID */
/* 525074DC-8985-46e2-8057-A307DC18A502 */
- { VMBUS_DEVICE(0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46,
- 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
- },
+ { HV_DM_GUID, },
{ },
};
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index a0667de7a04c..1d4cbd8e8261 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -49,6 +49,16 @@ static struct hv_util_service util_kvp = {
.util_deinit = hv_kvp_deinit,
};
+static void perform_shutdown(struct work_struct *dummy)
+{
+ orderly_poweroff(true);
+}
+
+/*
+ * Perform the shutdown operation in a thread context.
+ */
+static DECLARE_WORK(shutdown_work, perform_shutdown);
+
static void shutdown_onchannelcallback(void *context)
{
struct vmbus_channel *channel = context;
@@ -106,7 +116,7 @@ static void shutdown_onchannelcallback(void *context)
}
if (execute_shutdown == true)
- orderly_poweroff(true);
+ schedule_work(&shutdown_work);
}
/*
@@ -274,6 +284,16 @@ static int util_probe(struct hv_device *dev,
}
}
+ /*
+ * The services managed by the util driver are not performance
+ * critical and do not need batched reading. Furthermore, some services
+ * such as KVP can only handle one message from the host at a time.
+ * Turn off batched reading for all util drivers before we open the
+ * channel.
+ */
+
+ set_channel_read_state(dev->channel, false);
+
ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
srv->util_cb, dev->channel);
if (ret)
@@ -304,21 +324,21 @@ static int util_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Shutdown guid */
- { VMBUS_DEVICE(0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49,
- 0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB)
- .driver_data = (unsigned long)&util_shutdown },
+ { HV_SHUTDOWN_GUID,
+ .driver_data = (unsigned long)&util_shutdown
+ },
/* Time synch guid */
- { VMBUS_DEVICE(0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
- 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
- .driver_data = (unsigned long)&util_timesynch },
+ { HV_TS_GUID,
+ .driver_data = (unsigned long)&util_timesynch
+ },
/* Heartbeat guid */
- { VMBUS_DEVICE(0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
- 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
- .driver_data = (unsigned long)&util_heartbeat },
+ { HV_HEART_BEAT_GUID,
+ .driver_data = (unsigned long)&util_heartbeat
+ },
/* KVP guid */
- { VMBUS_DEVICE(0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
- 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6)
- .driver_data = (unsigned long)&util_kvp },
+ { HV_KVP_GUID,
+ .driver_data = (unsigned long)&util_kvp
+ },
{ },
};
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index d8d1fadb398a..12f2f9e989f7 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -101,15 +101,6 @@ enum hv_message_type {
/* Define invalid partition identifier. */
#define HV_PARTITION_ID_INVALID ((u64)0x0)
-/* Define connection identifier type. */
-union hv_connection_id {
- u32 asu32;
- struct {
- u32 id:24;
- u32 reserved:8;
- } u;
-};
-
/* Define port identifier type. */
union hv_port_id {
u32 asu32;
@@ -338,13 +329,6 @@ struct hv_input_post_message {
u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};
-/* Definition of the hv_signal_event hypercall input structure. */
-struct hv_input_signal_event {
- union hv_connection_id connectionid;
- u16 flag_number;
- u16 rsvdz;
-};
-
/*
* Versioning definitions used for guests reporting themselves to the
 * hypervisor, and vice versa.
@@ -498,11 +482,6 @@ static const uuid_le VMBUS_SERVICE_ID = {
-struct hv_input_signal_event_buffer {
- u64 align8;
- struct hv_input_signal_event event;
-};
-
struct hv_context {
/* We only support running on top of Hyper-V
* So at this point this really can only contain the Hyper-V ID
@@ -513,16 +492,24 @@ struct hv_context {
bool synic_initialized;
- /*
- * This is used as an input param to HvCallSignalEvent hypercall. The
- * input param is immutable in our usage and must be dynamic mem (vs
- * stack or global). */
- struct hv_input_signal_event_buffer *signal_event_buffer;
- /* 8-bytes aligned of the buffer above */
- struct hv_input_signal_event *signal_event_param;
-
void *synic_message_page[NR_CPUS];
void *synic_event_page[NR_CPUS];
+ /*
+ * Hypervisor's notion of virtual processor ID is different from
+ * Linux's notion of CPU ID. This information can only be retrieved
+ * in the context of the calling CPU. Set up a map for easy access
+ * to this information:
+ *
+ * vp_index[a] is the Hyper-V's processor ID corresponding to
+ * Linux cpuid 'a'.
+ */
+ u32 vp_index[NR_CPUS];
+ /*
+ * Starting with win8, we can take channel interrupts on any CPU;
+ * we will manage the tasklet that handles events on a per CPU
+ * basis.
+ */
+ struct tasklet_struct *event_dpc[NR_CPUS];
};
extern struct hv_context hv_context;
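As a hypothetical illustration (not code from this patch) of how the map is meant to be consumed, a caller that needs to tell the hypervisor which virtual processor to target would translate its own CPU number like this:

/* Hypothetical helper, not part of this patch: map the running CPU to its VP index. */
static inline u32 hv_current_vp_index(void)
{
        return hv_context.vp_index[smp_processor_id()];
}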
@@ -538,12 +525,19 @@ extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
void *payload, size_t payload_size);
-extern u16 hv_signal_event(void);
+extern u16 hv_signal_event(void *con_id);
extern void hv_synic_init(void *irqarg);
extern void hv_synic_cleanup(void *arg);
+/*
+ * Host version information.
+ */
+extern unsigned int host_info_eax;
+extern unsigned int host_info_ebx;
+extern unsigned int host_info_ecx;
+extern unsigned int host_info_edx;
/* Interface */
@@ -555,7 +549,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
struct scatterlist *sglist,
- u32 sgcount);
+ u32 sgcount, bool *signal);
int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
u32 buflen);
@@ -563,13 +557,16 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info,
void *buffer,
u32 buflen,
- u32 offset);
+ u32 offset, bool *signal);
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
+void hv_begin_read(struct hv_ring_buffer_info *rbi);
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi);
+
/*
* Maximum channels is determined by the size of the interrupt page
* which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
@@ -657,7 +654,7 @@ int vmbus_connect(void);
int vmbus_post_msg(void *buffer, size_t buflen);
-int vmbus_set_event(u32 child_relid);
+int vmbus_set_event(struct vmbus_channel *channel);
void vmbus_on_event(unsigned long data);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 7233c88f01b8..cafa72ffdc30 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -29,6 +29,105 @@
#include "hyperv_vmbus.h"
+void hv_begin_read(struct hv_ring_buffer_info *rbi)
+{
+ rbi->ring_buffer->interrupt_mask = 1;
+ smp_mb();
+}
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi)
+{
+ u32 read;
+ u32 write;
+
+ rbi->ring_buffer->interrupt_mask = 0;
+ smp_mb();
+
+ /*
+ * Now check to see if the ring buffer is still empty.
+ * If it is not, we raced and we need to process new
+ * incoming messages.
+ */
+ hv_get_ringbuffer_availbytes(rbi, &read, &write);
+
+ return read;
+}
+
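The two helpers above are meant to bracket the consumer's drain loop; a hypothetical sketch of that pairing (the channel code that actually uses it lies outside this hunk) would be:

/* Hypothetical consumer loop, not part of this patch, showing the intended pairing. */
static void drain_ring(struct hv_ring_buffer_info *rbi)
{
        u32 bytes_left;

        do {
                hv_begin_read(rbi);             /* mask host signaling while we drain */

                /* ... read and process every available packet here ... */

                bytes_left = hv_end_read(rbi);  /* unmask and re-check for a race */
        } while (bytes_left != 0);
}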
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here are the details of this protocol:
+ *
+ * 1. The host guarantees that while it is draining the
+ * ring buffer, it will set the interrupt_mask to
+ * indicate it does not need to be interrupted when
+ * new data is placed.
+ *
+ * 2. The host guarantees that it will completely drain
+ * the ring buffer before exiting the read loop. Further,
+ * once the ring buffer is empty, it will clear the
+ * interrupt_mask and re-check to see if new data has
+ * arrived.
+ */
+
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+{
+ if (rbi->ring_buffer->interrupt_mask)
+ return false;
+
+ /*
+ * This is the only case in which we need to signal: when the
+ * ring transitions from being empty to non-empty.
+ */
+ if (old_write == rbi->ring_buffer->read_index)
+ return true;
+
+ return false;
+}
+
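The empty-to-non-empty rule can be exercised with a toy user-space check using made-up offsets; only the first case below requires a hypercall:

#include <stdio.h>

/* Same decision as hv_need_to_signal() above, with illustrative values only. */
static int need_signal(unsigned int interrupt_mask, unsigned int old_write,
                       unsigned int read_index)
{
        if (interrupt_mask)
                return 0;
        return old_write == read_index;   /* empty -> non-empty transition */
}

int main(void)
{
        printf("%d\n", need_signal(0, 48, 48));   /* 1: ring was empty, signal host */
        printf("%d\n", need_signal(0, 40, 48));   /* 0: host still had data pending */
        printf("%d\n", need_signal(1, 48, 48));   /* 0: host has masked interrupts  */
        return 0;
}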
+/*
+ * To optimize the flow management on the send-side,
+ * when the sender is blocked because of lack of
+ * sufficient space in the ring buffer, the consumer of
+ * the ring buffer can potentially signal the producer.
+ * This is controlled by the following parameters:
+ *
+ * 1. pending_send_sz: This is the size in bytes that the
+ * producer is trying to send.
+ * 2. The feature bit feat_pending_send_sz is set to indicate if
+ * the consumer of the ring will signal when the ring
+ * state transitions from being full to a state where
+ * there is room for the producer to send the pending packet.
+ */
+
+static bool hv_need_to_signal_on_read(u32 old_rd,
+ struct hv_ring_buffer_info *rbi)
+{
+ u32 prev_write_sz;
+ u32 cur_write_sz;
+ u32 r_size;
+ u32 write_loc = rbi->ring_buffer->write_index;
+ u32 read_loc = rbi->ring_buffer->read_index;
+ u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+
+ /*
+ * If the other end is not blocked on write don't bother.
+ */
+ if (pending_sz == 0)
+ return false;
+
+ r_size = rbi->ring_datasize;
+ cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+ read_loc - write_loc;
+
+ prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
+ old_rd - write_loc;
+
+
+ if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+ return true;
+
+ return false;
+}
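A short numeric walk-through of the rule above, with made-up values: a 4096-byte ring, the writer parked at offset 1000 with a pending_send_sz of 2048, and a read that advances read_index from 1200 to 3400. The writer's free space grows from 200 to 2400 bytes, crossing the 2048-byte threshold, so the reader should signal:

#include <stdio.h>

/* Same free-space rule as hv_need_to_signal_on_read(), illustrative values only. */
static unsigned int write_avail(unsigned int size, unsigned int write, unsigned int read)
{
        return write >= read ? size - (write - read) : read - write;
}

int main(void)
{
        unsigned int r_size = 4096, write_loc = 1000, pending_sz = 2048;
        unsigned int old_rd = 1200, read_loc = 3400;
        unsigned int prev = write_avail(r_size, write_loc, old_rd);    /* 200  */
        unsigned int cur  = write_avail(r_size, write_loc, read_loc);  /* 2400 */

        printf("signal producer: %d\n", prev < pending_sz && cur >= pending_sz);
        return 0;
}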
/*
* hv_get_next_write_location()
@@ -239,19 +338,6 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
}
}
-
-/*
- *
- * hv_get_ringbuffer_interrupt_mask()
- *
- * Get the interrupt mask for the specified ring buffer
- *
- */
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
-{
- return rbi->ring_buffer->interrupt_mask;
-}
-
/*
*
* hv_ringbuffer_init()
@@ -298,7 +384,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
*
*/
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct scatterlist *sglist, u32 sgcount)
+ struct scatterlist *sglist, u32 sgcount, bool *signal)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -307,6 +393,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
struct scatterlist *sg;
u32 next_write_location;
+ u32 old_write;
u64 prev_indices = 0;
unsigned long flags;
@@ -335,6 +422,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
/* Write to the ring buffer */
next_write_location = hv_get_next_write_location(outring_info);
+ old_write = next_write_location;
+
for_each_sg(sglist, sg, sgcount, i)
{
next_write_location = hv_copyto_ringbuffer(outring_info,
@@ -351,14 +440,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
&prev_indices,
sizeof(u64));
- /* Make sure we flush all writes before updating the writeIndex */
- smp_wmb();
+ /* Issue a full memory barrier before updating the write index */
+ smp_mb();
/* Now, update the write location */
hv_set_next_write_location(outring_info, next_write_location);
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+ *signal = hv_need_to_signal(old_write, outring_info);
return 0;
}
@@ -414,13 +505,14 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
*
*/
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
- u32 buflen, u32 offset)
+ u32 buflen, u32 offset, bool *signal)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 next_read_location = 0;
u64 prev_indices = 0;
unsigned long flags;
+ u32 old_read;
if (buflen <= 0)
return -EINVAL;
@@ -431,6 +523,8 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
&bytes_avail_toread,
&bytes_avail_towrite);
+ old_read = bytes_avail_toread;
+
/* Make sure there is something to read */
if (bytes_avail_toread < buflen) {
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -461,5 +555,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
spin_unlock_irqrestore(&inring_info->ring_lock, flags);
+ *signal = hv_need_to_signal_on_read(old_read, inring_info);
+
return 0;
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8e1a9ec53003..cf19dfa5ead1 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -33,6 +33,7 @@
#include <acpi/acpi_bus.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
+#include <linux/kernel_stat.h>
#include <asm/hyperv.h>
#include <asm/hypervisor.h>
#include "hyperv_vmbus.h"
@@ -41,7 +42,6 @@
static struct acpi_device *hv_acpi_dev;
static struct tasklet_struct msg_dpc;
-static struct tasklet_struct event_dpc;
static struct completion probe_event;
static int irq;
@@ -454,21 +454,40 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
union hv_synic_event_flags *event;
bool handled = false;
+ page_addr = hv_context.synic_event_page[cpu];
+ if (page_addr == NULL)
+ return IRQ_NONE;
+
+ event = (union hv_synic_event_flags *)page_addr +
+ VMBUS_MESSAGE_SINT;
/*
* Check for events before checking for messages. This is the order
* in which events and messages are checked in Windows guests on
* Hyper-V, and the Windows team suggested we do the same.
*/
- page_addr = hv_context.synic_event_page[cpu];
- event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
+ if ((vmbus_proto_version == VERSION_WS2008) ||
+ (vmbus_proto_version == VERSION_WIN7)) {
- /* Since we are a child, we only need to check bit 0 */
- if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
+ /* Since we are a child, we only need to check bit 0 */
+ if (sync_test_and_clear_bit(0,
+ (unsigned long *) &event->flags32[0])) {
+ handled = true;
+ }
+ } else {
+ /*
+ * Our host is win8 or above. The signaling mechanism
+ * has changed and we can directly look at the event page.
+ * If bit n is set then we have an interrupt on the channel
+ * whose id is n.
+ */
handled = true;
- tasklet_schedule(&event_dpc);
}
+ if (handled)
+ tasklet_schedule(hv_context.event_dpc[cpu]);
+
+
page_addr = hv_context.synic_message_page[cpu];
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
@@ -485,6 +504,19 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
}
/*
+ * vmbus interrupt flow handler:
+ * vmbus interrupts can occur concurrently on multiple CPUs and
+ * can be handled concurrently.
+ */
+
+static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
+{
+ kstat_incr_irqs_this_cpu(irq, desc);
+
+ desc->action->handler(irq, desc->action->dev_id);
+}
+
+/*
* vmbus_bus_init -Main vmbus driver initialization routine.
*
* Here, we
@@ -506,7 +538,6 @@ static int vmbus_bus_init(int irq)
}
tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
- tasklet_init(&event_dpc, vmbus_on_event, 0);
ret = bus_register(&hv_bus);
if (ret)
@@ -520,6 +551,13 @@ static int vmbus_bus_init(int irq)
goto err_unregister;
}
+ /*
+ * Vmbus interrupts can be handled concurrently on
+ * different CPUs. Establish an appropriate interrupt flow
+ * handler that can support this model.
+ */
+ irq_set_handler(irq, vmbus_flow_handler);
+
vector = IRQ0_VECTOR + irq;
/*
@@ -575,8 +613,6 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
ret = driver_register(&hv_driver->driver);
- vmbus_request_offers();
-
return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 32f238f3caea..89ac1cb26f24 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -180,11 +180,11 @@ config SENSORS_ADM9240
will be called adm9240.
config SENSORS_ADT7410
- tristate "Analog Devices ADT7410"
+ tristate "Analog Devices ADT7410/ADT7420"
depends on I2C
help
If you say yes here you get support for the Analog Devices
- ADT7410 temperature monitoring chip.
+ ADT7410 and ADT7420 temperature monitoring chips.
This driver can also be built as a module. If so, the module
will be called adt7410.
@@ -506,7 +506,8 @@ config SENSORS_IT87
help
If you say yes here you get support for ITE IT8705F, IT8712F,
IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E,
- IT8782F, and IT8783E/F sensor chips, and the SiS950 clone.
+ IT8771E, IT8772E, IT8782F, and IT8783E/F sensor chips, and the
+ SiS950 clone.
This driver can also be built as a module. If so, the module
will be called it87.
@@ -529,8 +530,8 @@ config SENSORS_JC42
temperature sensors, which are used on many DDR3 memory modules for
mobile devices and servers. Support will include, but not be limited
to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
- MCP98242, MCP98243, MCP9843, SE97, SE98, STTS424(E), STTS2002,
- STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
+ MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98, STTS424(E),
+ STTS2002, STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
This driver can also be built as a module. If so, the module
will be called jc42.
@@ -854,6 +855,17 @@ config SENSORS_MAX6650
This driver can also be built as a module. If so, the module
will be called max6650.
+config SENSORS_MAX6697
+ tristate "Maxim MAX6697 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for MAX6581, MAX6602, MAX6622,
+ MAX6636, MAX6689, MAX6693, MAX6694, MAX6697, MAX6698, and MAX6699
+ temperature sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6697.
+
config SENSORS_MCP3021
tristate "Microchip MCP3021 and compatibles"
depends on I2C
@@ -1145,6 +1157,16 @@ config SENSORS_AMC6821
This driver can also be build as a module. If so, the module
will be called amc6821.
+config SENSORS_INA209
+ tristate "TI / Burr Brown INA209"
+ depends on I2C
+ help
+ If you say yes here you get support for the TI / Burr Brown INA209
+ voltage / current / power monitor I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ina209.
+
config SENSORS_INA2XX
tristate "Texas Instruments INA219 and compatibles"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 5da287443f6c..8d6d97ea7c1e 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
+obj-$(CONFIG_SENSORS_INA209) += ina209.o
obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o
obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_JC42) += jc42.o
@@ -99,6 +100,7 @@ obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
+obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 1672e2a5db46..6351aba8819c 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -911,7 +911,7 @@ exit:
return res;
}
-static int acpi_power_meter_remove(struct acpi_device *device, int type)
+static int acpi_power_meter_remove(struct acpi_device *device)
{
struct acpi_power_meter_resource *resource;
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index f3a5d4764eb9..5d501adc3e54 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -137,7 +137,7 @@ static ssize_t set_max_min(struct device *dev,
if (ret < 0)
return ret;
- temp = SENSORS_LIMIT(temp, -40000, 85000);
+ temp = clamp_val(temp, -40000, 85000);
temp = (temp + (temp < 0 ? -500 : 500)) / 1000;
mutex_lock(&data->lock);
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index fd1d1b15854e..71bcba8abfc0 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -193,7 +193,7 @@ static ssize_t set_temp_max(struct device *dev,
temp /= 1000;
mutex_lock(&data->update_lock);
- data->temp_max[index] = SENSORS_LIMIT(temp, -128, 127);
+ data->temp_max[index] = clamp_val(temp, -128, 127);
if (!read_only)
i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
data->temp_max[index]);
@@ -218,7 +218,7 @@ static ssize_t set_temp_min(struct device *dev,
temp /= 1000;
mutex_lock(&data->update_lock);
- data->temp_min[index] = SENSORS_LIMIT(temp, -128, 127);
+ data->temp_min[index] = clamp_val(temp, -128, 127);
if (!read_only)
i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
data->temp_min[index]);
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 0f068e7297ee..ea09046e651d 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -197,7 +197,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
};
#define NEG12_OFFSET 16000
#define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from))
-#define INS_TO_REG(n, val) (SENSORS_LIMIT(SCALE(val, adm1026_scaling[n], 192),\
+#define INS_TO_REG(n, val) (clamp_val(SCALE(val, adm1026_scaling[n], 192),\
0, 255))
#define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n]))
@@ -207,7 +207,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
* 22500 kHz * 60 (sec/min) * 2 (pulse) / 2 (pulse/rev) == 1350000
*/
#define FAN_TO_REG(val, div) ((val) <= 0 ? 0xff : \
- SENSORS_LIMIT(1350000 / ((val) * (div)), \
+ clamp_val(1350000 / ((val) * (div)), \
1, 254))
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 0xff ? 0 : \
1350000 / ((val) * (div)))
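To make the 1350000 constant above concrete, a quick user-space check with illustrative numbers: at a fan divisor of 2, a 4500 RPM limit encodes to register value 150, and decoding 150 gives 4500 RPM back.

#include <stdio.h>

/* Mirror of the adm1026 fan conversions above, for a quick numeric check. */
#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))
#define FAN_TO_REG(val, div)   ((val) <= 0 ? 0xff : \
                                CLAMP(1350000 / ((val) * (div)), 1, 254))
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 0xff ? 0 : \
                                1350000 / ((val) * (div)))

int main(void)
{
        printf("4500 RPM, div 2 -> reg %d\n", FAN_TO_REG(4500, 2));   /* 150  */
        printf("reg 150, div 2 -> %d RPM\n", FAN_FROM_REG(150, 2));   /* 4500 */
        return 0;
}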
@@ -215,14 +215,14 @@ static int adm1026_scaling[] = { /* .001 Volts */
#define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0)
/* Temperature is reported in 1 degC increments */
-#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) + ((val) < 0 ? -500 : 500)) \
+#define TEMP_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
/ 1000, -127, 127))
#define TEMP_FROM_REG(val) ((val) * 1000)
-#define OFFSET_TO_REG(val) (SENSORS_LIMIT(((val) + ((val) < 0 ? -500 : 500)) \
+#define OFFSET_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
/ 1000, -127, 127))
#define OFFSET_FROM_REG(val) ((val) * 1000)
-#define PWM_TO_REG(val) (SENSORS_LIMIT(val, 0, 255))
+#define PWM_TO_REG(val) (clamp_val(val, 0, 255))
#define PWM_FROM_REG(val) (val)
#define PWM_MIN_TO_REG(val) ((val) & 0xf0)
@@ -233,7 +233,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
* indicates that the DAC could be used to drive the fans, but in our
* example board (Arima HDAMA) it isn't connected to the fans at all.
*/
-#define DAC_TO_REG(val) (SENSORS_LIMIT(((((val) * 255) + 500) / 2500), 0, 255))
+#define DAC_TO_REG(val) (clamp_val(((((val) * 255) + 500) / 2500), 0, 255))
#define DAC_FROM_REG(val) (((val) * 2500) / 255)
/*
@@ -933,7 +933,7 @@ static void fixup_fan_min(struct device *dev, int fan, int old_div)
return;
new_min = data->fan_min[fan] * old_div / new_div;
- new_min = SENSORS_LIMIT(new_min, 1, 254);
+ new_min = clamp_val(new_min, 1, 254);
data->fan_min[fan] = new_min;
adm1026_write_value(client, ADM1026_REG_FAN_MIN(fan), new_min);
}
@@ -1527,7 +1527,7 @@ static ssize_t set_auto_pwm_min(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- data->pwm1.auto_pwm_min = SENSORS_LIMIT(val, 0, 255);
+ data->pwm1.auto_pwm_min = clamp_val(val, 0, 255);
if (data->pwm1.enable == 2) { /* apply immediately */
data->pwm1.pwm = PWM_TO_REG((data->pwm1.pwm & 0x0f) |
PWM_MIN_TO_REG(data->pwm1.auto_pwm_min));
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index c6a4631e833f..253ea396106d 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -162,13 +162,13 @@ adm1031_write_value(struct i2c_client *client, u8 reg, unsigned int value)
static int FAN_TO_REG(int reg, int div)
{
int tmp;
- tmp = FAN_FROM_REG(SENSORS_LIMIT(reg, 0, 65535), div);
+ tmp = FAN_FROM_REG(clamp_val(reg, 0, 65535), div);
return tmp > 255 ? 255 : tmp;
}
#define FAN_DIV_FROM_REG(reg) (1<<(((reg)&0xc0)>>6))
-#define PWM_TO_REG(val) (SENSORS_LIMIT((val), 0, 255) >> 4)
+#define PWM_TO_REG(val) (clamp_val((val), 0, 255) >> 4)
#define PWM_FROM_REG(val) ((val) << 4)
#define FAN_CHAN_FROM_REG(reg) (((reg) >> 5) & 7)
@@ -675,7 +675,7 @@ static ssize_t set_temp_offset(struct device *dev,
if (ret)
return ret;
- val = SENSORS_LIMIT(val, -15000, 15000);
+ val = clamp_val(val, -15000, 15000);
mutex_lock(&data->update_lock);
data->temp_offset[nr] = TEMP_OFFSET_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_OFFSET(nr),
@@ -696,7 +696,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_min[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
@@ -717,7 +717,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_max[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
@@ -738,7 +738,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
+ val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_crit[nr] = TEMP_TO_REG(val);
adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index dafa477715e3..2416628e0ab1 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -98,13 +98,13 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
static inline u8 IN_TO_REG(unsigned long val, int n)
{
- return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255);
+ return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
}
/* temperature range: -40..125, 127 disables temperature alarm */
static inline s8 TEMP_TO_REG(long val)
{
- return SENSORS_LIMIT(SCALE(val, 1, 1000), -40, 127);
+ return clamp_val(SCALE(val, 1, 1000), -40, 127);
}
/* two fans, each with low fan speed limit */
@@ -122,7 +122,7 @@ static inline unsigned int FAN_FROM_REG(u8 reg, u8 div)
/* analog out 0..1250mV */
static inline u8 AOUT_TO_REG(unsigned long val)
{
- return SENSORS_LIMIT(SCALE(val, 255, 1250), 0, 255);
+ return clamp_val(SCALE(val, 255, 1250), 0, 255);
}
static inline unsigned int AOUT_FROM_REG(u8 reg)
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 409b5c16defb..ba962ac4b81f 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -163,9 +163,9 @@ static int ads7828_probe(struct i2c_client *client,
/* Bound Vref with min/max values if it was provided */
if (data->vref_mv)
- data->vref_mv = SENSORS_LIMIT(data->vref_mv,
- ADS7828_EXT_VREF_MV_MIN,
- ADS7828_EXT_VREF_MV_MAX);
+ data->vref_mv = clamp_val(data->vref_mv,
+ ADS7828_EXT_VREF_MV_MIN,
+ ADS7828_EXT_VREF_MV_MAX);
else
data->vref_mv = ADS7828_INT_VREF_MV;
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index 030c8d7c33a5..99a7290da0a3 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -78,10 +78,6 @@ enum adt7410_type { /* keep sorted in alphabetical order */
adt7410,
};
-/* Addresses scanned */
-static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
- I2C_CLIENT_END };
-
static const u8 ADT7410_REG_TEMP[4] = {
ADT7410_TEMPERATURE, /* input */
ADT7410_T_ALARM_HIGH, /* high */
@@ -173,8 +169,8 @@ abort:
static s16 ADT7410_TEMP_TO_REG(long temp)
{
- return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, ADT7410_TEMP_MIN,
- ADT7410_TEMP_MAX) * 128, 1000);
+ return DIV_ROUND_CLOSEST(clamp_val(temp, ADT7410_TEMP_MIN,
+ ADT7410_TEMP_MAX) * 128, 1000);
}
static int ADT7410_REG_TO_TEMP(struct adt7410_data *data, s16 reg)
@@ -269,9 +265,9 @@ static ssize_t adt7410_set_t_hyst(struct device *dev,
return ret;
/* convert absolute hysteresis value to a 4 bit delta value */
limit = ADT7410_REG_TO_TEMP(data, data->temp[1]);
- hyst = SENSORS_LIMIT(hyst, ADT7410_TEMP_MIN, ADT7410_TEMP_MAX);
- data->hyst = SENSORS_LIMIT(DIV_ROUND_CLOSEST(limit - hyst, 1000),
- 0, ADT7410_T_HYST_MASK);
+ hyst = clamp_val(hyst, ADT7410_TEMP_MIN, ADT7410_TEMP_MAX);
+ data->hyst = clamp_val(DIV_ROUND_CLOSEST(limit - hyst, 1000), 0,
+ ADT7410_T_HYST_MASK);
ret = i2c_smbus_write_byte_data(client, ADT7410_T_HYST, data->hyst);
if (ret)
return ret;
@@ -364,6 +360,7 @@ static int adt7410_probe(struct i2c_client *client,
/*
 * Set to 16 bit resolution, continuous conversion and comparator mode.
*/
+ ret &= ~ADT7410_MODE_MASK;
data->config = ret | ADT7410_FULL | ADT7410_RESOLUTION |
ADT7410_EVENT_MODE;
if (data->config != data->oldconfig) {
@@ -410,11 +407,12 @@ static int adt7410_remove(struct i2c_client *client)
static const struct i2c_device_id adt7410_ids[] = {
{ "adt7410", adt7410, },
+ { "adt7420", adt7410, },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, adt7410_ids);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int adt7410_suspend(struct device *dev)
{
int ret;
@@ -436,10 +434,8 @@ static int adt7410_resume(struct device *dev)
return ret;
}
-static const struct dev_pm_ops adt7410_dev_pm_ops = {
- .suspend = adt7410_suspend,
- .resume = adt7410_resume,
-};
+static SIMPLE_DEV_PM_OPS(adt7410_dev_pm_ops, adt7410_suspend, adt7410_resume);
+
#define ADT7410_DEV_PM_OPS (&adt7410_dev_pm_ops)
#else
#define ADT7410_DEV_PM_OPS NULL
@@ -454,11 +450,11 @@ static struct i2c_driver adt7410_driver = {
.probe = adt7410_probe,
.remove = adt7410_remove,
.id_table = adt7410_ids,
- .address_list = normal_i2c,
+ .address_list = I2C_ADDRS(0x48, 0x49, 0x4a, 0x4b),
};
module_i2c_driver(adt7410_driver);
MODULE_AUTHOR("Hartmut Knaack");
-MODULE_DESCRIPTION("ADT7410 driver");
+MODULE_DESCRIPTION("ADT7410/ADT7420 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 98a7d81e25c5..69481d3a3d23 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -836,7 +836,7 @@ static ssize_t set_temp_min(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -874,7 +874,7 @@ static ssize_t set_temp_max(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -939,7 +939,7 @@ static ssize_t set_volt_max(struct device *dev,
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_max[attr->index] = temp;
@@ -981,7 +981,7 @@ static ssize_t set_volt_min(struct device *dev,
temp *= 1000; /* convert mV to uV */
temp = DIV_ROUND_CLOSEST(temp, x);
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->volt_min[attr->index] = temp;
@@ -1071,7 +1071,7 @@ static ssize_t set_fan_min(struct device *dev,
temp = FAN_RPM_TO_PERIOD(temp);
temp >>= 8;
- temp = SENSORS_LIMIT(temp, 1, 255);
+ temp = clamp_val(temp, 1, 255);
mutex_lock(&data->lock);
data->fan_min[attr->index] = temp;
@@ -1149,7 +1149,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm[attr->index] = temp;
@@ -1179,7 +1179,7 @@ static ssize_t set_pwm_max(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_max = temp;
@@ -1211,7 +1211,7 @@ static ssize_t set_pwm_min(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_min[attr->index] = temp;
@@ -1246,7 +1246,7 @@ static ssize_t set_pwm_hyst(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = SENSORS_LIMIT(temp, 0, 15);
+ temp = clamp_val(temp, 0, 15);
/* package things up */
temp &= ADT7462_PWM_HYST_MASK;
@@ -1333,7 +1333,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 39ecb1a3b9ef..b83bf4bb95eb 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -452,7 +452,7 @@ static ssize_t set_auto_update_interval(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 60000);
+ temp = clamp_val(temp, 0, 60000);
mutex_lock(&data->lock);
data->auto_update_interval = temp;
@@ -481,7 +481,7 @@ static ssize_t set_num_temp_sensors(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, -1, 10);
+ temp = clamp_val(temp, -1, 10);
mutex_lock(&data->lock);
data->num_temp_sensors = temp;
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->temp_max[attr->index] = temp;
@@ -604,7 +604,7 @@ static ssize_t set_fan_max(struct device *dev,
return -EINVAL;
temp = FAN_RPM_TO_PERIOD(temp);
- temp = SENSORS_LIMIT(temp, 1, 65534);
+ temp = clamp_val(temp, 1, 65534);
mutex_lock(&data->lock);
data->fan_max[attr->index] = temp;
@@ -641,7 +641,7 @@ static ssize_t set_fan_min(struct device *dev,
return -EINVAL;
temp = FAN_RPM_TO_PERIOD(temp);
- temp = SENSORS_LIMIT(temp, 1, 65534);
+ temp = clamp_val(temp, 1, 65534);
mutex_lock(&data->lock);
data->fan_min[attr->index] = temp;
@@ -717,7 +717,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm[attr->index] = temp;
@@ -749,7 +749,7 @@ static ssize_t set_pwm_max(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_max[attr->index] = temp;
@@ -782,7 +782,7 @@ static ssize_t set_pwm_min(struct device *dev,
if (kstrtol(buf, 10, &temp))
return -EINVAL;
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_min[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = SENSORS_LIMIT(temp, 0, 255);
+ temp = clamp_val(temp, 0, 255);
mutex_lock(&data->lock);
data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 989e54c39252..22d008bbdc10 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -201,10 +201,10 @@ static inline u16 temp2reg(struct adt7475_data *data, long val)
u16 ret;
if (!(data->config5 & CONFIG5_TWOSCOMP)) {
- val = SENSORS_LIMIT(val, -64000, 191000);
+ val = clamp_val(val, -64000, 191000);
ret = (val + 64500) / 1000;
} else {
- val = SENSORS_LIMIT(val, -128000, 127000);
+ val = clamp_val(val, -128000, 127000);
if (val < -500)
ret = (256500 + val) / 1000;
else
@@ -240,7 +240,7 @@ static inline u16 rpm2tach(unsigned long rpm)
if (rpm == 0)
return 0;
- return SENSORS_LIMIT((90000 * 60) / rpm, 1, 0xFFFF);
+ return clamp_val((90000 * 60) / rpm, 1, 0xFFFF);
}
/* Scaling factors for voltage inputs, taken from the ADT7490 datasheet */
@@ -271,7 +271,7 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
reg = (volt * 1024) / 2250;
else
reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
- return SENSORS_LIMIT(reg, 0, 1023) & (0xff << 2);
+ return clamp_val(reg, 0, 1023) & (0xff << 2);
}
static u16 adt7475_read_word(struct i2c_client *client, int reg)
@@ -451,10 +451,10 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
switch (sattr->nr) {
case OFFSET:
if (data->config5 & CONFIG5_TEMPOFFSET) {
- val = SENSORS_LIMIT(val, -63000, 127000);
+ val = clamp_val(val, -63000, 127000);
out = data->temp[OFFSET][sattr->index] = val / 1000;
} else {
- val = SENSORS_LIMIT(val, -63000, 64000);
+ val = clamp_val(val, -63000, 64000);
out = data->temp[OFFSET][sattr->index] = val / 500;
}
break;
@@ -471,7 +471,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
adt7475_read_hystersis(client);
temp = reg2temp(data, data->temp[THERM][sattr->index]);
- val = SENSORS_LIMIT(val, temp - 15000, temp);
+ val = clamp_val(val, temp - 15000, temp);
val = (temp - val) / 1000;
if (sattr->index != 1) {
@@ -577,7 +577,7 @@ static ssize_t set_point2(struct device *dev, struct device_attribute *attr,
* to figure the range
*/
temp = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
- val = SENSORS_LIMIT(val, temp + autorange_table[0],
+ val = clamp_val(val, temp + autorange_table[0],
temp + autorange_table[ARRAY_SIZE(autorange_table) - 1]);
val -= temp;
@@ -701,7 +701,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
break;
}
- data->pwm[sattr->nr][sattr->index] = SENSORS_LIMIT(val, 0, 0xFF);
+ data->pwm[sattr->nr][sattr->index] = clamp_val(val, 0, 0xFF);
i2c_smbus_write_byte_data(client, reg,
data->pwm[sattr->nr][sattr->index]);
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index ae482e3afdac..4fe49d2bfe1d 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -241,7 +241,7 @@ static ssize_t set_temp(
int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
- val = SENSORS_LIMIT(val / 1000, -128, 127);
+ val = clamp_val(val / 1000, -128, 127);
mutex_lock(&data->update_lock);
data->temp[ix] = val;
@@ -332,7 +332,7 @@ static ssize_t set_pwm1(
return ret;
mutex_lock(&data->update_lock);
- data->pwm1 = SENSORS_LIMIT(val , 0, 255);
+ data->pwm1 = clamp_val(val , 0, 255);
i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1);
mutex_unlock(&data->update_lock);
return count;
@@ -499,11 +499,11 @@ static ssize_t set_temp_auto_point_temp(
mutex_lock(&data->update_lock);
switch (ix) {
case 0:
- ptemp[0] = SENSORS_LIMIT(val / 1000, 0,
- data->temp1_auto_point_temp[1]);
- ptemp[0] = SENSORS_LIMIT(ptemp[0], 0,
- data->temp2_auto_point_temp[1]);
- ptemp[0] = SENSORS_LIMIT(ptemp[0], 0, 63);
+ ptemp[0] = clamp_val(val / 1000, 0,
+ data->temp1_auto_point_temp[1]);
+ ptemp[0] = clamp_val(ptemp[0], 0,
+ data->temp2_auto_point_temp[1]);
+ ptemp[0] = clamp_val(ptemp[0], 0, 63);
if (i2c_smbus_write_byte_data(
client,
AMC6821_REG_PSV_TEMP,
@@ -515,20 +515,12 @@ static ssize_t set_temp_auto_point_temp(
goto EXIT;
break;
case 1:
- ptemp[1] = SENSORS_LIMIT(
- val / 1000,
- (ptemp[0] & 0x7C) + 4,
- 124);
+ ptemp[1] = clamp_val(val / 1000, (ptemp[0] & 0x7C) + 4, 124);
ptemp[1] &= 0x7C;
- ptemp[2] = SENSORS_LIMIT(
- ptemp[2], ptemp[1] + 1,
- 255);
+ ptemp[2] = clamp_val(ptemp[2], ptemp[1] + 1, 255);
break;
case 2:
- ptemp[2] = SENSORS_LIMIT(
- val / 1000,
- ptemp[1]+1,
- 255);
+ ptemp[2] = clamp_val(val / 1000, ptemp[1]+1, 255);
break;
default:
dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
@@ -561,7 +553,7 @@ static ssize_t set_pwm1_auto_point_pwm(
return ret;
mutex_lock(&data->update_lock);
- data->pwm1_auto_point_pwm[1] = SENSORS_LIMIT(val, 0, 254);
+ data->pwm1_auto_point_pwm[1] = clamp_val(val, 0, 254);
if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP,
data->pwm1_auto_point_pwm[1])) {
dev_err(&client->dev, "Register write error, aborting.\n");
@@ -629,7 +621,7 @@ static ssize_t set_fan(
val = 1 > val ? 0xFFFF : 6000000/val;
mutex_lock(&data->update_lock);
- data->fan[ix] = (u16) SENSORS_LIMIT(val, 1, 0xFFFF);
+ data->fan[ix] = (u16) clamp_val(val, 1, 0xFFFF);
if (i2c_smbus_write_byte_data(client, fan_reg_low[ix],
data->fan[ix] & 0xFF)) {
dev_err(&client->dev, "Register write error, aborting.\n");
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 520e5bf4f76d..6ac612cabda1 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -114,7 +114,7 @@ static const u16 asb100_reg_temp_hyst[] = {0, 0x3a, 0x153, 0x253, 0x19};
*/
static u8 IN_TO_REG(unsigned val)
{
- unsigned nval = SENSORS_LIMIT(val, ASB100_IN_MIN, ASB100_IN_MAX);
+ unsigned nval = clamp_val(val, ASB100_IN_MIN, ASB100_IN_MAX);
return (nval + 8) / 16;
}
@@ -129,8 +129,8 @@ static u8 FAN_TO_REG(long rpm, int div)
return 0;
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
static int FAN_FROM_REG(u8 val, int div)
@@ -148,7 +148,7 @@ static int FAN_FROM_REG(u8 val, int div)
*/
static u8 TEMP_TO_REG(long temp)
{
- int ntemp = SENSORS_LIMIT(temp, ASB100_TEMP_MIN, ASB100_TEMP_MAX);
+ int ntemp = clamp_val(temp, ASB100_TEMP_MIN, ASB100_TEMP_MAX);
ntemp += (ntemp < 0 ? -500 : 500);
return (u8)(ntemp / 1000);
}
@@ -164,7 +164,7 @@ static int TEMP_FROM_REG(u8 reg)
*/
static u8 ASB100_PWM_TO_REG(int pwm)
{
- pwm = SENSORS_LIMIT(pwm, 0, 255);
+ pwm = clamp_val(pwm, 0, 255);
return (u8)(pwm / 16);
}
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index b867aab78049..da7f5b5d5db5 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -191,7 +191,7 @@ static ssize_t store_u8(struct device *dev, struct device_attribute *attr,
if (kstrtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, 0, 255);
+ reqval = clamp_val(reqval, 0, 255);
mutex_lock(&data->update_lock);
data->reg[param->msb[0]] = reqval;
@@ -224,7 +224,7 @@ static ssize_t store_bitmask(struct device *dev,
if (kstrtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, 0, param->mask[0]);
+ reqval = clamp_val(reqval, 0, param->mask[0]);
reqval = (reqval & param->mask[0]) << param->shift[0];
@@ -274,7 +274,7 @@ static ssize_t store_fan16(struct device *dev,
* generating an alarm.
*/
reqval =
- (reqval <= 0 ? 0xffff : SENSORS_LIMIT(5400000 / reqval, 0, 0xfffe));
+ (reqval <= 0 ? 0xffff : clamp_val(5400000 / reqval, 0, 0xfffe));
mutex_lock(&data->update_lock);
data->reg[param->msb[0]] = (reqval >> 8) & 0xff;
@@ -343,11 +343,11 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
if (kstrtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, 0, 0xffff);
+ reqval = clamp_val(reqval, 0, 0xffff);
reqval = reqval * 0xc0 / asc7621_in_scaling[nr];
- reqval = SENSORS_LIMIT(reqval, 0, 0xff);
+ reqval = clamp_val(reqval, 0, 0xff);
mutex_lock(&data->update_lock);
data->reg[param->msb[0]] = reqval;
@@ -376,7 +376,7 @@ static ssize_t store_temp8(struct device *dev,
if (kstrtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, -127000, 127000);
+ reqval = clamp_val(reqval, -127000, 127000);
temp = reqval / 1000;
@@ -432,7 +432,7 @@ static ssize_t store_temp62(struct device *dev,
if (kstrtol(buf, 10, &reqval))
return -EINVAL;
- reqval = SENSORS_LIMIT(reqval, -32000, 31750);
+ reqval = clamp_val(reqval, -32000, 31750);
i = reqval / 1000;
f = reqval - (i * 1000);
temp = i << 2;
@@ -468,7 +468,7 @@ static ssize_t show_ap2_temp(struct device *dev,
auto_point1 = ((s8) data->reg[param->msb[1]]) * 1000;
regval =
((data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0]);
- temp = auto_point1 + asc7621_range_map[SENSORS_LIMIT(regval, 0, 15)];
+ temp = auto_point1 + asc7621_range_map[clamp_val(regval, 0, 15)];
mutex_unlock(&data->update_lock);
return sprintf(buf, "%d\n", temp);
@@ -489,7 +489,7 @@ static ssize_t store_ap2_temp(struct device *dev,
mutex_lock(&data->update_lock);
auto_point1 = data->reg[param->msb[1]] * 1000;
- reqval = SENSORS_LIMIT(reqval, auto_point1 + 2000, auto_point1 + 80000);
+ reqval = clamp_val(reqval, auto_point1 + 2000, auto_point1 + 80000);
for (i = ARRAY_SIZE(asc7621_range_map) - 1; i >= 0; i--) {
if (reqval >= auto_point1 + asc7621_range_map[i]) {
@@ -523,7 +523,7 @@ static ssize_t show_pwm_ac(struct device *dev,
regval = config | (altbit << 3);
mutex_unlock(&data->update_lock);
- return sprintf(buf, "%u\n", map[SENSORS_LIMIT(regval, 0, 15)]);
+ return sprintf(buf, "%u\n", map[clamp_val(regval, 0, 15)]);
}
static ssize_t store_pwm_ac(struct device *dev,
@@ -663,7 +663,7 @@ static ssize_t show_pwm_freq(struct device *dev,
u8 regval =
(data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
- regval = SENSORS_LIMIT(regval, 0, 15);
+ regval = clamp_val(regval, 0, 15);
return sprintf(buf, "%u\n", asc7621_pwm_freq_map[regval]);
}
@@ -711,7 +711,7 @@ static ssize_t show_pwm_ast(struct device *dev,
u8 regval =
(data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
- regval = SENSORS_LIMIT(regval, 0, 7);
+ regval = clamp_val(regval, 0, 7);
return sprintf(buf, "%u\n", asc7621_pwm_auto_spinup_map[regval]);
@@ -759,7 +759,7 @@ static ssize_t show_temp_st(struct device *dev,
SETUP_SHOW_data_param(dev, attr);
u8 regval =
(data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
- regval = SENSORS_LIMIT(regval, 0, 7);
+ regval = clamp_val(regval, 0, 7);
return sprintf(buf, "%u\n", asc7621_temp_smoothing_time_map[regval]);
}
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 56dbcfb3e301..b25c64302cbc 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -190,7 +190,7 @@ struct atk_acpi_input_buf {
};
static int atk_add(struct acpi_device *device);
-static int atk_remove(struct acpi_device *device, int type);
+static int atk_remove(struct acpi_device *device);
static void atk_print_sensor(struct atk_data *data, union acpi_object *obj);
static int atk_read_value(struct atk_sensor_data *sensor, u64 *value);
static void atk_free_sensors(struct atk_data *data);
@@ -1416,7 +1416,7 @@ out:
return err;
}
-static int atk_remove(struct acpi_device *device, int type)
+static int atk_remove(struct acpi_device *device)
{
struct atk_data *data = device->driver_data;
dev_dbg(&device->dev, "removing...\n");
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index d64923d63537..3f1e297663ad 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -198,7 +198,7 @@ struct tjmax {
static const struct tjmax __cpuinitconst tjmax_table[] = {
{ "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */
{ "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */
- { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 */
+ { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 Sodaville */
{ "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */
{ "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */
};
@@ -212,7 +212,7 @@ struct tjmax_model {
#define ANY 0xff
static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
- { 0x1c, 10, 100000 }, /* D4xx, N4xx, D5xx, N5xx */
+ { 0x1c, 10, 100000 }, /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
{ 0x1c, ANY, 90000 }, /* Z5xx, N2xx, possibly others
* Note: Also matches 230 and 330,
* which are covered by tjmax_table
@@ -222,6 +222,7 @@ static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
* is undetectable by software
*/
{ 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */
+ { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z2760) */
{ 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */
};
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 7430f70ae452..c347c94f2f73 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -277,7 +277,7 @@ static inline int IN_FROM_REG(int reg, int nominal, int res)
static inline int IN_TO_REG(int val, int nominal)
{
- return SENSORS_LIMIT((val * 192 + nominal / 2) / nominal, 0, 255);
+ return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255);
}
/*
@@ -293,8 +293,7 @@ static inline int TEMP_FROM_REG(int reg, int res)
static inline int TEMP_TO_REG(int val)
{
- return SENSORS_LIMIT((val < 0 ? val - 500 : val + 500) / 1000,
- -128, 127);
+ return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127);
}
/* Temperature range */
@@ -332,7 +331,7 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix)
static inline int TEMP_HYST_TO_REG(int val, int ix, int reg)
{
- int hyst = SENSORS_LIMIT((val + 500) / 1000, 0, 15);
+ int hyst = clamp_val((val + 500) / 1000, 0, 15);
return (ix == 1) ? (reg & 0xf0) | hyst : (reg & 0x0f) | (hyst << 4);
}
@@ -349,10 +348,10 @@ static inline int FAN_FROM_REG(int reg, int tpc)
static inline int FAN_TO_REG(int val, int tpc)
{
if (tpc) {
- return SENSORS_LIMIT(val / tpc, 0, 0xffff);
+ return clamp_val(val / tpc, 0, 0xffff);
} else {
return (val <= 0) ? 0xffff :
- SENSORS_LIMIT(90000 * 60 / val, 0, 0xfffe);
+ clamp_val(90000 * 60 / val, 0, 0xfffe);
}
}
@@ -1282,7 +1281,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
switch (fn) {
case SYS_PWM:
- data->pwm[ix] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm[ix] = clamp_val(val, 0, 255);
dme1737_write(data, DME1737_REG_PWM(ix), data->pwm[ix]);
break;
case SYS_PWM_FREQ:
@@ -1450,7 +1449,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
break;
case SYS_PWM_AUTO_POINT1_PWM:
/* Only valid for pwm[1-3] */
- data->pwm_min[ix] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm_min[ix] = clamp_val(val, 0, 255);
dme1737_write(data, DME1737_REG_PWM_MIN(ix),
data->pwm_min[ix]);
break;
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 77f434c58236..b07305622087 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -405,7 +405,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
if (rpm_target == 0)
data->fan_target = 0x1fff;
else
- data->fan_target = SENSORS_LIMIT(
+ data->fan_target = clamp_val(
(FAN_RPM_FACTOR * data->fan_multiplier) / rpm_target,
0, 0x1fff);
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 789bd4fb329b..936898f82f94 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -220,7 +220,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
: EMC6W201_REG_IN_HIGH(nr);
mutex_lock(&data->update_lock);
- data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255);
+ data->in[sf][nr] = clamp_val(val, 0, 255);
err = emc6w201_write8(client, reg, data->in[sf][nr]);
mutex_unlock(&data->update_lock);
@@ -257,7 +257,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
: EMC6W201_REG_TEMP_HIGH(nr);
mutex_lock(&data->update_lock);
- data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128);
+ data->temp[sf][nr] = clamp_val(val, -127, 128);
err = emc6w201_write8(client, reg, data->temp[sf][nr]);
mutex_unlock(&data->update_lock);
@@ -298,7 +298,7 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *devattr,
val = 0xFFFF;
} else {
val = DIV_ROUND_CLOSEST(5400000U, val);
- val = SENSORS_LIMIT(val, 0, 0xFFFE);
+ val = clamp_val(val, 0, 0xFFFE);
}
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index bb7275cc47f3..cfb02dd91aad 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1350,7 +1350,7 @@ static ssize_t store_fan_full_speed(struct device *dev,
if (err)
return err;
- val = SENSORS_LIMIT(val, 23, 1500000);
+ val = clamp_val(val, 23, 1500000);
val = fan_to_reg(val);
mutex_lock(&data->update_lock);
@@ -1438,7 +1438,7 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
return err;
val /= 8;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
@@ -1542,7 +1542,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
return err;
val /= 1000;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val);
@@ -1589,8 +1589,7 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
/* convert abs to relative and check */
data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr));
- val = SENSORS_LIMIT(val, data->temp_high[nr] - 15,
- data->temp_high[nr]);
+ val = clamp_val(val, data->temp_high[nr] - 15, data->temp_high[nr]);
val = data->temp_high[nr] - val;
/* convert value to register contents */
@@ -1627,7 +1626,7 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
return err;
val /= 1000;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val);
@@ -1754,7 +1753,7 @@ static ssize_t store_pwm(struct device *dev,
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
@@ -1805,7 +1804,7 @@ static ssize_t store_simple_pwm(struct device *dev,
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
@@ -1932,7 +1931,7 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
@@ -1991,8 +1990,8 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
mutex_lock(&data->update_lock);
data->pwm_auto_point_temp[nr][point] =
f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point));
- val = SENSORS_LIMIT(val, data->pwm_auto_point_temp[nr][point] - 15,
- data->pwm_auto_point_temp[nr][point]);
+ val = clamp_val(val, data->pwm_auto_point_temp[nr][point] - 15,
+ data->pwm_auto_point_temp[nr][point]);
val = data->pwm_auto_point_temp[nr][point] - val;
reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2));
@@ -2126,9 +2125,9 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
val /= 1000;
if (data->auto_point_temp_signed)
- val = SENSORS_LIMIT(val, -128, 127);
+ val = clamp_val(val, -128, 127);
else
- val = SENSORS_LIMIT(val, 0, 127);
+ val = clamp_val(val, 0, 127);
mutex_lock(&data->update_lock);
f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val);
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index f7dba229395f..9e300e567f15 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -359,7 +359,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm[nr] = clamp_val(val, 0, 255);
f75375_write_pwm(client, nr);
mutex_unlock(&data->update_lock);
return count;
@@ -556,7 +556,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
+ val = clamp_val(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_max[nr] = val;
f75375_write8(client, F75375_REG_VOLT_HIGH(nr), data->in_max[nr]);
@@ -577,7 +577,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
+ val = clamp_val(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_min[nr] = val;
f75375_write8(client, F75375_REG_VOLT_LOW(nr), data->in_min[nr]);
@@ -625,7 +625,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
+ val = clamp_val(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_high[nr] = val;
f75375_write8(client, F75375_REG_TEMP_HIGH(nr), data->temp_high[nr]);
@@ -646,7 +646,7 @@ static ssize_t set_temp_max_hyst(struct device *dev,
if (err < 0)
return err;
- val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
+ val = clamp_val(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_max_hyst[nr] = val;
f75375_write8(client, F75375_REG_TEMP_HYST(nr),
@@ -822,7 +822,7 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
if (auto_mode_enabled(f75375s_pdata->pwm_enable[nr]) ||
!duty_mode_enabled(f75375s_pdata->pwm_enable[nr]))
continue;
- data->pwm[nr] = SENSORS_LIMIT(f75375s_pdata->pwm[nr], 0, 255);
+ data->pwm[nr] = clamp_val(f75375s_pdata->pwm[nr], 0, 255);
f75375_write_pwm(client, nr);
}
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 519ce8b9c142..8af2755cdb87 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -379,7 +379,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
if (err)
return err;
- v = SENSORS_LIMIT(v / 1000, -128, 127) + 128;
+ v = clamp_val(v / 1000, -128, 127) + 128;
mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(to_i2c_client(dev),
@@ -540,7 +540,7 @@ static ssize_t store_pwm_auto_point1_pwm(struct device *dev,
/* reg: 0 = allow turning off (except on the syl), 1-255 = 50-100% */
if (v || data->kind == fscsyl) {
- v = SENSORS_LIMIT(v, 128, 255);
+ v = clamp_val(v, 128, 255);
v = (v - 128) * 2 + 1;
}
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 8b2106f60eda..ea6480b80e7f 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -171,7 +171,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->set_cnt = PWM_TO_CNT(SENSORS_LIMIT(val, 0, 255));
+ data->set_cnt = PWM_TO_CNT(clamp_val(val, 0, 255));
g760a_write_value(client, G760A_REG_SET_CNT, data->set_cnt);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 2c74673f48e5..e2e5909a34df 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -86,7 +86,7 @@ enum chips { gl518sm_r00, gl518sm_r80 };
#define BOOL_FROM_REG(val) ((val) ? 0 : 1)
#define BOOL_TO_REG(val) ((val) ? 0 : 1)
-#define TEMP_TO_REG(val) SENSORS_LIMIT(((((val) < 0 ? \
+#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
(val) - 500 : \
(val) + 500) / 1000) + 119), 0, 255)
#define TEMP_FROM_REG(val) (((val) - 119) * 1000)
@@ -96,15 +96,15 @@ static inline u8 FAN_TO_REG(long rpm, int div)
long rpmdiv;
if (rpm == 0)
return 0;
- rpmdiv = SENSORS_LIMIT(rpm, 1, 960000) * div;
- return SENSORS_LIMIT((480000 + rpmdiv / 2) / rpmdiv, 1, 255);
+ rpmdiv = clamp_val(rpm, 1, 960000) * div;
+ return clamp_val((480000 + rpmdiv / 2) / rpmdiv, 1, 255);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) * (div))))
-#define IN_TO_REG(val) SENSORS_LIMIT((((val) + 9) / 19), 0, 255)
+#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
#define IN_FROM_REG(val) ((val) * 19)
-#define VDD_TO_REG(val) SENSORS_LIMIT((((val) * 4 + 47) / 95), 0, 255)
+#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
#define DIV_FROM_REG(val) (1 << (val))
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index a21ff252f2f1..ed56e09c3dd7 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -144,10 +144,10 @@ static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
-#define VDD_TO_REG(val) SENSORS_LIMIT((((val) * 4 + 47) / 95), 0, 255)
+#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
#define IN_FROM_REG(val) ((val) * 19)
-#define IN_TO_REG(val) SENSORS_LIMIT((((val) + 9) / 19), 0, 255)
+#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -285,8 +285,7 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
#define DIV_FROM_REG(val) (1 << (val))
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \
- SENSORS_LIMIT((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, \
- 255))
+ clamp_val((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -450,7 +449,7 @@ static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
get_fan_off, set_fan_off);
#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
-#define TEMP_TO_REG(val) SENSORS_LIMIT(((((val) < 0 ? \
+#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
(val) - 500 : (val) + 500) / 1000) + 130), 0, 255)
static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 4e04c1228e51..39781945a5d2 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -422,7 +422,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
/* Fill GPIO pin array */
pdata->num_ctrl = of_gpio_count(node);
- if (!pdata->num_ctrl) {
+ if (pdata->num_ctrl <= 0) {
dev_err(dev, "gpios DT property empty / missing");
return -ENODEV;
}
@@ -477,7 +477,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
pdata->speed = speed;
/* Alarm GPIO if one exists */
- if (of_gpio_named_count(node, "alarm-gpios")) {
+ if (of_gpio_named_count(node, "alarm-gpios") > 0) {
struct gpio_fan_alarm *alarm;
int val;
enum of_gpio_flags flags;
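
The gpio-fan change above accounts for of_gpio_count() and of_gpio_named_count() being able to return a negative errno as well as zero, so a plain truth test would let a parse error pass as if GPIOs were present. A hedged sketch of the corrected pattern, with an invented helper name:

#include <linux/of_gpio.h>

/* Sketch only; "node" stands for the driver's device_node. */
static int count_fan_gpios(struct device_node *node)
{
	int count = of_gpio_count(node);

	if (count <= 0)		/* 0: property empty, <0: parse error */
		return -ENODEV;
	return count;
}
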
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
new file mode 100644
index 000000000000..c6fdd5bd395e
--- /dev/null
+++ b/drivers/hwmon/ina209.c
@@ -0,0 +1,636 @@
+/*
+ * Driver for the Texas Instruments / Burr Brown INA209
+ * Bidirectional Current/Power Monitor
+ *
+ * Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net>
+ *
+ * Derived from Ira W. Snyder's original driver submission
+ * Copyright (C) 2008 Paul Hays <Paul.Hays@cattail.ca>
+ * Copyright (C) 2008-2009 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * Aligned with ina2xx driver
+ * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Thanks to Jan Volkering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Datasheet:
+ * http://www.ti.com/lit/gpn/ina209
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/bug.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <linux/platform_data/ina2xx.h>
+
+/* register definitions */
+#define INA209_CONFIGURATION 0x00
+#define INA209_STATUS 0x01
+#define INA209_STATUS_MASK 0x02
+#define INA209_SHUNT_VOLTAGE 0x03
+#define INA209_BUS_VOLTAGE 0x04
+#define INA209_POWER 0x05
+#define INA209_CURRENT 0x06
+#define INA209_SHUNT_VOLTAGE_POS_PEAK 0x07
+#define INA209_SHUNT_VOLTAGE_NEG_PEAK 0x08
+#define INA209_BUS_VOLTAGE_MAX_PEAK 0x09
+#define INA209_BUS_VOLTAGE_MIN_PEAK 0x0a
+#define INA209_POWER_PEAK 0x0b
+#define INA209_SHUNT_VOLTAGE_POS_WARN 0x0c
+#define INA209_SHUNT_VOLTAGE_NEG_WARN 0x0d
+#define INA209_POWER_WARN 0x0e
+#define INA209_BUS_VOLTAGE_OVER_WARN 0x0f
+#define INA209_BUS_VOLTAGE_UNDER_WARN 0x10
+#define INA209_POWER_OVER_LIMIT 0x11
+#define INA209_BUS_VOLTAGE_OVER_LIMIT 0x12
+#define INA209_BUS_VOLTAGE_UNDER_LIMIT 0x13
+#define INA209_CRITICAL_DAC_POS 0x14
+#define INA209_CRITICAL_DAC_NEG 0x15
+#define INA209_CALIBRATION 0x16
+
+#define INA209_REGISTERS 0x17
+
+#define INA209_CONFIG_DEFAULT 0x3c47 /* PGA=8, full range */
+#define INA209_SHUNT_DEFAULT 10000 /* uOhm */
+
+struct ina209_data {
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ u16 regs[INA209_REGISTERS]; /* All chip registers */
+
+ u16 config_orig; /* Original configuration */
+ u16 calibration_orig; /* Original calibration */
+ u16 update_interval;
+};
+
+static struct ina209_data *ina209_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina209_data *data = i2c_get_clientdata(client);
+ struct ina209_data *ret = data;
+ s32 val;
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (!data->valid ||
+ time_after(jiffies, data->last_updated + data->update_interval)) {
+ for (i = 0; i < ARRAY_SIZE(data->regs); i++) {
+ val = i2c_smbus_read_word_swapped(client, i);
+ if (val < 0) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->regs[i] = val;
+ }
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+/*
+ * Read a value from a device register and convert it to the
+ * appropriate sysfs units
+ */
+static long ina209_from_reg(const u8 reg, const u16 val)
+{
+ switch (reg) {
+ case INA209_SHUNT_VOLTAGE:
+ case INA209_SHUNT_VOLTAGE_POS_PEAK:
+ case INA209_SHUNT_VOLTAGE_NEG_PEAK:
+ case INA209_SHUNT_VOLTAGE_POS_WARN:
+ case INA209_SHUNT_VOLTAGE_NEG_WARN:
+ /* LSB=10 uV. Convert to mV. */
+ return DIV_ROUND_CLOSEST(val, 100);
+
+ case INA209_BUS_VOLTAGE:
+ case INA209_BUS_VOLTAGE_MAX_PEAK:
+ case INA209_BUS_VOLTAGE_MIN_PEAK:
+ case INA209_BUS_VOLTAGE_OVER_WARN:
+ case INA209_BUS_VOLTAGE_UNDER_WARN:
+ case INA209_BUS_VOLTAGE_OVER_LIMIT:
+ case INA209_BUS_VOLTAGE_UNDER_LIMIT:
+ /* LSB=4 mV, last 3 bits unused */
+ return (val >> 3) * 4;
+
+ case INA209_CRITICAL_DAC_POS:
+ /* LSB=1 mV, in the upper 8 bits */
+ return val >> 8;
+
+ case INA209_CRITICAL_DAC_NEG:
+ /* LSB=1 mV, in the upper 8 bits */
+ return -1 * (val >> 8);
+
+ case INA209_POWER:
+ case INA209_POWER_PEAK:
+ case INA209_POWER_WARN:
+ case INA209_POWER_OVER_LIMIT:
+ /* LSB=20 mW. Convert to uW */
+ return val * 20 * 1000L;
+
+ case INA209_CURRENT:
+ /* LSB=1 mA (selected). Is in mA */
+ return val;
+ }
+
+ /* programmer goofed */
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+/*
+ * Take a value and convert it to register format, clamping the value
+ * to the appropriate range.
+ */
+static int ina209_to_reg(u8 reg, u16 old, long val)
+{
+ switch (reg) {
+ case INA209_SHUNT_VOLTAGE_POS_WARN:
+ case INA209_SHUNT_VOLTAGE_NEG_WARN:
+ /* Limit to +- 320 mV, 10 uV LSB */
+ return clamp_val(val, -320, 320) * 100;
+
+ case INA209_BUS_VOLTAGE_OVER_WARN:
+ case INA209_BUS_VOLTAGE_UNDER_WARN:
+ case INA209_BUS_VOLTAGE_OVER_LIMIT:
+ case INA209_BUS_VOLTAGE_UNDER_LIMIT:
+ /*
+ * Limit to 0-32000 mV, 4 mV LSB
+ *
+ * The last three bits aren't part of the value, but we'll
+ * preserve them in their original state.
+ */
+ return (DIV_ROUND_CLOSEST(clamp_val(val, 0, 32000), 4) << 3)
+ | (old & 0x7);
+
+ case INA209_CRITICAL_DAC_NEG:
+ /*
+ * Limit to -255-0 mV, 1 mV LSB
+ * Convert the value to a positive value for the register
+ *
+ * The value lives in the top 8 bits only, be careful
+ * and keep original value of other bits.
+ */
+ return (clamp_val(-val, 0, 255) << 8) | (old & 0xff);
+
+ case INA209_CRITICAL_DAC_POS:
+ /*
+ * Limit to 0-255 mV, 1 mV LSB
+ *
+ * The value lives in the top 8 bits only, be careful
+ * and keep original value of other bits.
+ */
+ return (clamp_val(val, 0, 255) << 8) | (old & 0xff);
+
+ case INA209_POWER_WARN:
+ case INA209_POWER_OVER_LIMIT:
+ /* 20 mW LSB */
+ return DIV_ROUND_CLOSEST(val, 20 * 1000);
+ }
+
+ /* Other registers are read-only, return access error */
+ return -EACCES;
+}
+
+static int ina209_interval_from_reg(u16 reg)
+{
+ return 68 >> (15 - ((reg >> 3) & 0x0f));
+}
+
+static u16 ina209_reg_from_interval(u16 config, long interval)
+{
+ int i, adc;
+
+ if (interval <= 0) {
+ adc = 8;
+ } else {
+ adc = 15;
+ for (i = 34 + 34 / 2; i; i >>= 1) {
+ if (i < interval)
+ break;
+ adc--;
+ }
+ }
+ return (config & 0xf807) | (adc << 3) | (adc << 7);
+}
+
+static ssize_t ina209_set_interval(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina209_data *data = ina209_update_device(dev);
+ long val;
+ u16 regval;
+ int ret;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ regval = ina209_reg_from_interval(data->regs[INA209_CONFIGURATION],
+ val);
+ i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION, regval);
+ data->regs[INA209_CONFIGURATION] = regval;
+ data->update_interval = ina209_interval_from_reg(regval);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t ina209_show_interval(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina209_data *data = i2c_get_clientdata(client);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", data->update_interval);
+}
+
+/*
+ * History is reset by writing 1 into bit 0 of the respective peak register.
+ * Since more than one peak register may be affected by the scope of a
+ * reset_history attribute write, use a bit mask in attr->index to identify
+ * which registers are affected.
+ */
+static u16 ina209_reset_history_regs[] = {
+ INA209_SHUNT_VOLTAGE_POS_PEAK,
+ INA209_SHUNT_VOLTAGE_NEG_PEAK,
+ INA209_BUS_VOLTAGE_MAX_PEAK,
+ INA209_BUS_VOLTAGE_MIN_PEAK,
+ INA209_POWER_PEAK
+};
+
+static ssize_t ina209_reset_history(struct device *dev,
+ struct device_attribute *da,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina209_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ u32 mask = attr->index;
+ long val;
+ int i, ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ for (i = 0; i < ARRAY_SIZE(ina209_reset_history_regs); i++) {
+ if (mask & (1 << i))
+ i2c_smbus_write_word_swapped(client,
+ ina209_reset_history_regs[i], 1);
+ }
+ data->valid = false;
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t ina209_set_value(struct device *dev,
+ struct device_attribute *da,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina209_data *data = ina209_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int reg = attr->index;
+ long val;
+ int ret;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ ret = ina209_to_reg(reg, data->regs[reg], val);
+ if (ret < 0) {
+ count = ret;
+ goto abort;
+ }
+ i2c_smbus_write_word_swapped(client, reg, ret);
+ data->regs[reg] = ret;
+abort:
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t ina209_show_value(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina209_data *data = ina209_update_device(dev);
+ long val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = ina209_from_reg(attr->index, data->regs[attr->index]);
+ return snprintf(buf, PAGE_SIZE, "%ld\n", val);
+}
+
+static ssize_t ina209_show_alarm(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina209_data *data = ina209_update_device(dev);
+ const unsigned int mask = attr->index;
+ u16 status;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ status = data->regs[INA209_STATUS];
+
+ /*
+ * All alarms are in the INA209_STATUS register. To avoid a long
+ * switch statement, the mask is passed in attr->index
+ */
+ return snprintf(buf, PAGE_SIZE, "%u\n", !!(status & mask));
+}
+
+/* Shunt voltage, history, limits, alarms */
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina209_show_value, NULL,
+ INA209_SHUNT_VOLTAGE);
+static SENSOR_DEVICE_ATTR(in0_input_highest, S_IRUGO, ina209_show_value, NULL,
+ INA209_SHUNT_VOLTAGE_POS_PEAK);
+static SENSOR_DEVICE_ATTR(in0_input_lowest, S_IRUGO, ina209_show_value, NULL,
+ INA209_SHUNT_VOLTAGE_NEG_PEAK);
+static SENSOR_DEVICE_ATTR(in0_reset_history, S_IWUSR, NULL,
+ ina209_reset_history, (1 << 0) | (1 << 1));
+static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_SHUNT_VOLTAGE_POS_WARN);
+static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_SHUNT_VOLTAGE_NEG_WARN);
+static SENSOR_DEVICE_ATTR(in0_crit_max, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_CRITICAL_DAC_POS);
+static SENSOR_DEVICE_ATTR(in0_crit_min, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_CRITICAL_DAC_NEG);
+
+static SENSOR_DEVICE_ATTR(in0_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 11);
+static SENSOR_DEVICE_ATTR(in0_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 12);
+static SENSOR_DEVICE_ATTR(in0_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 6);
+static SENSOR_DEVICE_ATTR(in0_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 7);
+
+/* Bus voltage, history, limits, alarms */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ina209_show_value, NULL,
+ INA209_BUS_VOLTAGE);
+static SENSOR_DEVICE_ATTR(in1_input_highest, S_IRUGO, ina209_show_value, NULL,
+ INA209_BUS_VOLTAGE_MAX_PEAK);
+static SENSOR_DEVICE_ATTR(in1_input_lowest, S_IRUGO, ina209_show_value, NULL,
+ INA209_BUS_VOLTAGE_MIN_PEAK);
+static SENSOR_DEVICE_ATTR(in1_reset_history, S_IWUSR, NULL,
+ ina209_reset_history, (1 << 2) | (1 << 3));
+static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_BUS_VOLTAGE_OVER_WARN);
+static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_BUS_VOLTAGE_UNDER_WARN);
+static SENSOR_DEVICE_ATTR(in1_crit_max, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_BUS_VOLTAGE_OVER_LIMIT);
+static SENSOR_DEVICE_ATTR(in1_crit_min, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_BUS_VOLTAGE_UNDER_LIMIT);
+
+static SENSOR_DEVICE_ATTR(in1_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 14);
+static SENSOR_DEVICE_ATTR(in1_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 15);
+static SENSOR_DEVICE_ATTR(in1_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 9);
+static SENSOR_DEVICE_ATTR(in1_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 10);
+
+/* Power */
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina209_show_value, NULL,
+ INA209_POWER);
+static SENSOR_DEVICE_ATTR(power1_input_highest, S_IRUGO, ina209_show_value,
+ NULL, INA209_POWER_PEAK);
+static SENSOR_DEVICE_ATTR(power1_reset_history, S_IWUSR, NULL,
+ ina209_reset_history, 1 << 4);
+static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_POWER_WARN);
+static SENSOR_DEVICE_ATTR(power1_crit, S_IRUGO | S_IWUSR, ina209_show_value,
+ ina209_set_value, INA209_POWER_OVER_LIMIT);
+
+static SENSOR_DEVICE_ATTR(power1_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 13);
+static SENSOR_DEVICE_ATTR(power1_crit_alarm, S_IRUGO, ina209_show_alarm, NULL,
+ 1 << 8);
+
+/* Current */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina209_show_value, NULL,
+ INA209_CURRENT);
+
+static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
+ ina209_show_interval, ina209_set_interval, 0);
+
+/*
+ * Finally, construct an array of pointers to members of the above objects,
+ * as required for sysfs_create_group()
+ */
+static struct attribute *ina209_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_input_highest.dev_attr.attr,
+ &sensor_dev_attr_in0_input_lowest.dev_attr.attr,
+ &sensor_dev_attr_in0_reset_history.dev_attr.attr,
+ &sensor_dev_attr_in0_max.dev_attr.attr,
+ &sensor_dev_attr_in0_min.dev_attr.attr,
+ &sensor_dev_attr_in0_crit_max.dev_attr.attr,
+ &sensor_dev_attr_in0_crit_min.dev_attr.attr,
+ &sensor_dev_attr_in0_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in0_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in0_crit_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in0_crit_min_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input_highest.dev_attr.attr,
+ &sensor_dev_attr_in1_input_lowest.dev_attr.attr,
+ &sensor_dev_attr_in1_reset_history.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_max.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_min.dev_attr.attr,
+ &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_min_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_power1_input.dev_attr.attr,
+ &sensor_dev_attr_power1_input_highest.dev_attr.attr,
+ &sensor_dev_attr_power1_reset_history.dev_attr.attr,
+ &sensor_dev_attr_power1_max.dev_attr.attr,
+ &sensor_dev_attr_power1_crit.dev_attr.attr,
+ &sensor_dev_attr_power1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_power1_crit_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+
+ &sensor_dev_attr_update_interval.dev_attr.attr,
+
+ NULL,
+};
+
+static const struct attribute_group ina209_group = {
+ .attrs = ina209_attributes,
+};
+
+static void ina209_restore_conf(struct i2c_client *client,
+ struct ina209_data *data)
+{
+ /* Restore initial configuration */
+ i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION,
+ data->config_orig);
+ i2c_smbus_write_word_swapped(client, INA209_CALIBRATION,
+ data->calibration_orig);
+}
+
+static int ina209_init_client(struct i2c_client *client,
+ struct ina209_data *data)
+{
+ struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev);
+ u32 shunt;
+ int reg;
+
+ reg = i2c_smbus_read_word_swapped(client, INA209_CALIBRATION);
+ if (reg < 0)
+ return reg;
+ data->calibration_orig = reg;
+
+ reg = i2c_smbus_read_word_swapped(client, INA209_CONFIGURATION);
+ if (reg < 0)
+ return reg;
+ data->config_orig = reg;
+
+ if (pdata) {
+ if (pdata->shunt_uohms <= 0)
+ return -EINVAL;
+ shunt = pdata->shunt_uohms;
+ } else if (!of_property_read_u32(client->dev.of_node, "shunt-resistor",
+ &shunt)) {
+ if (shunt == 0)
+ return -EINVAL;
+ } else {
+ shunt = data->calibration_orig ?
+ 40960000 / data->calibration_orig : INA209_SHUNT_DEFAULT;
+ }
+
+ i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION,
+ INA209_CONFIG_DEFAULT);
+ data->update_interval = ina209_interval_from_reg(INA209_CONFIG_DEFAULT);
+
+ /*
+ * Calibrate current LSB to 1mA. Shunt is in uOhms.
+ * See equation 13 in datasheet.
+ */
+ i2c_smbus_write_word_swapped(client, INA209_CALIBRATION,
+ clamp_val(40960000 / shunt, 1, 65535));
+
+ /* Clear status register */
+ i2c_smbus_read_word_swapped(client, INA209_STATUS);
+
+ return 0;
+}
+
+static int ina209_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct ina209_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ ret = ina209_init_client(client, data);
+ if (ret)
+ return ret;
+
+ /* Register sysfs hooks */
+ ret = sysfs_create_group(&client->dev.kobj, &ina209_group);
+ if (ret)
+ goto out_restore_conf;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_hwmon_device_register;
+ }
+
+ return 0;
+
+out_hwmon_device_register:
+ sysfs_remove_group(&client->dev.kobj, &ina209_group);
+out_restore_conf:
+ ina209_restore_conf(client, data);
+ return ret;
+}
+
+static int ina209_remove(struct i2c_client *client)
+{
+ struct ina209_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &ina209_group);
+ ina209_restore_conf(client, data);
+
+ return 0;
+}
+
+static const struct i2c_device_id ina209_id[] = {
+ { "ina209", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ina209_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ina209_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "ina209",
+ },
+ .probe = ina209_probe,
+ .remove = ina209_remove,
+ .id_table = ina209_id,
+};
+
+module_i2c_driver(ina209_driver);
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>, Paul Hays <Paul.Hays@cattail.ca>, Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("INA209 driver");
+MODULE_LICENSE("GPL");
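
As a quick, non-authoritative check of the scaling in ina209_from_reg() above: the shunt-voltage registers use a 10 uV LSB and are reported in mV, bus voltage uses a 4 mV LSB in the upper 13 bits, and power uses a 20 mW LSB reported in uW. A small sketch with invented register values:

#include <linux/kernel.h>

/* Illustration only; mirrors the scaling in ina209_from_reg() above. */
static void ina209_scaling_examples(void)
{
	long shunt_mv = DIV_ROUND_CLOSEST(12345, 100); /* 10 uV LSB -> 123 mV  */
	long bus_mv   = (0x2f40 >> 3) * 4;             /* 4 mV LSB  -> 6048 mV */
	long power_uw = 250 * 20 * 1000L;              /* 20 mW LSB -> 5 W     */

	(void)shunt_mv; (void)bus_mv; (void)power_uw;
}
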
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 117d66fcded6..37fc980fde24 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -19,6 +19,8 @@
* IT8726F Super I/O chip w/LPC interface
* IT8728F Super I/O chip w/LPC interface
* IT8758E Super I/O chip w/LPC interface
+ * IT8771E Super I/O chip w/LPC interface
+ * IT8772E Super I/O chip w/LPC interface
* IT8782F Super I/O chip w/LPC interface
* IT8783E/F Super I/O chip w/LPC interface
* Sis950 A clone of the IT8705F
@@ -61,8 +63,8 @@
#define DRVNAME "it87"
-enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8782,
- it8783 };
+enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8771,
+ it8772, it8782, it8783 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
@@ -140,6 +142,8 @@ static inline void superio_exit(void)
#define IT8721F_DEVID 0x8721
#define IT8726F_DEVID 0x8726
#define IT8728F_DEVID 0x8728
+#define IT8771E_DEVID 0x8771
+#define IT8772E_DEVID 0x8772
#define IT8782F_DEVID 0x8782
#define IT8783E_DEVID 0x8783
#define IT87_ACT_REG 0x30
@@ -281,6 +285,24 @@ static const struct it87_devices it87_devices[] = {
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
.peci_mask = 0x07,
},
+ [it8771] = {
+ .name = "it8771",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+ /* PECI: guesswork */
+ /* 12mV ADC (OHM) */
+ /* 16 bit fans (OHM) */
+ .peci_mask = 0x07,
+ },
+ [it8772] = {
+ .name = "it8772",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+ /* PECI (coreboot) */
+ /* 12mV ADC (HWSensors4, OHM) */
+ /* 16 bit fans (HWSensors4, OHM) */
+ .peci_mask = 0x07,
+ },
[it8782] = {
.name = "it8782",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
@@ -384,7 +406,7 @@ static int adc_lsb(const struct it87_data *data, int nr)
static u8 in_to_reg(const struct it87_data *data, int nr, long val)
{
val = DIV_ROUND_CLOSEST(val, adc_lsb(data, nr));
- return SENSORS_LIMIT(val, 0, 255);
+ return clamp_val(val, 0, 255);
}
static int in_from_reg(const struct it87_data *data, int nr, int val)
@@ -396,16 +418,15 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1,
- 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
static inline u16 FAN16_TO_REG(long rpm)
{
if (rpm == 0)
return 0xffff;
- return SENSORS_LIMIT((1350000 + rpm) / (rpm * 2), 1, 0xfffe);
+ return clamp_val((1350000 + rpm) / (rpm * 2), 1, 0xfffe);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 255 ? 0 : \
@@ -414,8 +435,8 @@ static inline u16 FAN16_TO_REG(long rpm)
#define FAN16_FROM_REG(val) ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \
1350000 / ((val) * 2))
-#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? (((val) - 500) / 1000) : \
- ((val) + 500) / 1000), -128, 127))
+#define TEMP_TO_REG(val) (clamp_val(((val) < 0 ? (((val) - 500) / 1000) : \
+ ((val) + 500) / 1000), -128, 127))
#define TEMP_FROM_REG(val) ((val) * 1000)
static u8 pwm_to_reg(const struct it87_data *data, long val)
@@ -1709,6 +1730,12 @@ static int __init it87_find(unsigned short *address,
case IT8728F_DEVID:
sio_data->type = it8728;
break;
+ case IT8771E_DEVID:
+ sio_data->type = it8771;
+ break;
+ case IT8772E_DEVID:
+ sio_data->type = it8772;
+ break;
case IT8782F_DEVID:
sio_data->type = it8782;
break;
@@ -1826,10 +1853,11 @@ static int __init it87_find(unsigned short *address,
reg = superio_inb(IT87_SIO_GPIO3_REG);
if (sio_data->type == it8721 || sio_data->type == it8728 ||
+ sio_data->type == it8771 || sio_data->type == it8772 ||
sio_data->type == it8782) {
/*
* IT8721F/IT8758E, and IT8782F don't have VID pins
- * at all, not sure about the IT8728F.
+ * at all, not sure about the IT8728F and compatibles.
*/
sio_data->skip_vid = 1;
} else {
@@ -1883,7 +1911,9 @@ static int __init it87_find(unsigned short *address,
if (reg & (1 << 0))
sio_data->internal |= (1 << 0);
if ((reg & (1 << 1)) || sio_data->type == it8721 ||
- sio_data->type == it8728)
+ sio_data->type == it8728 ||
+ sio_data->type == it8771 ||
+ sio_data->type == it8772)
sio_data->internal |= (1 << 1);
/*
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index e21e43c13156..4a58f130fd4e 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -103,6 +103,9 @@ static const unsigned short normal_i2c[] = {
#define MCP98243_DEVID 0x2100
#define MCP98243_DEVID_MASK 0xfffc
+#define MCP98244_DEVID 0x2200
+#define MCP98244_DEVID_MASK 0xfffc
+
#define MCP9843_DEVID 0x0000 /* Also matches mcp9805 */
#define MCP9843_DEVID_MASK 0xfffe
@@ -147,6 +150,7 @@ static struct jc42_chips jc42_chips[] = {
{ MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
{ MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
{ MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
+ { MCP_MANID, MCP98244_DEVID, MCP98244_DEVID_MASK },
{ MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
{ NXP_MANID, SE97_DEVID, SE97_DEVID_MASK },
{ ONS_MANID, CAT6095_DEVID, CAT6095_DEVID_MASK },
@@ -237,9 +241,9 @@ static struct i2c_driver jc42_driver = {
static u16 jc42_temp_to_reg(int temp, bool extended)
{
- int ntemp = SENSORS_LIMIT(temp,
- extended ? JC42_TEMP_MIN_EXTENDED :
- JC42_TEMP_MIN, JC42_TEMP_MAX);
+ int ntemp = clamp_val(temp,
+ extended ? JC42_TEMP_MIN_EXTENDED :
+ JC42_TEMP_MIN, JC42_TEMP_MAX);
/* convert from 0.001 to 0.0625 resolution */
return (ntemp * 2 / 125) & 0x1fff;
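
For reference on the jc42 conversion above: one register step is 0.0625 degC, i.e. 62.5 millidegrees, so dividing millidegrees by 62.5 is expressed as * 2 / 125 to stay in integer arithmetic. A quick sanity check with an invented value:

/* 25 degC = 25000 mC: 25000 * 2 / 125 = 400 counts; 400 * 0.0625 = 25.000 degC */
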
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index eed4d9401788..f644a2e57599 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -209,9 +209,9 @@ static inline int lut_temp_to_reg(struct lm63_data *data, long val)
{
val -= data->temp2_offset;
if (data->lut_temp_highres)
- return DIV_ROUND_CLOSEST(SENSORS_LIMIT(val, 0, 127500), 500);
+ return DIV_ROUND_CLOSEST(clamp_val(val, 0, 127500), 500);
else
- return DIV_ROUND_CLOSEST(SENSORS_LIMIT(val, 0, 127000), 1000);
+ return DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
}
/*
@@ -415,7 +415,7 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *devattr,
return err;
reg = nr ? LM63_REG_LUT_PWM(nr - 1) : LM63_REG_PWM_VALUE;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm1[nr] = data->pwm_highres ? val :
@@ -700,7 +700,7 @@ static ssize_t set_update_interval(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- lm63_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
+ lm63_set_convrate(client, data, clamp_val(val, 0, 100000));
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 7272176a9ec7..9bde9644b102 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -8,6 +8,7 @@
* Guillaume Ligneul <guillaume.ligneul@gmail.com>
* Adrien Demarez <adrien.demarez@bolloretelecom.eu>
* Jeremy Laine <jeremy.laine@bolloretelecom.eu>
+ * Chris Verges <kg4ysn@gmail.com>
*
* This software program is licensed subject to the GNU General Public License
* (GPL).Version 2,June 1991, available at
@@ -36,11 +37,30 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c,
#define LM73_ID 0x9001 /* 0x0190, byte-swapped */
#define DRVNAME "lm73"
-#define LM73_TEMP_MIN (-40)
-#define LM73_TEMP_MAX 150
+#define LM73_TEMP_MIN (-256000 / 250)
+#define LM73_TEMP_MAX (255750 / 250)
-/*-----------------------------------------------------------------------*/
+#define LM73_CTRL_RES_SHIFT 5
+#define LM73_CTRL_RES_MASK (BIT(5) | BIT(6))
+#define LM73_CTRL_TO_MASK BIT(7)
+
+#define LM73_CTRL_HI_SHIFT 2
+#define LM73_CTRL_LO_SHIFT 1
+
+static const unsigned short lm73_convrates[] = {
+ 14, /* 11-bits (0.25000 C/LSB): RES1 Bit = 0, RES0 Bit = 0 */
+ 28, /* 12-bits (0.12500 C/LSB): RES1 Bit = 0, RES0 Bit = 1 */
+ 56, /* 13-bits (0.06250 C/LSB): RES1 Bit = 1, RES0 Bit = 0 */
+ 112, /* 14-bits (0.03125 C/LSB): RES1 Bit = 1, RES0 Bit = 1 */
+};
+struct lm73_data {
+ struct device *hwmon_dev;
+ struct mutex lock;
+ u8 ctrl; /* control register value */
+};
+
+/*-----------------------------------------------------------------------*/
static ssize_t set_temp(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
@@ -56,8 +76,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
return status;
/* Write value */
- value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
- (LM73_TEMP_MAX*4)) << 5;
+ value = clamp_val(temp / 250, LM73_TEMP_MIN, LM73_TEMP_MAX) << 5;
err = i2c_smbus_write_word_swapped(client, attr->index, value);
return (err < 0) ? err : count;
}
@@ -79,6 +98,73 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
}
+static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm73_data *data = i2c_get_clientdata(client);
+ unsigned long convrate;
+ s32 err;
+ int res = 0;
+
+ err = kstrtoul(buf, 10, &convrate);
+ if (err < 0)
+ return err;
+
+ /*
+ * Convert the desired conversion rate into register bits.
+ * res is already initialized, and everything past the second-to-last
+ * value in the array is treated as belonging to the last value
+ * in the array.
+ */
+ while (res < (ARRAY_SIZE(lm73_convrates) - 1) &&
+ convrate > lm73_convrates[res])
+ res++;
+
+ mutex_lock(&data->lock);
+ data->ctrl &= LM73_CTRL_TO_MASK;
+ data->ctrl |= res << LM73_CTRL_RES_SHIFT;
+ err = i2c_smbus_write_byte_data(client, LM73_REG_CTRL, data->ctrl);
+ mutex_unlock(&data->lock);
+
+ if (err < 0)
+ return err;
+
+ return count;
+}
+
+static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm73_data *data = i2c_get_clientdata(client);
+ int res;
+
+ res = (data->ctrl & LM73_CTRL_RES_MASK) >> LM73_CTRL_RES_SHIFT;
+ return scnprintf(buf, PAGE_SIZE, "%hu\n", lm73_convrates[res]);
+}
+
+static ssize_t show_maxmin_alarm(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct lm73_data *data = i2c_get_clientdata(client);
+ s32 ctrl;
+
+ mutex_lock(&data->lock);
+ ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
+ if (ctrl < 0)
+ goto abort;
+ data->ctrl = ctrl;
+ mutex_unlock(&data->lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (ctrl >> attr->index) & 1);
+
+abort:
+ mutex_unlock(&data->lock);
+ return ctrl;
+}
/*-----------------------------------------------------------------------*/
@@ -90,13 +176,20 @@ static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
show_temp, set_temp, LM73_REG_MIN);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
show_temp, NULL, LM73_REG_INPUT);
-
+static SENSOR_DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO,
+ show_convrate, set_convrate, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
+ show_maxmin_alarm, NULL, LM73_CTRL_HI_SHIFT);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
+ show_maxmin_alarm, NULL, LM73_CTRL_LO_SHIFT);
static struct attribute *lm73_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
-
+ &sensor_dev_attr_update_interval.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
NULL
};
@@ -111,23 +204,36 @@ static const struct attribute_group lm73_group = {
static int
lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- struct device *hwmon_dev;
int status;
+ struct lm73_data *data;
+ int ctrl;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct lm73_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->lock);
+
+ ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
+ if (ctrl < 0)
+ return ctrl;
+ data->ctrl = ctrl;
/* Register sysfs hooks */
status = sysfs_create_group(&client->dev.kobj, &lm73_group);
if (status)
return status;
- hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(hwmon_dev)) {
- status = PTR_ERR(hwmon_dev);
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ status = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
- i2c_set_clientdata(client, hwmon_dev);
dev_info(&client->dev, "%s: sensor '%s'\n",
- dev_name(hwmon_dev), client->name);
+ dev_name(data->hwmon_dev), client->name);
return 0;
@@ -138,9 +244,9 @@ exit_remove:
static int lm73_remove(struct i2c_client *client)
{
- struct device *hwmon_dev = i2c_get_clientdata(client);
+ struct lm73_data *data = i2c_get_clientdata(client);
- hwmon_device_unregister(hwmon_dev);
+ hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm73_group);
return 0;
}
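
On the lm73 limit rework above: the limit registers hold the temperature in 0.25 degC steps in bits [15:5], so the new LM73_TEMP_MIN/MAX are expressed directly in those steps (-256000 / 250 = -1024 and 255750 / 250 = 1023) rather than in whole degrees. A hedged round-trip example under that assumption:

/*
 * Illustration only: 42.5 degC = 42500 mC; 42500 / 250 = 170 steps;
 * 170 << 5 = 0x1540 is written to the register, and (0x1540 >> 5) * 250
 * = 42500 mC comes back out on read.
 */
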
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index 89aa9098ba5b..668ff4721323 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -36,7 +36,7 @@
REG: (0.5C/bit, two's complement) << 7 */
static inline u16 LM75_TEMP_TO_REG(long temp)
{
- int ntemp = SENSORS_LIMIT(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
+ int ntemp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
ntemp += (ntemp < 0 ? -250 : 250);
return (u16)((ntemp / 500) << 7);
}
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index f82acf67acf5..f17beb5e6dd6 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -101,7 +101,7 @@ static struct i2c_driver lm77_driver = {
*/
static inline s16 LM77_TEMP_TO_REG(int temp)
{
- int ntemp = SENSORS_LIMIT(temp, LM77_TEMP_MIN, LM77_TEMP_MAX);
+ int ntemp = clamp_val(temp, LM77_TEMP_MIN, LM77_TEMP_MAX);
return (ntemp / 500) * 8;
}
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 53d6ee8ffa33..483538fa1bd5 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -85,7 +85,7 @@ enum chips { lm78, lm79 };
*/
static inline u8 IN_TO_REG(unsigned long val)
{
- unsigned long nval = SENSORS_LIMIT(val, 0, 4080);
+ unsigned long nval = clamp_val(val, 0, 4080);
return (nval + 8) / 16;
}
#define IN_FROM_REG(val) ((val) * 16)
@@ -94,7 +94,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm <= 0)
return 255;
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
static inline int FAN_FROM_REG(u8 val, int div)
@@ -108,7 +108,7 @@ static inline int FAN_FROM_REG(u8 val, int div)
*/
static inline s8 TEMP_TO_REG(int val)
{
- int nval = SENSORS_LIMIT(val, -128000, 127000) ;
+ int nval = clamp_val(val, -128000, 127000) ;
return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000;
}
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 28a8b71f4571..357fbb998728 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -72,15 +72,15 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
* Fixing this is just not worth it.
*/
-#define IN_TO_REG(val) (SENSORS_LIMIT(((val) + 5) / 10, 0, 255))
+#define IN_TO_REG(val) (clamp_val(((val) + 5) / 10, 0, 255))
#define IN_FROM_REG(val) ((val) * 10)
static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -102,7 +102,7 @@ static inline long TEMP_FROM_REG(u16 temp)
#define TEMP_LIMIT_FROM_REG(val) (((val) > 0x80 ? \
(val) - 0x100 : (val)) * 1000)
-#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) < 0 ? \
+#define TEMP_LIMIT_TO_REG(val) clamp_val((val) < 0 ? \
((val) - 500) / 1000 : ((val) + 500) / 1000, 0, 255)
#define DIV_FROM_REG(val) (1 << (val))
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 9f2dd77e1e0e..47ade8ba152d 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -139,7 +139,7 @@ static const int lm85_scaling[] = { /* .001 Volts */
#define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from))
#define INS_TO_REG(n, val) \
- SENSORS_LIMIT(SCALE(val, lm85_scaling[n], 192), 0, 255)
+ clamp_val(SCALE(val, lm85_scaling[n], 192), 0, 255)
#define INSEXT_FROM_REG(n, val, ext) \
SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n])
@@ -151,19 +151,19 @@ static inline u16 FAN_TO_REG(unsigned long val)
{
if (!val)
return 0xffff;
- return SENSORS_LIMIT(5400000 / val, 1, 0xfffe);
+ return clamp_val(5400000 / val, 1, 0xfffe);
}
#define FAN_FROM_REG(val) ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \
5400000 / (val))
/* Temperature is reported in .001 degC increments */
#define TEMP_TO_REG(val) \
- SENSORS_LIMIT(SCALE(val, 1000, 1), -127, 127)
+ clamp_val(SCALE(val, 1000, 1), -127, 127)
#define TEMPEXT_FROM_REG(val, ext) \
SCALE(((val) << 4) + (ext), 16, 1000)
#define TEMP_FROM_REG(val) ((val) * 1000)
-#define PWM_TO_REG(val) SENSORS_LIMIT(val, 0, 255)
+#define PWM_TO_REG(val) clamp_val(val, 0, 255)
#define PWM_FROM_REG(val) (val)
@@ -258,7 +258,7 @@ static int ZONE_TO_REG(int zone)
return i << 5;
}
-#define HYST_TO_REG(val) SENSORS_LIMIT(((val) + 500) / 1000, 0, 15)
+#define HYST_TO_REG(val) clamp_val(((val) + 500) / 1000, 0, 15)
#define HYST_FROM_REG(val) ((val) * 1000)
/*
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 863412a02bdd..8eeb141c85ac 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -931,7 +931,7 @@ static ssize_t set_update_interval(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- lm90_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
+ lm90_set_convrate(client, data, clamp_val(val, 0, 100000));
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 1a003f73e4e4..b40f34cdb3ca 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -371,8 +371,8 @@ static unsigned LM93_IN_FROM_REG(int nr, u8 reg)
static u8 LM93_IN_TO_REG(int nr, unsigned val)
{
/* range limit */
- const long mV = SENSORS_LIMIT(val,
- lm93_vin_val_min[nr], lm93_vin_val_max[nr]);
+ const long mV = clamp_val(val,
+ lm93_vin_val_min[nr], lm93_vin_val_max[nr]);
/* try not to lose too much precision here */
const long uV = mV * 1000;
@@ -385,8 +385,8 @@ static u8 LM93_IN_TO_REG(int nr, unsigned val)
const long intercept = uV_min - slope * lm93_vin_reg_min[nr];
u8 result = ((uV - intercept + (slope/2)) / slope);
- result = SENSORS_LIMIT(result,
- lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]);
+ result = clamp_val(result,
+ lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]);
return result;
}
@@ -411,10 +411,10 @@ static u8 LM93_IN_REL_TO_REG(unsigned val, int upper, int vid)
{
long uV_offset = vid * 1000 - val * 10000;
if (upper) {
- uV_offset = SENSORS_LIMIT(uV_offset, 12500, 200000);
+ uV_offset = clamp_val(uV_offset, 12500, 200000);
return (u8)((uV_offset / 12500 - 1) << 4);
} else {
- uV_offset = SENSORS_LIMIT(uV_offset, -400000, -25000);
+ uV_offset = clamp_val(uV_offset, -400000, -25000);
return (u8)((uV_offset / -25000 - 1) << 0);
}
}
@@ -437,7 +437,7 @@ static int LM93_TEMP_FROM_REG(u8 reg)
*/
static u8 LM93_TEMP_TO_REG(long temp)
{
- int ntemp = SENSORS_LIMIT(temp, LM93_TEMP_MIN, LM93_TEMP_MAX);
+ int ntemp = clamp_val(temp, LM93_TEMP_MIN, LM93_TEMP_MAX);
ntemp += (ntemp < 0 ? -500 : 500);
return (u8)(ntemp / 1000);
}
@@ -472,7 +472,7 @@ static u8 LM93_TEMP_OFFSET_TO_REG(int off, int mode)
{
int factor = mode ? 5 : 10;
- off = SENSORS_LIMIT(off, LM93_TEMP_OFFSET_MIN,
+ off = clamp_val(off, LM93_TEMP_OFFSET_MIN,
mode ? LM93_TEMP_OFFSET_MAX1 : LM93_TEMP_OFFSET_MAX0);
return (u8)((off + factor/2) / factor);
}
@@ -620,8 +620,8 @@ static u16 LM93_FAN_TO_REG(long rpm)
if (rpm == 0) {
count = 0x3fff;
} else {
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- count = SENSORS_LIMIT((1350000 + rpm) / rpm, 1, 0x3ffe);
+ rpm = clamp_val(rpm, 1, 1000000);
+ count = clamp_val((1350000 + rpm) / rpm, 1, 0x3ffe);
}
regs = count << 2;
@@ -692,7 +692,7 @@ static int LM93_RAMP_FROM_REG(u8 reg)
*/
static u8 LM93_RAMP_TO_REG(int ramp)
{
- ramp = SENSORS_LIMIT(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX);
+ ramp = clamp_val(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX);
return (u8)((ramp + 2) / 5);
}
@@ -702,7 +702,7 @@ static u8 LM93_RAMP_TO_REG(int ramp)
*/
static u8 LM93_PROCHOT_TO_REG(long prochot)
{
- prochot = SENSORS_LIMIT(prochot, 0, 255);
+ prochot = clamp_val(prochot, 0, 255);
return (u8)prochot;
}
@@ -2052,7 +2052,7 @@ static ssize_t store_pwm_auto_channels(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- data->block9[nr][LM93_PWM_CTL1] = SENSORS_LIMIT(val, 0, 255);
+ data->block9[nr][LM93_PWM_CTL1] = clamp_val(val, 0, 255);
lm93_write_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL1),
data->block9[nr][LM93_PWM_CTL1]);
mutex_unlock(&data->update_lock);
@@ -2397,7 +2397,7 @@ static ssize_t store_prochot_override_duty_cycle(struct device *dev,
mutex_lock(&data->update_lock);
data->prochot_override = (data->prochot_override & 0xf0) |
- SENSORS_LIMIT(val, 0, 15);
+ clamp_val(val, 0, 15);
lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE,
data->prochot_override);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 2915fd908364..a6c85f0ff8f3 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -259,7 +259,7 @@ static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
val /= 1000;
- val = SENSORS_LIMIT(val, 0, (index == 6 ? 127 : 255));
+ val = clamp_val(val, 0, (index == 6 ? 127 : 255));
mutex_lock(&data->update_lock);
@@ -284,7 +284,7 @@ static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
val /= 1000;
- val = SENSORS_LIMIT(val, 0, 31);
+ val = clamp_val(val, 0, 31);
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index e0019c69d1bb..2fa2c02f5569 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -118,7 +118,7 @@ static inline int LIMIT_TO_MV(int limit, int range)
static inline int MV_TO_LIMIT(int mv, int range)
{
- return SENSORS_LIMIT(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255);
+ return clamp_val(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255);
}
static inline int ADC_TO_CURR(int adc, int gain)
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 666d9f6263eb..a7626358c95d 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -215,7 +215,7 @@ static ssize_t set_temp_max(struct device *dev,
return ret;
mutex_lock(&data->update_lock);
- data->temp_max[index] = SENSORS_LIMIT(temp/1000, -128, 127);
+ data->temp_max[index] = clamp_val(temp/1000, -128, 127);
if (i2c_smbus_write_byte_data(client,
MAX1668_REG_LIMH_WR(index),
data->temp_max[index]))
@@ -240,7 +240,7 @@ static ssize_t set_temp_min(struct device *dev,
return ret;
mutex_lock(&data->update_lock);
- data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127);
+ data->temp_min[index] = clamp_val(temp/1000, -128, 127);
if (i2c_smbus_write_byte_data(client,
MAX1668_REG_LIML_WR(index),
data->temp_max[index]))
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 6e60036abfa7..3e7b4269f5b9 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -74,7 +74,7 @@ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \
0 : (rpm_ranges[rpm_range] * 30) / (val))
-#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
+#define TEMP_LIMIT_TO_REG(val) clamp_val((val) / 1000, 0, 255)
/*
* Client data (each client gets its own)
@@ -312,7 +312,7 @@ static ssize_t set_pwm(struct device *dev,
if (res)
return res;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm[attr->index] = (u8)(val * 120 / 255);
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 223461a6d70f..57d58cd32206 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -239,7 +239,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_high[attr2->nr] = SENSORS_LIMIT(temp_to_reg(val), 0, 255);
+ data->temp_high[attr2->nr] = clamp_val(temp_to_reg(val), 0, 255);
i2c_smbus_write_byte_data(client, attr2->index,
data->temp_high[attr2->nr]);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index f739f83bafb9..3c16cbd4c002 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -245,7 +245,7 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
if (err)
return err;
- rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
+ rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
/*
* Divide the required speed by 60 to get from rpm to rps, then
@@ -313,7 +313,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
if (err)
return err;
- pwm = SENSORS_LIMIT(pwm, 0, 255);
+ pwm = clamp_val(pwm, 0, 255);
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
new file mode 100644
index 000000000000..bf4aa3777fc1
--- /dev/null
+++ b/drivers/hwmon/max6697.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
+ *
+ * based on max1668.c
+ * Copyright (c) 2011 David George <david.george@ska.ac.za>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include <linux/platform_data/max6697.h>
+
+enum chips { max6581, max6602, max6622, max6636, max6689, max6693, max6694,
+ max6697, max6698, max6699 };
+
+/* Report local sensor as temp1 */
+
+static const u8 MAX6697_REG_TEMP[] = {
+ 0x07, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08 };
+static const u8 MAX6697_REG_TEMP_EXT[] = {
+ 0x57, 0x09, 0x52, 0x53, 0x54, 0x55, 0x56, 0 };
+static const u8 MAX6697_REG_MAX[] = {
+ 0x17, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x18 };
+static const u8 MAX6697_REG_CRIT[] = {
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27 };
+
+/*
+ * Map device tree / platform data register bit map to chip bit map.
+ * Applies to alert register and over-temperature register.
+ */
+#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
+ (((reg) & 0x01) << 6) | ((reg) & 0x80))
+
+#define MAX6697_REG_STAT(n) (0x44 + (n))
+
+#define MAX6697_REG_CONFIG 0x41
+#define MAX6581_CONF_EXTENDED (1 << 1)
+#define MAX6693_CONF_BETA (1 << 2)
+#define MAX6697_CONF_RESISTANCE (1 << 3)
+#define MAX6697_CONF_TIMEOUT (1 << 5)
+#define MAX6697_REG_ALERT_MASK 0x42
+#define MAX6697_REG_OVERT_MASK 0x43
+
+#define MAX6581_REG_RESISTANCE 0x4a
+#define MAX6581_REG_IDEALITY 0x4b
+#define MAX6581_REG_IDEALITY_SELECT 0x4c
+#define MAX6581_REG_OFFSET 0x4d
+#define MAX6581_REG_OFFSET_SELECT 0x4e
+
+#define MAX6697_CONV_TIME 156 /* ms per channel, worst case */
+
+struct max6697_chip_data {
+ int channels;
+ u32 have_ext;
+ u32 have_crit;
+ u32 have_fault;
+ u8 valid_conf;
+ const u8 *alarm_map;
+};
+
+struct max6697_data {
+ struct device *hwmon_dev;
+
+ enum chips type;
+ const struct max6697_chip_data *chip;
+
+ int update_interval; /* in milli-seconds */
+ int temp_offset; /* in degrees C */
+
+ struct mutex update_lock;
+ unsigned long last_updated; /* In jiffies */
+ bool valid; /* true if following fields are valid */
+
+ /* 1x local and up to 7x remote */
+ u8 temp[8][4]; /* [nr][0]=temp [1]=ext [2]=max [3]=crit */
+#define MAX6697_TEMP_INPUT 0
+#define MAX6697_TEMP_EXT 1
+#define MAX6697_TEMP_MAX 2
+#define MAX6697_TEMP_CRIT 3
+ u32 alarms;
+};
+
+/* Diode fault status bits on MAX6581 are right shifted by one bit */
+static const u8 max6581_alarm_map[] = {
+ 0, 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23 };
+
+static const struct max6697_chip_data max6697_chip_data[] = {
+ [max6581] = {
+ .channels = 8,
+ .have_crit = 0xff,
+ .have_ext = 0x7f,
+ .have_fault = 0xfe,
+ .valid_conf = MAX6581_CONF_EXTENDED | MAX6697_CONF_TIMEOUT,
+ .alarm_map = max6581_alarm_map,
+ },
+ [max6602] = {
+ .channels = 5,
+ .have_crit = 0x12,
+ .have_ext = 0x02,
+ .have_fault = 0x1e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6622] = {
+ .channels = 5,
+ .have_crit = 0x12,
+ .have_ext = 0x02,
+ .have_fault = 0x1e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6636] = {
+ .channels = 7,
+ .have_crit = 0x72,
+ .have_ext = 0x02,
+ .have_fault = 0x7e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6689] = {
+ .channels = 7,
+ .have_crit = 0x72,
+ .have_ext = 0x02,
+ .have_fault = 0x7e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6693] = {
+ .channels = 7,
+ .have_crit = 0x72,
+ .have_ext = 0x02,
+ .have_fault = 0x7e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6693_CONF_BETA |
+ MAX6697_CONF_TIMEOUT,
+ },
+ [max6694] = {
+ .channels = 5,
+ .have_crit = 0x12,
+ .have_ext = 0x02,
+ .have_fault = 0x1e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6693_CONF_BETA |
+ MAX6697_CONF_TIMEOUT,
+ },
+ [max6697] = {
+ .channels = 7,
+ .have_crit = 0x72,
+ .have_ext = 0x02,
+ .have_fault = 0x7e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6698] = {
+ .channels = 7,
+ .have_crit = 0x72,
+ .have_ext = 0x02,
+ .have_fault = 0x0e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+ [max6699] = {
+ .channels = 5,
+ .have_crit = 0x12,
+ .have_ext = 0x02,
+ .have_fault = 0x1e,
+ .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+ },
+};
+
+static struct max6697_data *max6697_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6697_data *data = i2c_get_clientdata(client);
+ struct max6697_data *ret = data;
+ int val;
+ int i;
+ u32 alarms;
+
+ mutex_lock(&data->update_lock);
+
+ if (data->valid &&
+ !time_after(jiffies, data->last_updated
+ + msecs_to_jiffies(data->update_interval)))
+ goto abort;
+
+ for (i = 0; i < data->chip->channels; i++) {
+ if (data->chip->have_ext & (1 << i)) {
+ val = i2c_smbus_read_byte_data(client,
+ MAX6697_REG_TEMP_EXT[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp[i][MAX6697_TEMP_EXT] = val;
+ }
+
+ val = i2c_smbus_read_byte_data(client, MAX6697_REG_TEMP[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp[i][MAX6697_TEMP_INPUT] = val;
+
+ val = i2c_smbus_read_byte_data(client, MAX6697_REG_MAX[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp[i][MAX6697_TEMP_MAX] = val;
+
+ if (data->chip->have_crit & (1 << i)) {
+ val = i2c_smbus_read_byte_data(client,
+ MAX6697_REG_CRIT[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp[i][MAX6697_TEMP_CRIT] = val;
+ }
+ }
+
+ alarms = 0;
+ for (i = 0; i < 3; i++) {
+ val = i2c_smbus_read_byte_data(client, MAX6697_REG_STAT(i));
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ alarms = (alarms << 8) | val;
+ }
+ data->alarms = alarms;
+ data->last_updated = jiffies;
+ data->valid = true;
+abort:
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+}
+
+static ssize_t show_temp_input(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct max6697_data *data = max6697_update_device(dev);
+ int temp;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ temp = (data->temp[index][MAX6697_TEMP_INPUT] - data->temp_offset) << 3;
+ temp |= data->temp[index][MAX6697_TEMP_EXT] >> 5;
+
+ return sprintf(buf, "%d\n", temp * 125);
+}
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr_2(devattr)->nr;
+ int index = to_sensor_dev_attr_2(devattr)->index;
+ struct max6697_data *data = max6697_update_device(dev);
+ int temp;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ temp = data->temp[nr][index];
+ temp -= data->temp_offset;
+
+ return sprintf(buf, "%d\n", temp * 1000);
+}
+
+static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int index = to_sensor_dev_attr(attr)->index;
+ struct max6697_data *data = max6697_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (data->chip->alarm_map)
+ index = data->chip->alarm_map[index];
+
+ return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1);
+}
+
+static ssize_t set_temp(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr_2(devattr)->nr;
+ int index = to_sensor_dev_attr_2(devattr)->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6697_data *data = i2c_get_clientdata(client);
+ long temp;
+ int ret;
+
+ ret = kstrtol(buf, 10, &temp);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
+ temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
+ data->temp[nr][index] = temp;
+ ret = i2c_smbus_write_byte_data(client,
+ index == 2 ? MAX6697_REG_MAX[nr]
+ : MAX6697_REG_CRIT[nr],
+ temp);
+ mutex_unlock(&data->update_lock);
+
+ return ret < 0 ? ret : count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 0, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 0, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 1, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 1, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_input, NULL, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 2, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 2, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_input, NULL, 3);
+static SENSOR_DEVICE_ATTR_2(temp4_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 3, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp4_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 3, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_input, NULL, 4);
+static SENSOR_DEVICE_ATTR_2(temp5_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 4, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp5_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 4, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_input, NULL, 5);
+static SENSOR_DEVICE_ATTR_2(temp6_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 5, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp6_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 5, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO, show_temp_input, NULL, 6);
+static SENSOR_DEVICE_ATTR_2(temp7_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 6, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp7_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 6, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, show_temp_input, NULL, 7);
+static SENSOR_DEVICE_ATTR_2(temp8_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 7, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp8_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+ 7, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 22);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 16);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 17);
+static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 18);
+static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 19);
+static SENSOR_DEVICE_ATTR(temp6_max_alarm, S_IRUGO, show_alarm, NULL, 20);
+static SENSOR_DEVICE_ATTR(temp7_max_alarm, S_IRUGO, show_alarm, NULL, 21);
+static SENSOR_DEVICE_ATTR(temp8_max_alarm, S_IRUGO, show_alarm, NULL, 23);
+
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, show_alarm, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO, show_alarm, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp6_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp7_crit_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp8_crit_alarm, S_IRUGO, show_alarm, NULL, 15);
+
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_fault, S_IRUGO, show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_fault, S_IRUGO, show_alarm, NULL, 7);
+
+static struct attribute *max6697_attributes[8][7] = {
+ {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit.dev_attr.attr,
+ &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit.dev_attr.attr,
+ &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+ &sensor_dev_attr_temp5_max.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_crit.dev_attr.attr,
+ &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp6_input.dev_attr.attr,
+ &sensor_dev_attr_temp6_max.dev_attr.attr,
+ &sensor_dev_attr_temp6_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp6_crit.dev_attr.attr,
+ &sensor_dev_attr_temp6_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp6_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp7_input.dev_attr.attr,
+ &sensor_dev_attr_temp7_max.dev_attr.attr,
+ &sensor_dev_attr_temp7_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp7_crit.dev_attr.attr,
+ &sensor_dev_attr_temp7_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp7_fault.dev_attr.attr,
+ NULL
+ }, {
+ &sensor_dev_attr_temp8_input.dev_attr.attr,
+ &sensor_dev_attr_temp8_max.dev_attr.attr,
+ &sensor_dev_attr_temp8_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp8_crit.dev_attr.attr,
+ &sensor_dev_attr_temp8_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp8_fault.dev_attr.attr,
+ NULL
+ }
+};
+
+static const struct attribute_group max6697_group[8] = {
+ { .attrs = max6697_attributes[0] },
+ { .attrs = max6697_attributes[1] },
+ { .attrs = max6697_attributes[2] },
+ { .attrs = max6697_attributes[3] },
+ { .attrs = max6697_attributes[4] },
+ { .attrs = max6697_attributes[5] },
+ { .attrs = max6697_attributes[6] },
+ { .attrs = max6697_attributes[7] },
+};
+
+static void max6697_get_config_of(struct device_node *node,
+ struct max6697_platform_data *pdata)
+{
+ int len;
+ const __be32 *prop;
+
+ prop = of_get_property(node, "smbus-timeout-disable", &len);
+ if (prop)
+ pdata->smbus_timeout_disable = true;
+ prop = of_get_property(node, "extended-range-enable", &len);
+ if (prop)
+ pdata->extended_range_enable = true;
+ prop = of_get_property(node, "beta-compensation-enable", &len);
+ if (prop)
+ pdata->beta_compensation = true;
+ prop = of_get_property(node, "alert-mask", &len);
+ if (prop && len == sizeof(u32))
+ pdata->alert_mask = be32_to_cpu(prop[0]);
+ prop = of_get_property(node, "over-temperature-mask", &len);
+ if (prop && len == sizeof(u32))
+ pdata->over_temperature_mask = be32_to_cpu(prop[0]);
+ prop = of_get_property(node, "resistance-cancellation", &len);
+ if (prop) {
+ if (len == sizeof(u32))
+ pdata->resistance_cancellation = be32_to_cpu(prop[0]);
+ else
+ pdata->resistance_cancellation = 0xfe;
+ }
+ prop = of_get_property(node, "transistor-ideality", &len);
+ if (prop && len == 2 * sizeof(u32)) {
+ pdata->ideality_mask = be32_to_cpu(prop[0]);
+ pdata->ideality_value = be32_to_cpu(prop[1]);
+ }
+}
+
+static int max6697_init_chip(struct i2c_client *client)
+{
+ struct max6697_data *data = i2c_get_clientdata(client);
+ struct max6697_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct max6697_platform_data p;
+ const struct max6697_chip_data *chip = data->chip;
+ int factor = chip->channels;
+ int ret, reg;
+
+ /*
+ * Don't touch configuration if neither platform data nor OF
+ * configuration was specified. If that is the case, use the
+ * current chip configuration.
+ */
+ if (!pdata && !client->dev.of_node) {
+ reg = i2c_smbus_read_byte_data(client, MAX6697_REG_CONFIG);
+ if (reg < 0)
+ return reg;
+ if (data->type == max6581) {
+ if (reg & MAX6581_CONF_EXTENDED)
+ data->temp_offset = 64;
+ reg = i2c_smbus_read_byte_data(client,
+ MAX6581_REG_RESISTANCE);
+ if (reg < 0)
+ return reg;
+ factor += hweight8(reg);
+ } else {
+ if (reg & MAX6697_CONF_RESISTANCE)
+ factor++;
+ }
+ goto done;
+ }
+
+ if (client->dev.of_node) {
+ memset(&p, 0, sizeof(p));
+ max6697_get_config_of(client->dev.of_node, &p);
+ pdata = &p;
+ }
+
+ reg = 0;
+ if (pdata->smbus_timeout_disable &&
+ (chip->valid_conf & MAX6697_CONF_TIMEOUT)) {
+ reg |= MAX6697_CONF_TIMEOUT;
+ }
+ if (pdata->extended_range_enable &&
+ (chip->valid_conf & MAX6581_CONF_EXTENDED)) {
+ reg |= MAX6581_CONF_EXTENDED;
+ data->temp_offset = 64;
+ }
+ if (pdata->resistance_cancellation &&
+ (chip->valid_conf & MAX6697_CONF_RESISTANCE)) {
+ reg |= MAX6697_CONF_RESISTANCE;
+ factor++;
+ }
+ if (pdata->beta_compensation &&
+ (chip->valid_conf & MAX6693_CONF_BETA)) {
+ reg |= MAX6693_CONF_BETA;
+ }
+
+ ret = i2c_smbus_write_byte_data(client, MAX6697_REG_CONFIG, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
+ MAX6697_MAP_BITS(pdata->alert_mask));
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
+ MAX6697_MAP_BITS(pdata->over_temperature_mask));
+ if (ret < 0)
+ return ret;
+
+ if (data->type == max6581) {
+ factor += hweight8(pdata->resistance_cancellation >> 1);
+ ret = i2c_smbus_write_byte_data(client, MAX6581_REG_RESISTANCE,
+ pdata->resistance_cancellation >> 1);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY,
+ pdata->ideality_mask >> 1);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(client,
+ MAX6581_REG_IDEALITY_SELECT,
+ pdata->ideality_value);
+ if (ret < 0)
+ return ret;
+ }
+done:
+ data->update_interval = factor * MAX6697_CONV_TIME;
+ return 0;
+}
+
+static void max6697_remove_files(struct i2c_client *client)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max6697_group); i++)
+ sysfs_remove_group(&client->dev.kobj, &max6697_group[i]);
+}
+
+static int max6697_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
+ struct max6697_data *data;
+ int i, err;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(struct max6697_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->type = id->driver_data;
+ data->chip = &max6697_chip_data[data->type];
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ err = max6697_init_chip(client);
+ if (err)
+ return err;
+
+ for (i = 0; i < data->chip->channels; i++) {
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][0]);
+ if (err)
+ goto error;
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][1]);
+ if (err)
+ goto error;
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][2]);
+ if (err)
+ goto error;
+
+ if (data->chip->have_crit & (1 << i)) {
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][3]);
+ if (err)
+ goto error;
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][4]);
+ if (err)
+ goto error;
+ }
+ if (data->chip->have_fault & (1 << i)) {
+ err = sysfs_create_file(&dev->kobj,
+ max6697_attributes[i][5]);
+ if (err)
+ goto error;
+ }
+ }
+
+ data->hwmon_dev = hwmon_device_register(dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ max6697_remove_files(client);
+ return err;
+}
+
+static int max6697_remove(struct i2c_client *client)
+{
+ struct max6697_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ max6697_remove_files(client);
+
+ return 0;
+}
+
+static const struct i2c_device_id max6697_id[] = {
+ { "max6581", max6581 },
+ { "max6602", max6602 },
+ { "max6622", max6622 },
+ { "max6636", max6636 },
+ { "max6689", max6689 },
+ { "max6693", max6693 },
+ { "max6694", max6694 },
+ { "max6697", max6697 },
+ { "max6698", max6698 },
+ { "max6699", max6699 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max6697_id);
+
+static struct i2c_driver max6697_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max6697",
+ },
+ .probe = max6697_probe,
+ .remove = max6697_remove,
+ .id_table = max6697_id,
+};
+
+module_i2c_driver(max6697_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("MAX6697 temperature sensor driver");
+MODULE_LICENSE("GPL");
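
show_temp_input() in the new driver above merges the coarse and extended temperature registers into an 11-bit value with 0.125 degC resolution. A worked sketch of that arithmetic (the register values are made-up examples, not from the patch):

	static int example_temp_millideg(u8 reg, u8 ext, int offset)
	{
		int temp = ((reg - offset) << 3) | (ext >> 5);

		/* reg = 0x32, ext = 0xa0, offset = 0:
		 * temp = (50 << 3) | 5 = 405, 405 * 125 = 50625 mC = 50.625 degC
		 */
		return temp * 125;
	}
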
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index a87eb8986e36..b5f63f9c0ce1 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -43,7 +43,7 @@ struct ntc_compensation {
* The following compensation tables are from the specification of Murata NTC
* Thermistors Datasheet
*/
-const struct ntc_compensation ncpXXwb473[] = {
+static const struct ntc_compensation ncpXXwb473[] = {
{ .temp_C = -40, .ohm = 1747920 },
{ .temp_C = -35, .ohm = 1245428 },
{ .temp_C = -30, .ohm = 898485 },
@@ -79,7 +79,7 @@ const struct ntc_compensation ncpXXwb473[] = {
{ .temp_C = 120, .ohm = 1615 },
{ .temp_C = 125, .ohm = 1406 },
};
-const struct ntc_compensation ncpXXwl333[] = {
+static const struct ntc_compensation ncpXXwl333[] = {
{ .temp_C = -40, .ohm = 1610154 },
{ .temp_C = -35, .ohm = 1130850 },
{ .temp_C = -30, .ohm = 802609 },
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 60745a535821..4f9eb0af5229 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -72,7 +72,7 @@ config SENSORS_MAX34440
default n
help
If you say yes here you get hardware monitoring support for Maxim
- MAX34440, MAX34441, and MAX34446.
+ MAX34440, MAX34441, MAX34446, MAX34460, and MAX34461.
This driver can also be built as a module. If so, the module will
be called max34440.
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 2ada7b021fbe..7e930c3ce1ab 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -2,6 +2,7 @@
* Hardware monitoring driver for Maxim MAX34440/MAX34441
*
* Copyright (c) 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,7 +26,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { max34440, max34441, max34446 };
+enum chips { max34440, max34441, max34446, max34460, max34461 };
#define MAX34440_MFR_VOUT_PEAK 0xd4
#define MAX34440_MFR_IOUT_PEAK 0xd5
@@ -87,7 +88,8 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
MAX34446_MFR_POUT_PEAK);
break;
case PMBUS_VIRT_READ_TEMP_AVG:
- if (data->id != max34446)
+ if (data->id != max34446 && data->id != max34460 &&
+ data->id != max34461)
return -ENXIO;
ret = pmbus_read_word_data(client, page,
MAX34446_MFR_TEMPERATURE_AVG);
@@ -322,6 +324,73 @@ static struct pmbus_driver_info max34440_info[] = {
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
+ [max34460] = {
+ .pages = 18,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[6] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[7] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[8] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[9] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[10] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[11] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[14] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
+ [max34461] = {
+ .pages = 23,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[6] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[7] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[8] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[9] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[10] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[11] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[12] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[13] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[14] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[15] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ /* page 16 is reserved */
+ .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
};
static int max34440_probe(struct i2c_client *client,
@@ -343,6 +412,8 @@ static const struct i2c_device_id max34440_id[] = {
{"max34440", max34440},
{"max34441", max34441},
{"max34446", max34446},
+ {"max34460", max34460},
+ {"max34461", max34461},
{}
};
MODULE_DEVICE_TABLE(i2c, max34440_id);
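
The direct-format coefficients added for the MAX34460/MAX34461 follow the standard PMBus direct-format relation, shown here as a worked example (illustrative only):

	/*
	 * Direct data format: Y = (m * X + b) * 10^R
	 * With m = 1, b = 0, R = 3 for PSC_VOLTAGE_OUT, a register value of
	 * Y = 1204 decodes to X = 1204 * 10^-3 = 1.204 V, i.e. the register
	 * effectively holds millivolts; the R = 2 temperature coefficients
	 * make readings hundredths of a degree C.
	 */
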
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 3fe03dc47eb7..fa9beb3eb60c 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -2,6 +2,7 @@
* pmbus.h - Common defines and structures for PMBus devices
*
* Copyright (c) 2010, 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -177,6 +178,13 @@
#define PMBUS_VIRT_READ_TEMP2_MAX (PMBUS_VIRT_BASE + 28)
#define PMBUS_VIRT_RESET_TEMP2_HISTORY (PMBUS_VIRT_BASE + 29)
+#define PMBUS_VIRT_READ_VMON (PMBUS_VIRT_BASE + 30)
+#define PMBUS_VIRT_VMON_UV_WARN_LIMIT (PMBUS_VIRT_BASE + 31)
+#define PMBUS_VIRT_VMON_OV_WARN_LIMIT (PMBUS_VIRT_BASE + 32)
+#define PMBUS_VIRT_VMON_UV_FAULT_LIMIT (PMBUS_VIRT_BASE + 33)
+#define PMBUS_VIRT_VMON_OV_FAULT_LIMIT (PMBUS_VIRT_BASE + 34)
+#define PMBUS_VIRT_STATUS_VMON (PMBUS_VIRT_BASE + 35)
+
/*
* CAPABILITY
*/
@@ -317,6 +325,8 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_TEMP (1 << 15)
#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
+#define PMBUS_HAVE_VMON (1 << 18)
+#define PMBUS_HAVE_STATUS_VMON (1 << 19)
enum pmbus_data_format { linear = 0, direct, vid };
@@ -359,6 +369,7 @@ struct pmbus_driver_info {
/* Function declarations */
+void pmbus_clear_cache(struct i2c_client *client);
int pmbus_set_page(struct i2c_client *client, u8 page);
int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
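
The new PMBUS_VIRT_* constants are virtual registers: they never go on the wire, but give chip drivers a place to hook non-standard functionality (here a general-purpose voltage monitor) into the generic attribute machinery. A hypothetical chip-driver hook, sketched only to show the idea; the vendor command 0xd1 and the function name are made up:

	static int example_read_word_data(struct i2c_client *client, int page, int reg)
	{
		switch (reg) {
		case PMBUS_VIRT_READ_VMON:
			/* translate the virtual register into a vendor command */
			return pmbus_read_word_data(client, page, 0xd1);
		default:
			return -ENODATA;	/* let the core handle standard registers */
		}
	}
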
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 7d19b1bb9ce6..80eef50c50fd 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -2,6 +2,7 @@
* Hardware monitoring driver for PMBus devices
*
* Copyright (c) 2010, 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,45 +32,10 @@
#include "pmbus.h"
/*
- * Constants needed to determine number of sensors, booleans, and labels.
+ * Number of additional attribute pointers to allocate
+ * with each call to krealloc
*/
-#define PMBUS_MAX_INPUT_SENSORS 22 /* 10*volt, 7*curr, 5*power */
-#define PMBUS_VOUT_SENSORS_PER_PAGE 9 /* input, min, max, lcrit,
- crit, lowest, highest, avg,
- reset */
-#define PMBUS_IOUT_SENSORS_PER_PAGE 8 /* input, min, max, crit,
- lowest, highest, avg,
- reset */
-#define PMBUS_POUT_SENSORS_PER_PAGE 7 /* input, cap, max, crit,
- * highest, avg, reset
- */
-#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */
-#define PMBUS_MAX_SENSORS_PER_TEMP 9 /* input, min, max, lcrit,
- * crit, lowest, highest, avg,
- * reset
- */
-
-#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm,
- lcrit_alarm, crit_alarm;
- c: alarm, crit_alarm;
- p: crit_alarm */
-#define PMBUS_VOUT_BOOLEANS_PER_PAGE 4 /* min_alarm, max_alarm,
- lcrit_alarm, crit_alarm */
-#define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm,
- crit_alarm */
-#define PMBUS_POUT_BOOLEANS_PER_PAGE 3 /* cap_alarm, alarm, crit_alarm
- */
-#define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */
-#define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm,
- lcrit_alarm, crit_alarm */
-
-#define PMBUS_MAX_INPUT_LABELS 4 /* vin, vcap, iin, pin */
-
-/*
- * status, status_vout, status_iout, status_fans, status_fan34, and status_temp
- * are paged. status_input is unpaged.
- */
-#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1)
+#define PMBUS_ATTR_ALLOC_SIZE 32
/*
* Index into status register array, per status register group
@@ -79,14 +45,18 @@
#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES)
#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES)
#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES)
-#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
-#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1)
+#define PB_STATUS_TEMP_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
+#define PB_STATUS_INPUT_BASE (PB_STATUS_TEMP_BASE + PMBUS_PAGES)
+#define PB_STATUS_VMON_BASE (PB_STATUS_INPUT_BASE + 1)
+
+#define PB_NUM_STATUS_REG (PB_STATUS_VMON_BASE + 1)
#define PMBUS_NAME_SIZE 24
struct pmbus_sensor {
+ struct pmbus_sensor *next;
char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */
- struct sensor_device_attribute attribute;
+ struct device_attribute attribute;
u8 page; /* page number */
u16 reg; /* register */
enum pmbus_sensor_classes class; /* sensor class */
@@ -94,19 +64,28 @@ struct pmbus_sensor {
int data; /* Sensor data.
Negative if there was a read error */
};
+#define to_pmbus_sensor(_attr) \
+ container_of(_attr, struct pmbus_sensor, attribute)
struct pmbus_boolean {
char name[PMBUS_NAME_SIZE]; /* sysfs boolean name */
struct sensor_device_attribute attribute;
+ struct pmbus_sensor *s1;
+ struct pmbus_sensor *s2;
};
+#define to_pmbus_boolean(_attr) \
+ container_of(_attr, struct pmbus_boolean, attribute)
struct pmbus_label {
char name[PMBUS_NAME_SIZE]; /* sysfs label name */
- struct sensor_device_attribute attribute;
+ struct device_attribute attribute;
char label[PMBUS_NAME_SIZE]; /* label */
};
+#define to_pmbus_label(_attr) \
+ container_of(_attr, struct pmbus_label, attribute)
struct pmbus_data {
+ struct device *dev;
struct device *hwmon_dev;
u32 flags; /* from platform data */
@@ -117,29 +96,9 @@ struct pmbus_data {
int max_attributes;
int num_attributes;
- struct attribute **attributes;
struct attribute_group group;
- /*
- * Sensors cover both sensor and limit registers.
- */
- int max_sensors;
- int num_sensors;
struct pmbus_sensor *sensors;
- /*
- * Booleans are used for alarms.
- * Values are determined from status registers.
- */
- int max_booleans;
- int num_booleans;
- struct pmbus_boolean *booleans;
- /*
- * Labels are used to map generic names (e.g., "in1")
- * to PMBus specific names (e.g., "vin" or "vout1").
- */
- int max_labels;
- int num_labels;
- struct pmbus_label *labels;
struct mutex update_lock;
bool valid;
@@ -150,10 +109,19 @@ struct pmbus_data {
* so we keep them all together.
*/
u8 status[PB_NUM_STATUS_REG];
+ u8 status_register;
u8 currpage;
};
+void pmbus_clear_cache(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ data->valid = false;
+}
+EXPORT_SYMBOL_GPL(pmbus_clear_cache);
+
int pmbus_set_page(struct i2c_client *client, u8 page)
{
struct pmbus_data *data = i2c_get_clientdata(client);
@@ -318,9 +286,10 @@ EXPORT_SYMBOL_GPL(pmbus_clear_faults);
static int pmbus_check_status_cml(struct i2c_client *client)
{
+ struct pmbus_data *data = i2c_get_clientdata(client);
int status, status2;
- status = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_BYTE);
+ status = _pmbus_read_byte_data(client, -1, data->status_register);
if (status < 0 || (status & PB_STATUS_CML)) {
status2 = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
@@ -329,29 +298,30 @@ static int pmbus_check_status_cml(struct i2c_client *client)
return 0;
}
-bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
+static bool pmbus_check_register(struct i2c_client *client,
+ int (*func)(struct i2c_client *client,
+ int page, int reg),
+ int page, int reg)
{
int rv;
struct pmbus_data *data = i2c_get_clientdata(client);
- rv = _pmbus_read_byte_data(client, page, reg);
+ rv = func(client, page, reg);
if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
rv = pmbus_check_status_cml(client);
pmbus_clear_fault_page(client, -1);
return rv >= 0;
}
+
+bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
+{
+ return pmbus_check_register(client, _pmbus_read_byte_data, page, reg);
+}
EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
{
- int rv;
- struct pmbus_data *data = i2c_get_clientdata(client);
-
- rv = _pmbus_read_word_data(client, page, reg);
- if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
- rv = pmbus_check_status_cml(client);
- pmbus_clear_fault_page(client, -1);
- return rv >= 0;
+ return pmbus_check_register(client, _pmbus_read_word_data, page, reg);
}
EXPORT_SYMBOL_GPL(pmbus_check_word_register);
@@ -363,53 +333,43 @@ const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
+static struct _pmbus_status {
+ u32 func;
+ u16 base;
+ u16 reg;
+} pmbus_status[] = {
+ { PMBUS_HAVE_STATUS_VOUT, PB_STATUS_VOUT_BASE, PMBUS_STATUS_VOUT },
+ { PMBUS_HAVE_STATUS_IOUT, PB_STATUS_IOUT_BASE, PMBUS_STATUS_IOUT },
+ { PMBUS_HAVE_STATUS_TEMP, PB_STATUS_TEMP_BASE,
+ PMBUS_STATUS_TEMPERATURE },
+ { PMBUS_HAVE_STATUS_FAN12, PB_STATUS_FAN_BASE, PMBUS_STATUS_FAN_12 },
+ { PMBUS_HAVE_STATUS_FAN34, PB_STATUS_FAN34_BASE, PMBUS_STATUS_FAN_34 },
+};
+
static struct pmbus_data *pmbus_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pmbus_data *data = i2c_get_clientdata(client);
const struct pmbus_driver_info *info = data->info;
+ struct pmbus_sensor *sensor;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- int i;
+ int i, j;
- for (i = 0; i < info->pages; i++)
+ for (i = 0; i < info->pages; i++) {
data->status[PB_STATUS_BASE + i]
= _pmbus_read_byte_data(client, i,
- PMBUS_STATUS_BYTE);
- for (i = 0; i < info->pages; i++) {
- if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT))
- continue;
- data->status[PB_STATUS_VOUT_BASE + i]
- = _pmbus_read_byte_data(client, i, PMBUS_STATUS_VOUT);
- }
- for (i = 0; i < info->pages; i++) {
- if (!(info->func[i] & PMBUS_HAVE_STATUS_IOUT))
- continue;
- data->status[PB_STATUS_IOUT_BASE + i]
- = _pmbus_read_byte_data(client, i, PMBUS_STATUS_IOUT);
- }
- for (i = 0; i < info->pages; i++) {
- if (!(info->func[i] & PMBUS_HAVE_STATUS_TEMP))
- continue;
- data->status[PB_STATUS_TEMP_BASE + i]
- = _pmbus_read_byte_data(client, i,
- PMBUS_STATUS_TEMPERATURE);
- }
- for (i = 0; i < info->pages; i++) {
- if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN12))
- continue;
- data->status[PB_STATUS_FAN_BASE + i]
- = _pmbus_read_byte_data(client, i,
- PMBUS_STATUS_FAN_12);
- }
-
- for (i = 0; i < info->pages; i++) {
- if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN34))
- continue;
- data->status[PB_STATUS_FAN34_BASE + i]
- = _pmbus_read_byte_data(client, i,
- PMBUS_STATUS_FAN_34);
+ data->status_register);
+ for (j = 0; j < ARRAY_SIZE(pmbus_status); j++) {
+ struct _pmbus_status *s = &pmbus_status[j];
+
+ if (!(info->func[i] & s->func))
+ continue;
+ data->status[s->base + i]
+ = _pmbus_read_byte_data(client, i,
+ s->reg);
+ }
}
if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
@@ -417,9 +377,12 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
= _pmbus_read_byte_data(client, 0,
PMBUS_STATUS_INPUT);
- for (i = 0; i < data->num_sensors; i++) {
- struct pmbus_sensor *sensor = &data->sensors[i];
+ if (info->func[0] & PMBUS_HAVE_STATUS_VMON)
+ data->status[PB_STATUS_VMON_BASE]
+ = _pmbus_read_byte_data(client, 0,
+ PMBUS_VIRT_STATUS_VMON);
+ for (sensor = data->sensors; sensor; sensor = sensor->next) {
if (!data->valid || sensor->update)
sensor->data
= _pmbus_read_word_data(client,
@@ -657,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
static u16 pmbus_data2reg_vid(struct pmbus_data *data,
enum pmbus_sensor_classes class, long val)
{
- val = SENSORS_LIMIT(val, 500, 1600);
+ val = clamp_val(val, 500, 1600);
return 2 + DIV_ROUND_CLOSEST((1600 - val) * 100, 625);
}
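
The clamp in pmbus_data2reg_vid() bounds the requested output voltage to the 500..1600 mV range the VID encoding can express before converting it; a worked example (numbers purely illustrative):

	static u16 example_vid_encode(long mv)
	{
		mv = clamp_val(mv, 500, 1600);
		/* 1200 mV: 2 + DIV_ROUND_CLOSEST(400 * 100, 625) = 2 + 64 = 66 */
		return 2 + DIV_ROUND_CLOSEST((1600 - mv) * 100, 625);
	}
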
@@ -684,25 +647,20 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
/*
* Return boolean calculated from converted data.
- * <index> defines a status register index and mask, and optionally
- * two sensor indexes.
- * The upper half-word references the two optional sensors,
- * the lower half word references status register and mask.
- * The function returns true if (status[reg] & mask) is true and,
- * if specified, if v1 >= v2.
- * To determine if an object exceeds upper limits, specify <v, limit>.
- * To determine if an object exceeds lower limits, specify <limit, v>.
+ * <index> defines a status register index and mask.
+ * The mask is in the lower 8 bits, the register index is in bits 8..23.
*
- * For booleans created with pmbus_add_boolean_reg(), only the lower 16 bits of
- * index are set. s1 and s2 (the sensor index values) are zero in this case.
- * The function returns true if (status[reg] & mask) is true.
+ * The associated pmbus_boolean structure contains optional pointers to two
+ * sensor attributes. If specified, those attributes are compared against each
+ * other to determine if a limit has been exceeded.
*
- * If the boolean was created with pmbus_add_boolean_cmp(), a comparison against
- * a specified limit has to be performed to determine the boolean result.
+ * If the sensor attribute pointers are NULL, the function returns true if
+ * (status[reg] & mask) is true.
+ *
+ * If sensor attribute pointers are provided, a comparison against a specified
+ * limit has to be performed to determine the boolean result.
* In this case, the function returns true if v1 >= v2 (where v1 and v2 are
- * sensor values referenced by sensor indices s1 and s2).
+ * sensor values referenced by sensor attribute pointers s1 and s2).
*
* To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>.
* To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>.
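
With the new encoding described above, <index> carries only the status register and mask; the two optional sensors now travel as pointers in struct pmbus_boolean. A stripped-down sketch of the decode (error handling omitted, using constants defined earlier in this patch, e.g. index = (PB_STATUS_VOUT_BASE << 8) | PB_VOLTAGE_OV_WARNING):

	static bool example_status_bit_set(struct pmbus_data *data, u32 index)
	{
		u16 reg = (index >> 8) & 0xffff;	/* status register index, bits 8..23 */
		u8 mask = index & 0xff;			/* status bit mask, bits 0..7 */

		return data->status[reg] & mask;
	}
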
@@ -710,11 +668,12 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
* If a negative value is stored in any of the referenced registers, this value
* reflects an error code which will be returned.
*/
-static int pmbus_get_boolean(struct pmbus_data *data, int index)
+static int pmbus_get_boolean(struct pmbus_data *data, struct pmbus_boolean *b,
+ int index)
{
- u8 s1 = (index >> 24) & 0xff;
- u8 s2 = (index >> 16) & 0xff;
- u8 reg = (index >> 8) & 0xff;
+ struct pmbus_sensor *s1 = b->s1;
+ struct pmbus_sensor *s2 = b->s2;
+ u16 reg = (index >> 8) & 0xffff;
u8 mask = index & 0xff;
int ret, status;
u8 regval;
@@ -724,21 +683,21 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index)
return status;
regval = status & mask;
- if (!s1 && !s2)
+ if (!s1 && !s2) {
ret = !!regval;
- else {
+ } else if (!s1 || !s2) {
+ BUG();
+ return 0;
+ } else {
long v1, v2;
- struct pmbus_sensor *sensor1, *sensor2;
- sensor1 = &data->sensors[s1];
- if (sensor1->data < 0)
- return sensor1->data;
- sensor2 = &data->sensors[s2];
- if (sensor2->data < 0)
- return sensor2->data;
+ if (s1->data < 0)
+ return s1->data;
+ if (s2->data < 0)
+ return s2->data;
- v1 = pmbus_reg2data(data, sensor1);
- v2 = pmbus_reg2data(data, sensor2);
+ v1 = pmbus_reg2data(data, s1);
+ v2 = pmbus_reg2data(data, s2);
ret = !!(regval && v1 >= v2);
}
return ret;
@@ -748,23 +707,22 @@ static ssize_t pmbus_show_boolean(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_boolean *boolean = to_pmbus_boolean(attr);
struct pmbus_data *data = pmbus_update_device(dev);
int val;
- val = pmbus_get_boolean(data, attr->index);
+ val = pmbus_get_boolean(data, boolean, attr->index);
if (val < 0)
return val;
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
static ssize_t pmbus_show_sensor(struct device *dev,
- struct device_attribute *da, char *buf)
+ struct device_attribute *devattr, char *buf)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct pmbus_data *data = pmbus_update_device(dev);
- struct pmbus_sensor *sensor;
+ struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
- sensor = &data->sensors[attr->index];
if (sensor->data < 0)
return sensor->data;
@@ -775,10 +733,9 @@ static ssize_t pmbus_set_sensor(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct pmbus_data *data = i2c_get_clientdata(client);
- struct pmbus_sensor *sensor = &data->sensors[attr->index];
+ struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
ssize_t rv = count;
long val = 0;
int ret;
@@ -793,7 +750,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
if (ret < 0)
rv = ret;
else
- data->sensors[attr->index].data = regval;
+ sensor->data = regval;
mutex_unlock(&data->update_lock);
return rv;
}
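
The to_pmbus_sensor() conversion used in the two callbacks above relies on the device_attribute being embedded in the sensor structure, so container_of() recovers the owning object without any global array lookup. A stripped-down sketch of the same pattern (names are illustrative, not from the patch):

	struct example_sensor {
		struct device_attribute attribute;
		int data;
	};

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *da, char *buf)
	{
		struct example_sensor *s = container_of(da, struct example_sensor,
							attribute);

		return sprintf(buf, "%d\n", s->data);
	}
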
@@ -801,102 +758,130 @@ static ssize_t pmbus_set_sensor(struct device *dev,
static ssize_t pmbus_show_label(struct device *dev,
struct device_attribute *da, char *buf)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct pmbus_data *data = i2c_get_clientdata(client);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_label *label = to_pmbus_label(da);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- data->labels[attr->index].label);
+ return snprintf(buf, PAGE_SIZE, "%s\n", label->label);
}
-#define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
-do { \
- struct sensor_device_attribute *a \
- = &data->_type##s[data->num_##_type##s].attribute; \
- BUG_ON(data->num_attributes >= data->max_attributes); \
- sysfs_attr_init(&a->dev_attr.attr); \
- a->dev_attr.attr.name = _name; \
- a->dev_attr.attr.mode = _mode; \
- a->dev_attr.show = _show; \
- a->dev_attr.store = _set; \
- a->index = _idx; \
- data->attributes[data->num_attributes] = &a->dev_attr.attr; \
- data->num_attributes++; \
-} while (0)
-
-#define PMBUS_ADD_GET_ATTR(data, _name, _type, _idx) \
- PMBUS_ADD_ATTR(data, _name, _idx, S_IRUGO, _type, \
- pmbus_show_##_type, NULL)
-
-#define PMBUS_ADD_SET_ATTR(data, _name, _type, _idx) \
- PMBUS_ADD_ATTR(data, _name, _idx, S_IWUSR | S_IRUGO, _type, \
- pmbus_show_##_type, pmbus_set_##_type)
-
-static void pmbus_add_boolean(struct pmbus_data *data,
- const char *name, const char *type, int seq,
- int idx)
+static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
{
- struct pmbus_boolean *boolean;
-
- BUG_ON(data->num_booleans >= data->max_booleans);
-
- boolean = &data->booleans[data->num_booleans];
+ if (data->num_attributes >= data->max_attributes - 1) {
+ data->max_attributes += PMBUS_ATTR_ALLOC_SIZE;
+ data->group.attrs = krealloc(data->group.attrs,
+ sizeof(struct attribute *) *
+ data->max_attributes, GFP_KERNEL);
+ if (data->group.attrs == NULL)
+ return -ENOMEM;
+ }
- snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s",
- name, seq, type);
- PMBUS_ADD_GET_ATTR(data, boolean->name, boolean, idx);
- data->num_booleans++;
+ data->group.attrs[data->num_attributes++] = attr;
+ data->group.attrs[data->num_attributes] = NULL;
+ return 0;
}
-static void pmbus_add_boolean_reg(struct pmbus_data *data,
- const char *name, const char *type,
- int seq, int reg, int bit)
+static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
+ const char *name,
+ umode_t mode,
+ ssize_t (*show)(struct device *dev,
+ struct device_attribute *attr,
+ char *buf),
+ ssize_t (*store)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count))
{
- pmbus_add_boolean(data, name, type, seq, (reg << 8) | bit);
+ sysfs_attr_init(&dev_attr->attr);
+ dev_attr->attr.name = name;
+ dev_attr->attr.mode = mode;
+ dev_attr->show = show;
+ dev_attr->store = store;
}
-static void pmbus_add_boolean_cmp(struct pmbus_data *data,
- const char *name, const char *type,
- int seq, int i1, int i2, int reg, int mask)
+static void pmbus_attr_init(struct sensor_device_attribute *a,
+ const char *name,
+ umode_t mode,
+ ssize_t (*show)(struct device *dev,
+ struct device_attribute *attr,
+ char *buf),
+ ssize_t (*store)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count),
+ int idx)
{
- pmbus_add_boolean(data, name, type, seq,
- (i1 << 24) | (i2 << 16) | (reg << 8) | mask);
+ pmbus_dev_attr_init(&a->dev_attr, name, mode, show, store);
+ a->index = idx;
}
-static void pmbus_add_sensor(struct pmbus_data *data,
+static int pmbus_add_boolean(struct pmbus_data *data,
const char *name, const char *type, int seq,
- int page, int reg, enum pmbus_sensor_classes class,
- bool update, bool readonly)
+ struct pmbus_sensor *s1,
+ struct pmbus_sensor *s2,
+ u16 reg, u8 mask)
+{
+ struct pmbus_boolean *boolean;
+ struct sensor_device_attribute *a;
+
+ boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
+ if (!boolean)
+ return -ENOMEM;
+
+ a = &boolean->attribute;
+
+ snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s",
+ name, seq, type);
+ boolean->s1 = s1;
+ boolean->s2 = s2;
+ pmbus_attr_init(a, boolean->name, S_IRUGO, pmbus_show_boolean, NULL,
+ (reg << 8) | mask);
+
+ return pmbus_add_attribute(data, &a->dev_attr.attr);
+}
+
+static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
+ const char *name, const char *type,
+ int seq, int page, int reg,
+ enum pmbus_sensor_classes class,
+ bool update, bool readonly)
{
struct pmbus_sensor *sensor;
+ struct device_attribute *a;
- BUG_ON(data->num_sensors >= data->max_sensors);
+ sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return NULL;
+ a = &sensor->attribute;
- sensor = &data->sensors[data->num_sensors];
snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
name, seq, type);
sensor->page = page;
sensor->reg = reg;
sensor->class = class;
sensor->update = update;
- if (readonly)
- PMBUS_ADD_GET_ATTR(data, sensor->name, sensor,
- data->num_sensors);
- else
- PMBUS_ADD_SET_ATTR(data, sensor->name, sensor,
- data->num_sensors);
- data->num_sensors++;
+ pmbus_dev_attr_init(a, sensor->name,
+ readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
+ pmbus_show_sensor, pmbus_set_sensor);
+
+ if (pmbus_add_attribute(data, &a->attr))
+ return NULL;
+
+ sensor->next = data->sensors;
+ data->sensors = sensor;
+
+ return sensor;
}
-static void pmbus_add_label(struct pmbus_data *data,
- const char *name, int seq,
- const char *lstring, int index)
+static int pmbus_add_label(struct pmbus_data *data,
+ const char *name, int seq,
+ const char *lstring, int index)
{
struct pmbus_label *label;
+ struct device_attribute *a;
- BUG_ON(data->num_labels >= data->max_labels);
+ label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
+ if (!label)
+ return -ENOMEM;
+
+ a = &label->attribute;
- label = &data->labels[data->num_labels];
snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq);
if (!index)
strncpy(label->label, lstring, sizeof(label->label) - 1);
@@ -904,65 +889,8 @@ static void pmbus_add_label(struct pmbus_data *data,
snprintf(label->label, sizeof(label->label), "%s%d", lstring,
index);
- PMBUS_ADD_GET_ATTR(data, label->name, label, data->num_labels);
- data->num_labels++;
-}
-
-/*
- * Determine maximum number of sensors, booleans, and labels.
- * To keep things simple, only make a rough high estimate.
- */
-static void pmbus_find_max_attr(struct i2c_client *client,
- struct pmbus_data *data)
-{
- const struct pmbus_driver_info *info = data->info;
- int page, max_sensors, max_booleans, max_labels;
-
- max_sensors = PMBUS_MAX_INPUT_SENSORS;
- max_booleans = PMBUS_MAX_INPUT_BOOLEANS;
- max_labels = PMBUS_MAX_INPUT_LABELS;
-
- for (page = 0; page < info->pages; page++) {
- if (info->func[page] & PMBUS_HAVE_VOUT) {
- max_sensors += PMBUS_VOUT_SENSORS_PER_PAGE;
- max_booleans += PMBUS_VOUT_BOOLEANS_PER_PAGE;
- max_labels++;
- }
- if (info->func[page] & PMBUS_HAVE_IOUT) {
- max_sensors += PMBUS_IOUT_SENSORS_PER_PAGE;
- max_booleans += PMBUS_IOUT_BOOLEANS_PER_PAGE;
- max_labels++;
- }
- if (info->func[page] & PMBUS_HAVE_POUT) {
- max_sensors += PMBUS_POUT_SENSORS_PER_PAGE;
- max_booleans += PMBUS_POUT_BOOLEANS_PER_PAGE;
- max_labels++;
- }
- if (info->func[page] & PMBUS_HAVE_FAN12) {
- max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
- max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
- }
- if (info->func[page] & PMBUS_HAVE_FAN34) {
- max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
- max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
- }
- if (info->func[page] & PMBUS_HAVE_TEMP) {
- max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
- max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
- }
- if (info->func[page] & PMBUS_HAVE_TEMP2) {
- max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
- max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
- }
- if (info->func[page] & PMBUS_HAVE_TEMP3) {
- max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
- max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
- }
- }
- data->max_sensors = max_sensors;
- data->max_booleans = max_booleans;
- data->max_labels = max_labels;
- data->max_attributes = max_sensors + max_booleans + max_labels;
+ pmbus_dev_attr_init(a, label->name, S_IRUGO, pmbus_show_label, NULL);
+ return pmbus_add_attribute(data, &a->attr);
}
/*
@@ -975,12 +903,12 @@ static void pmbus_find_max_attr(struct i2c_client *client,
*/
struct pmbus_limit_attr {
u16 reg; /* Limit register */
+ u16 sbit; /* Alarm attribute status bit */
bool update; /* True if register needs updates */
bool low; /* True if low limit; for limits with compare
functions only */
const char *attr; /* Attribute name */
const char *alarm; /* Alarm attribute name */
- u32 sbit; /* Alarm attribute status bit */
};
/*
@@ -988,7 +916,9 @@ struct pmbus_limit_attr {
* description includes a reference to the associated limit attributes.
*/
struct pmbus_sensor_attr {
- u8 reg; /* sensor register */
+ u16 reg; /* sensor register */
+ u8 gbit; /* generic status bit */
+ u8 nlimit; /* # of limit registers */
enum pmbus_sensor_classes class;/* sensor class */
const char *label; /* sensor label */
bool paged; /* true if paged sensor */
@@ -997,47 +927,47 @@ struct pmbus_sensor_attr {
u32 func; /* sensor mask */
u32 sfunc; /* sensor status mask */
int sbase; /* status base register */
- u32 gbit; /* generic status bit */
const struct pmbus_limit_attr *limit;/* limit registers */
- int nlimit; /* # of limit registers */
};
/*
* Add a set of limit attributes and, if supported, the associated
* alarm attributes.
+ * returns 0 if no alarm register found, 1 if an alarm register was found,
+ * < 0 on errors.
*/
-static bool pmbus_add_limit_attrs(struct i2c_client *client,
- struct pmbus_data *data,
- const struct pmbus_driver_info *info,
- const char *name, int index, int page,
- int cbase,
- const struct pmbus_sensor_attr *attr)
+static int pmbus_add_limit_attrs(struct i2c_client *client,
+ struct pmbus_data *data,
+ const struct pmbus_driver_info *info,
+ const char *name, int index, int page,
+ struct pmbus_sensor *base,
+ const struct pmbus_sensor_attr *attr)
{
const struct pmbus_limit_attr *l = attr->limit;
int nlimit = attr->nlimit;
- bool have_alarm = false;
- int i, cindex;
+ int have_alarm = 0;
+ int i, ret;
+ struct pmbus_sensor *curr;
for (i = 0; i < nlimit; i++) {
if (pmbus_check_word_register(client, page, l->reg)) {
- cindex = data->num_sensors;
- pmbus_add_sensor(data, name, l->attr, index, page,
- l->reg, attr->class,
- attr->update || l->update,
- false);
+ curr = pmbus_add_sensor(data, name, l->attr, index,
+ page, l->reg, attr->class,
+ attr->update || l->update,
+ false);
+ if (!curr)
+ return -ENOMEM;
if (l->sbit && (info->func[page] & attr->sfunc)) {
- if (attr->compare) {
- pmbus_add_boolean_cmp(data, name,
- l->alarm, index,
- l->low ? cindex : cbase,
- l->low ? cbase : cindex,
- attr->sbase + page, l->sbit);
- } else {
- pmbus_add_boolean_reg(data, name,
- l->alarm, index,
- attr->sbase + page, l->sbit);
- }
- have_alarm = true;
+ ret = pmbus_add_boolean(data, name,
+ l->alarm, index,
+ attr->compare ? l->low ? curr : base
+ : NULL,
+ attr->compare ? l->low ? base : curr
+ : NULL,
+ attr->sbase + page, l->sbit);
+ if (ret)
+ return ret;
+ have_alarm = 1;
}
}
l++;
@@ -1045,45 +975,59 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
return have_alarm;
}
-static void pmbus_add_sensor_attrs_one(struct i2c_client *client,
- struct pmbus_data *data,
- const struct pmbus_driver_info *info,
- const char *name,
- int index, int page,
- const struct pmbus_sensor_attr *attr)
+static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
+ struct pmbus_data *data,
+ const struct pmbus_driver_info *info,
+ const char *name,
+ int index, int page,
+ const struct pmbus_sensor_attr *attr)
{
- bool have_alarm;
- int cbase = data->num_sensors;
-
- if (attr->label)
- pmbus_add_label(data, name, index, attr->label,
- attr->paged ? page + 1 : 0);
- pmbus_add_sensor(data, name, "input", index, page, attr->reg,
- attr->class, true, true);
+ struct pmbus_sensor *base;
+ int ret;
+
+ if (attr->label) {
+ ret = pmbus_add_label(data, name, index, attr->label,
+ attr->paged ? page + 1 : 0);
+ if (ret)
+ return ret;
+ }
+ base = pmbus_add_sensor(data, name, "input", index, page, attr->reg,
+ attr->class, true, true);
+ if (!base)
+ return -ENOMEM;
if (attr->sfunc) {
- have_alarm = pmbus_add_limit_attrs(client, data, info, name,
- index, page, cbase, attr);
+ ret = pmbus_add_limit_attrs(client, data, info, name,
+ index, page, base, attr);
+ if (ret < 0)
+ return ret;
/*
* Add generic alarm attribute only if there are no individual
* alarm attributes, if there is a global alarm bit, and if
* the generic status register for this page is accessible.
*/
- if (!have_alarm && attr->gbit &&
- pmbus_check_byte_register(client, page, PMBUS_STATUS_BYTE))
- pmbus_add_boolean_reg(data, name, "alarm", index,
- PB_STATUS_BASE + page,
- attr->gbit);
+ if (!ret && attr->gbit &&
+ pmbus_check_byte_register(client, page,
+ data->status_register)) {
+ ret = pmbus_add_boolean(data, name, "alarm", index,
+ NULL, NULL,
+ PB_STATUS_BASE + page,
+ attr->gbit);
+ if (ret)
+ return ret;
+ }
}
+ return 0;
}
-static void pmbus_add_sensor_attrs(struct i2c_client *client,
- struct pmbus_data *data,
- const char *name,
- const struct pmbus_sensor_attr *attrs,
- int nattrs)
+static int pmbus_add_sensor_attrs(struct i2c_client *client,
+ struct pmbus_data *data,
+ const char *name,
+ const struct pmbus_sensor_attr *attrs,
+ int nattrs)
{
const struct pmbus_driver_info *info = data->info;
int index, i;
+ int ret;
index = 1;
for (i = 0; i < nattrs; i++) {
@@ -1093,12 +1037,16 @@ static void pmbus_add_sensor_attrs(struct i2c_client *client,
for (page = 0; page < pages; page++) {
if (!(info->func[page] & attrs->func))
continue;
- pmbus_add_sensor_attrs_one(client, data, info, name,
- index, page, attrs);
+ ret = pmbus_add_sensor_attrs_one(client, data, info,
+ name, index, page,
+ attrs);
+ if (ret)
+ return ret;
index++;
}
attrs++;
}
+ return 0;
}
static const struct pmbus_limit_attr vin_limit_attrs[] = {
@@ -1140,6 +1088,30 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
},
};
+static const struct pmbus_limit_attr vmon_limit_attrs[] = {
+ {
+ .reg = PMBUS_VIRT_VMON_UV_WARN_LIMIT,
+ .attr = "min",
+ .alarm = "min_alarm",
+ .sbit = PB_VOLTAGE_UV_WARNING,
+ }, {
+ .reg = PMBUS_VIRT_VMON_UV_FAULT_LIMIT,
+ .attr = "lcrit",
+ .alarm = "lcrit_alarm",
+ .sbit = PB_VOLTAGE_UV_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_VMON_OV_WARN_LIMIT,
+ .attr = "max",
+ .alarm = "max_alarm",
+ .sbit = PB_VOLTAGE_OV_WARNING,
+ }, {
+ .reg = PMBUS_VIRT_VMON_OV_FAULT_LIMIT,
+ .attr = "crit",
+ .alarm = "crit_alarm",
+ .sbit = PB_VOLTAGE_OV_FAULT,
+ }
+};
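
For orientation (an illustration, not part of the patch): with PMBUS_HAVE_VMON and PMBUS_HAVE_STATUS_VMON set, the core turns this table into an ordinary voltage channel. Assuming the vmon channel is assigned hwmon index 5 at runtime (the index is hypothetical), and assuming the driver implements the corresponding virtual limit registers, the resulting sysfs files would be in5_input, in5_label (containing "vmon"), in5_min, in5_min_alarm, in5_lcrit, in5_lcrit_alarm, in5_max, in5_max_alarm, in5_crit and in5_crit_alarm.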
+
static const struct pmbus_limit_attr vout_limit_attrs[] = {
{
.reg = PMBUS_VOUT_UV_WARN_LIMIT,
@@ -1191,6 +1163,15 @@ static const struct pmbus_sensor_attr voltage_attributes[] = {
.limit = vin_limit_attrs,
.nlimit = ARRAY_SIZE(vin_limit_attrs),
}, {
+ .reg = PMBUS_VIRT_READ_VMON,
+ .class = PSC_VOLTAGE_IN,
+ .label = "vmon",
+ .func = PMBUS_HAVE_VMON,
+ .sfunc = PMBUS_HAVE_STATUS_VMON,
+ .sbase = PB_STATUS_VMON_BASE,
+ .limit = vmon_limit_attrs,
+ .nlimit = ARRAY_SIZE(vmon_limit_attrs),
+ }, {
.reg = PMBUS_READ_VCAP,
.class = PSC_VOLTAGE_IN,
.label = "vcap",
@@ -1553,12 +1534,13 @@ static const u32 pmbus_fan_status_flags[] = {
};
/* Fans */
-static void pmbus_add_fan_attributes(struct i2c_client *client,
- struct pmbus_data *data)
+static int pmbus_add_fan_attributes(struct i2c_client *client,
+ struct pmbus_data *data)
{
const struct pmbus_driver_info *info = data->info;
int index = 1;
int page;
+ int ret;
for (page = 0; page < info->pages; page++) {
int f;
@@ -1584,9 +1566,10 @@ static void pmbus_add_fan_attributes(struct i2c_client *client,
(!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4)))))
continue;
- pmbus_add_sensor(data, "fan", "input", index, page,
- pmbus_fan_registers[f], PSC_FAN, true,
- true);
+ if (pmbus_add_sensor(data, "fan", "input", index,
+ page, pmbus_fan_registers[f],
+ PSC_FAN, true, true) == NULL)
+ return -ENOMEM;
/*
* Each fan status register covers multiple fans,
@@ -1601,39 +1584,55 @@ static void pmbus_add_fan_attributes(struct i2c_client *client,
base = PB_STATUS_FAN34_BASE + page;
else
base = PB_STATUS_FAN_BASE + page;
- pmbus_add_boolean_reg(data, "fan", "alarm",
- index, base,
+ ret = pmbus_add_boolean(data, "fan",
+ "alarm", index, NULL, NULL, base,
PB_FAN_FAN1_WARNING >> (f & 1));
- pmbus_add_boolean_reg(data, "fan", "fault",
- index, base,
+ if (ret)
+ return ret;
+ ret = pmbus_add_boolean(data, "fan",
+ "fault", index, NULL, NULL, base,
PB_FAN_FAN1_FAULT >> (f & 1));
+ if (ret)
+ return ret;
}
index++;
}
}
+ return 0;
}
-static void pmbus_find_attributes(struct i2c_client *client,
- struct pmbus_data *data)
+static int pmbus_find_attributes(struct i2c_client *client,
+ struct pmbus_data *data)
{
+ int ret;
+
/* Voltage sensors */
- pmbus_add_sensor_attrs(client, data, "in", voltage_attributes,
- ARRAY_SIZE(voltage_attributes));
+ ret = pmbus_add_sensor_attrs(client, data, "in", voltage_attributes,
+ ARRAY_SIZE(voltage_attributes));
+ if (ret)
+ return ret;
/* Current sensors */
- pmbus_add_sensor_attrs(client, data, "curr", current_attributes,
- ARRAY_SIZE(current_attributes));
+ ret = pmbus_add_sensor_attrs(client, data, "curr", current_attributes,
+ ARRAY_SIZE(current_attributes));
+ if (ret)
+ return ret;
/* Power sensors */
- pmbus_add_sensor_attrs(client, data, "power", power_attributes,
- ARRAY_SIZE(power_attributes));
+ ret = pmbus_add_sensor_attrs(client, data, "power", power_attributes,
+ ARRAY_SIZE(power_attributes));
+ if (ret)
+ return ret;
/* Temperature sensors */
- pmbus_add_sensor_attrs(client, data, "temp", temp_attributes,
- ARRAY_SIZE(temp_attributes));
+ ret = pmbus_add_sensor_attrs(client, data, "temp", temp_attributes,
+ ARRAY_SIZE(temp_attributes));
+ if (ret)
+ return ret;
/* Fans */
- pmbus_add_fan_attributes(client, data);
+ ret = pmbus_add_fan_attributes(client, data);
+ return ret;
}
/*
@@ -1672,127 +1671,119 @@ static int pmbus_identify_common(struct i2c_client *client,
}
}
- /* Determine maximum number of sensors, booleans, and labels */
- pmbus_find_max_attr(client, data);
pmbus_clear_fault_page(client, 0);
return 0;
}
-int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
- struct pmbus_driver_info *info)
+static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
+ struct pmbus_driver_info *info)
{
- const struct pmbus_platform_data *pdata = client->dev.platform_data;
- struct pmbus_data *data;
+ struct device *dev = &client->dev;
int ret;
- if (!info) {
- dev_err(&client->dev, "Missing chip information");
- return -ENODEV;
- }
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
- | I2C_FUNC_SMBUS_BYTE_DATA
- | I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
-
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(&client->dev, "No memory to allocate driver data\n");
- return -ENOMEM;
- }
-
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
-
- /* Bail out if PMBus status register does not exist. */
- if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) {
- dev_err(&client->dev, "PMBus status register not found\n");
- return -ENODEV;
+ /*
+ * Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
+ * to use PMBUS_STATUS_WORD instead if that is the case.
+ * Bail out if both registers are not supported.
+ */
+ data->status_register = PMBUS_STATUS_BYTE;
+ ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE);
+ if (ret < 0 || ret == 0xff) {
+ data->status_register = PMBUS_STATUS_WORD;
+ ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD);
+ if (ret < 0 || ret == 0xffff) {
+ dev_err(dev, "PMBus status register not found\n");
+ return -ENODEV;
+ }
}
- if (pdata)
- data->flags = pdata->flags;
- data->info = info;
-
pmbus_clear_faults(client);
if (info->identify) {
ret = (*info->identify)(client, info);
if (ret < 0) {
- dev_err(&client->dev, "Chip identification failed\n");
+ dev_err(dev, "Chip identification failed\n");
return ret;
}
}
if (info->pages <= 0 || info->pages > PMBUS_PAGES) {
- dev_err(&client->dev, "Bad number of PMBus pages: %d\n",
- info->pages);
+ dev_err(dev, "Bad number of PMBus pages: %d\n", info->pages);
return -ENODEV;
}
ret = pmbus_identify_common(client, data);
if (ret < 0) {
- dev_err(&client->dev, "Failed to identify chip capabilities\n");
+ dev_err(dev, "Failed to identify chip capabilities\n");
return ret;
}
+ return 0;
+}
- ret = -ENOMEM;
- data->sensors = devm_kzalloc(&client->dev, sizeof(struct pmbus_sensor)
- * data->max_sensors, GFP_KERNEL);
- if (!data->sensors) {
- dev_err(&client->dev, "No memory to allocate sensor data\n");
- return -ENOMEM;
- }
+int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+ struct pmbus_driver_info *info)
+{
+ struct device *dev = &client->dev;
+ const struct pmbus_platform_data *pdata = dev->platform_data;
+ struct pmbus_data *data;
+ int ret;
- data->booleans = devm_kzalloc(&client->dev, sizeof(struct pmbus_boolean)
- * data->max_booleans, GFP_KERNEL);
- if (!data->booleans) {
- dev_err(&client->dev, "No memory to allocate boolean data\n");
- return -ENOMEM;
- }
+ if (!info)
+ return -ENODEV;
- data->labels = devm_kzalloc(&client->dev, sizeof(struct pmbus_label)
- * data->max_labels, GFP_KERNEL);
- if (!data->labels) {
- dev_err(&client->dev, "No memory to allocate label data\n");
- return -ENOMEM;
- }
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
+ | I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
- data->attributes = devm_kzalloc(&client->dev, sizeof(struct attribute *)
- * data->max_attributes, GFP_KERNEL);
- if (!data->attributes) {
- dev_err(&client->dev, "No memory to allocate attribute data\n");
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
return -ENOMEM;
- }
- pmbus_find_attributes(client, data);
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+ data->dev = dev;
+
+ if (pdata)
+ data->flags = pdata->flags;
+ data->info = info;
+
+ ret = pmbus_init_common(client, data, info);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_find_attributes(client, data);
+ if (ret)
+ goto out_kfree;
/*
* If there are no attributes, something is wrong.
* Bail out instead of trying to register nothing.
*/
if (!data->num_attributes) {
- dev_err(&client->dev, "No attributes found\n");
- return -ENODEV;
+ dev_err(dev, "No attributes found\n");
+ ret = -ENODEV;
+ goto out_kfree;
}
/* Register sysfs hooks */
- data->group.attrs = data->attributes;
- ret = sysfs_create_group(&client->dev.kobj, &data->group);
+ ret = sysfs_create_group(&dev->kobj, &data->group);
if (ret) {
- dev_err(&client->dev, "Failed to create sysfs entries\n");
- return ret;
+ dev_err(dev, "Failed to create sysfs entries\n");
+ goto out_kfree;
}
- data->hwmon_dev = hwmon_device_register(&client->dev);
+ data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
ret = PTR_ERR(data->hwmon_dev);
- dev_err(&client->dev, "Failed to register hwmon device\n");
+ dev_err(dev, "Failed to register hwmon device\n");
goto out_hwmon_device_register;
}
return 0;
out_hwmon_device_register:
- sysfs_remove_group(&client->dev.kobj, &data->group);
+ sysfs_remove_group(&dev->kobj, &data->group);
+out_kfree:
+ kfree(data->group.attrs);
return ret;
}
EXPORT_SYMBOL_GPL(pmbus_do_probe);
@@ -1802,6 +1793,7 @@ int pmbus_do_remove(struct i2c_client *client)
struct pmbus_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &data->group);
+ kfree(data->group.attrs);
return 0;
}
EXPORT_SYMBOL_GPL(pmbus_do_remove);
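
A side note on the new cleanup paths (not part of the patch text): the attribute pointer array behind data->group.attrs is no longer sized up front and devm-allocated; it is presumably grown on demand by the add-attribute helpers earlier in this patch, which is why both the probe error path (out_kfree) and pmbus_do_remove() now free it explicitly with kfree().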
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index fc5eed8e85bb..819644121259 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -2,6 +2,7 @@
* Hardware monitoring driver for ZL6100 and compatibles
*
* Copyright (c) 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -45,12 +46,87 @@ struct zl6100_data {
#define ZL6100_MFR_XTEMP_ENABLE (1 << 7)
+#define MFR_VMON_OV_FAULT_LIMIT 0xf5
+#define MFR_VMON_UV_FAULT_LIMIT 0xf6
+#define MFR_READ_VMON 0xf7
+
+#define VMON_UV_WARNING (1 << 5)
+#define VMON_OV_WARNING (1 << 4)
+#define VMON_UV_FAULT (1 << 1)
+#define VMON_OV_FAULT (1 << 0)
+
#define ZL6100_WAIT_TIME 1000 /* uS */
static ushort delay = ZL6100_WAIT_TIME;
module_param(delay, ushort, 0644);
MODULE_PARM_DESC(delay, "Delay between chip accesses in uS");
+/* Convert linear sensor value to milli-units */
+static long zl6100_l2d(s16 l)
+{
+ s16 exponent;
+ s32 mantissa;
+ long val;
+
+ exponent = l >> 11;
+ mantissa = ((s16)((l & 0x7ff) << 5)) >> 5;
+
+ val = mantissa;
+
+ /* scale result to milli-units */
+ val = val * 1000L;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ return val;
+}
+
+#define MAX_MANTISSA (1023 * 1000)
+#define MIN_MANTISSA (511 * 1000)
+
+static u16 zl6100_d2l(long val)
+{
+ s16 exponent = 0, mantissa;
+ bool negative = false;
+
+ /* simple case */
+ if (val == 0)
+ return 0;
+
+ if (val < 0) {
+ negative = true;
+ val = -val;
+ }
+
+ /* Reduce large mantissa until it fits into 10 bit */
+ while (val >= MAX_MANTISSA && exponent < 15) {
+ exponent++;
+ val >>= 1;
+ }
+ /* Increase small mantissa to improve precision */
+ while (val < MIN_MANTISSA && exponent > -15) {
+ exponent--;
+ val <<= 1;
+ }
+
+ /* Convert mantissa from milli-units to units */
+ mantissa = DIV_ROUND_CLOSEST(val, 1000);
+
+ /* Ensure that resulting number is within range */
+ if (mantissa > 0x3ff)
+ mantissa = 0x3ff;
+
+ /* restore sign */
+ if (negative)
+ mantissa = -mantissa;
+
+ /* Convert to 5 bit exponent, 11 bit mantissa */
+ return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
+}
+
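
The two helpers above convert between milli-units and the 16-bit "linear" word used by the VMON limit registers: an 11-bit signed mantissa in bits 10:0 and a 5-bit signed exponent in bits 15:11, value = mantissa * 2^exponent. A rough worked example, not part of the patch:

/*
 * Encoding 3.3 V (3300 milli-units) with zl6100_d2l(): the value is
 * doubled eight times to reach MIN_MANTISSA, giving exponent -8 and
 * mantissa DIV_ROUND_CLOSEST(844800, 1000) = 845, i.e. the register
 * word 0xc34d (845 * 2^-8 ~= 3.3008 V).  Decoding 0xc34d with
 * zl6100_l2d() computes 845 * 1000 >> 8 = 3300 again.
 */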
/* Some chips need a delay between accesses */
static inline void zl6100_wait(const struct zl6100_data *data)
{
@@ -65,9 +141,9 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct zl6100_data *data = to_zl6100_data(info);
- int ret;
+ int ret, vreg;
- if (page || reg >= PMBUS_VIRT_BASE)
+ if (page > 0)
return -ENXIO;
if (data->id == zl2005) {
@@ -83,9 +159,39 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
}
}
+ switch (reg) {
+ case PMBUS_VIRT_READ_VMON:
+ vreg = MFR_READ_VMON;
+ break;
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ case PMBUS_VIRT_VMON_OV_FAULT_LIMIT:
+ vreg = MFR_VMON_OV_FAULT_LIMIT;
+ break;
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ case PMBUS_VIRT_VMON_UV_FAULT_LIMIT:
+ vreg = MFR_VMON_UV_FAULT_LIMIT;
+ break;
+ default:
+ if (reg >= PMBUS_VIRT_BASE)
+ return -ENXIO;
+ vreg = reg;
+ break;
+ }
+
zl6100_wait(data);
- ret = pmbus_read_word_data(client, page, reg);
+ ret = pmbus_read_word_data(client, page, vreg);
data->access = ktime_get();
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ ret = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(ret) * 9, 10));
+ break;
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ ret = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(ret) * 11, 10));
+ break;
+ }
return ret;
}
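
These chips only expose manufacturer-specific fault limits for the monitored voltage (MFR_VMON_OV/UV_FAULT_LIMIT), so the warning limits behind the virtual registers are synthesized: on reads the over-voltage warning is reported as 9/10 of the OV fault limit and the under-voltage warning as 11/10 of the UV fault limit, and the write path further down applies the inverse factors so a written warning value maps back onto the same fault register. A small illustration with made-up numbers, not from any datasheet:

/*
 * With MFR_VMON_OV_FAULT_LIMIT holding 5.5 V, reading
 * PMBUS_VIRT_VMON_OV_WARN_LIMIT returns d2l(DIV_ROUND_CLOSEST(5500 * 9, 10))
 * = 4.95 V; writing 4.95 V back through the warn limit stores
 * d2l(DIV_ROUND_CLOSEST(4950 * 10, 9)) = 5.5 V in the fault register.
 */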
@@ -94,13 +200,35 @@ static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct zl6100_data *data = to_zl6100_data(info);
- int ret;
+ int ret, status;
if (page > 0)
return -ENXIO;
zl6100_wait(data);
- ret = pmbus_read_byte_data(client, page, reg);
+
+ switch (reg) {
+ case PMBUS_VIRT_STATUS_VMON:
+ ret = pmbus_read_byte_data(client, 0,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (ret < 0)
+ break;
+
+ status = 0;
+ if (ret & VMON_UV_WARNING)
+ status |= PB_VOLTAGE_UV_WARNING;
+ if (ret & VMON_OV_WARNING)
+ status |= PB_VOLTAGE_OV_WARNING;
+ if (ret & VMON_UV_FAULT)
+ status |= PB_VOLTAGE_UV_FAULT;
+ if (ret & VMON_OV_FAULT)
+ status |= PB_VOLTAGE_OV_FAULT;
+ ret = status;
+ break;
+ default:
+ ret = pmbus_read_byte_data(client, page, reg);
+ break;
+ }
data->access = ktime_get();
return ret;
@@ -111,13 +239,38 @@ static int zl6100_write_word_data(struct i2c_client *client, int page, int reg,
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
struct zl6100_data *data = to_zl6100_data(info);
- int ret;
+ int ret, vreg;
- if (page || reg >= PMBUS_VIRT_BASE)
+ if (page > 0)
return -ENXIO;
+ switch (reg) {
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ word = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(word) * 10, 9));
+ vreg = MFR_VMON_OV_FAULT_LIMIT;
+ pmbus_clear_cache(client);
+ break;
+ case PMBUS_VIRT_VMON_OV_FAULT_LIMIT:
+ vreg = MFR_VMON_OV_FAULT_LIMIT;
+ pmbus_clear_cache(client);
+ break;
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ word = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(word) * 10, 11));
+ vreg = MFR_VMON_UV_FAULT_LIMIT;
+ pmbus_clear_cache(client);
+ break;
+ case PMBUS_VIRT_VMON_UV_FAULT_LIMIT:
+ vreg = MFR_VMON_UV_FAULT_LIMIT;
+ pmbus_clear_cache(client);
+ break;
+ default:
+ if (reg >= PMBUS_VIRT_BASE)
+ return -ENXIO;
+ vreg = reg;
+ }
+
zl6100_wait(data);
- ret = pmbus_write_word_data(client, page, reg, word);
+ ret = pmbus_write_word_data(client, page, vreg, word);
data->access = ktime_get();
return ret;
@@ -225,6 +378,13 @@ static int zl6100_probe(struct i2c_client *client,
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ /*
+ * ZL2004, ZL9101M, and ZL9117M support monitoring an extra voltage
+ * (VMON for ZL2004, VDRV for ZL9101M and ZL9117M). Report it as vmon.
+ */
+ if (data->id == zl2004 || data->id == zl9101 || data->id == zl9117)
+ info->func[0] |= PMBUS_HAVE_VMON | PMBUS_HAVE_STATUS_VMON;
+
ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 1c85d39df171..bfe326e896df 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -139,12 +139,12 @@ static const u8 sht15_crc8_table[] = {
* @reg: associated regulator (if specified).
* @nb: notifier block to handle notifications of voltage
* changes.
- * @supply_uV: local copy of supply voltage used to allow use of
+ * @supply_uv: local copy of supply voltage used to allow use of
* regulator consumer if available.
- * @supply_uV_valid: indicates that an updated value has not yet been
+ * @supply_uv_valid: indicates that an updated value has not yet been
* obtained from the regulator and so any calculations
* based upon it will be invalid.
- * @update_supply_work: work struct that is used to update the supply_uV.
+ * @update_supply_work: work struct that is used to update the supply_uv.
* @interrupt_handled: flag used to indicate a handler has been scheduled.
*/
struct sht15_data {
@@ -166,8 +166,8 @@ struct sht15_data {
struct device *hwmon_dev;
struct regulator *reg;
struct notifier_block nb;
- int supply_uV;
- bool supply_uV_valid;
+ int supply_uv;
+ bool supply_uv_valid;
struct work_struct update_supply_work;
atomic_t interrupt_handled;
};
@@ -212,11 +212,13 @@ static u8 sht15_crc8(struct sht15_data *data,
*
* This implements section 3.4 of the data sheet
*/
-static void sht15_connection_reset(struct sht15_data *data)
+static int sht15_connection_reset(struct sht15_data *data)
{
- int i;
+ int i, err;
- gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpio_direction_output(data->pdata->gpio_data, 1);
+ if (err)
+ return err;
ndelay(SHT15_TSCKL);
gpio_set_value(data->pdata->gpio_sck, 0);
ndelay(SHT15_TSCKL);
@@ -226,6 +228,7 @@ static void sht15_connection_reset(struct sht15_data *data)
gpio_set_value(data->pdata->gpio_sck, 0);
ndelay(SHT15_TSCKL);
}
+ return 0;
}
/**
@@ -251,10 +254,14 @@ static inline void sht15_send_bit(struct sht15_data *data, int val)
* conservative ones used in implementation. This implements
* figure 12 on the data sheet.
*/
-static void sht15_transmission_start(struct sht15_data *data)
+static int sht15_transmission_start(struct sht15_data *data)
{
+ int err;
+
/* ensure data is high and output */
- gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpio_direction_output(data->pdata->gpio_data, 1);
+ if (err)
+ return err;
ndelay(SHT15_TSU);
gpio_set_value(data->pdata->gpio_sck, 0);
ndelay(SHT15_TSCKL);
@@ -270,6 +277,7 @@ static void sht15_transmission_start(struct sht15_data *data)
ndelay(SHT15_TSU);
gpio_set_value(data->pdata->gpio_sck, 0);
ndelay(SHT15_TSCKL);
+ return 0;
}
/**
@@ -293,13 +301,19 @@ static void sht15_send_byte(struct sht15_data *data, u8 byte)
*/
static int sht15_wait_for_response(struct sht15_data *data)
{
- gpio_direction_input(data->pdata->gpio_data);
+ int err;
+
+ err = gpio_direction_input(data->pdata->gpio_data);
+ if (err)
+ return err;
gpio_set_value(data->pdata->gpio_sck, 1);
ndelay(SHT15_TSCKH);
if (gpio_get_value(data->pdata->gpio_data)) {
gpio_set_value(data->pdata->gpio_sck, 0);
dev_err(data->dev, "Command not acknowledged\n");
- sht15_connection_reset(data);
+ err = sht15_connection_reset(data);
+ if (err)
+ return err;
return -EIO;
}
gpio_set_value(data->pdata->gpio_sck, 0);
@@ -317,12 +331,13 @@ static int sht15_wait_for_response(struct sht15_data *data)
*/
static int sht15_send_cmd(struct sht15_data *data, u8 cmd)
{
- int ret = 0;
+ int err;
- sht15_transmission_start(data);
+ err = sht15_transmission_start(data);
+ if (err)
+ return err;
sht15_send_byte(data, cmd);
- ret = sht15_wait_for_response(data);
- return ret;
+ return sht15_wait_for_response(data);
}
/**
@@ -352,9 +367,13 @@ static int sht15_soft_reset(struct sht15_data *data)
* Each byte of data is acknowledged by pulling the data line
* low for one clock pulse.
*/
-static void sht15_ack(struct sht15_data *data)
+static int sht15_ack(struct sht15_data *data)
{
- gpio_direction_output(data->pdata->gpio_data, 0);
+ int err;
+
+ err = gpio_direction_output(data->pdata->gpio_data, 0);
+ if (err)
+ return err;
ndelay(SHT15_TSU);
gpio_set_value(data->pdata->gpio_sck, 1);
ndelay(SHT15_TSU);
@@ -362,7 +381,7 @@ static void sht15_ack(struct sht15_data *data)
ndelay(SHT15_TSU);
gpio_set_value(data->pdata->gpio_data, 1);
- gpio_direction_input(data->pdata->gpio_data);
+ return gpio_direction_input(data->pdata->gpio_data);
}
/**
@@ -371,14 +390,19 @@ static void sht15_ack(struct sht15_data *data)
*
* This is basically a NAK (single clock pulse, data high).
*/
-static void sht15_end_transmission(struct sht15_data *data)
+static int sht15_end_transmission(struct sht15_data *data)
{
- gpio_direction_output(data->pdata->gpio_data, 1);
+ int err;
+
+ err = gpio_direction_output(data->pdata->gpio_data, 1);
+ if (err)
+ return err;
ndelay(SHT15_TSU);
gpio_set_value(data->pdata->gpio_sck, 1);
ndelay(SHT15_TSCKH);
gpio_set_value(data->pdata->gpio_sck, 0);
ndelay(SHT15_TSCKL);
+ return 0;
}
/**
@@ -410,17 +434,19 @@ static u8 sht15_read_byte(struct sht15_data *data)
*/
static int sht15_send_status(struct sht15_data *data, u8 status)
{
- int ret;
-
- ret = sht15_send_cmd(data, SHT15_WRITE_STATUS);
- if (ret)
- return ret;
- gpio_direction_output(data->pdata->gpio_data, 1);
+ int err;
+
+ err = sht15_send_cmd(data, SHT15_WRITE_STATUS);
+ if (err)
+ return err;
+ err = gpio_direction_output(data->pdata->gpio_data, 1);
+ if (err)
+ return err;
ndelay(SHT15_TSU);
sht15_send_byte(data, status);
- ret = sht15_wait_for_response(data);
- if (ret)
- return ret;
+ err = sht15_wait_for_response(data);
+ if (err)
+ return err;
data->val_status = status;
return 0;
@@ -446,7 +472,7 @@ static int sht15_update_status(struct sht15_data *data)
|| !data->status_valid) {
ret = sht15_send_cmd(data, SHT15_READ_STATUS);
if (ret)
- goto error_ret;
+ goto unlock;
status = sht15_read_byte(data);
if (data->checksumming) {
@@ -458,7 +484,9 @@ static int sht15_update_status(struct sht15_data *data)
== dev_checksum);
}
- sht15_end_transmission(data);
+ ret = sht15_end_transmission(data);
+ if (ret)
+ goto unlock;
/*
* Perform checksum validation on the received data.
@@ -469,27 +497,27 @@ static int sht15_update_status(struct sht15_data *data)
previous_config = data->val_status & 0x07;
ret = sht15_soft_reset(data);
if (ret)
- goto error_ret;
+ goto unlock;
if (previous_config) {
ret = sht15_send_status(data, previous_config);
if (ret) {
dev_err(data->dev,
"CRC validation failed, unable "
"to restore device settings\n");
- goto error_ret;
+ goto unlock;
}
}
ret = -EAGAIN;
- goto error_ret;
+ goto unlock;
}
data->val_status = status;
data->status_valid = true;
data->last_status = jiffies;
}
-error_ret:
- mutex_unlock(&data->read_lock);
+unlock:
+ mutex_unlock(&data->read_lock);
return ret;
}
@@ -511,7 +539,9 @@ static int sht15_measurement(struct sht15_data *data,
if (ret)
return ret;
- gpio_direction_input(data->pdata->gpio_data);
+ ret = gpio_direction_input(data->pdata->gpio_data);
+ if (ret)
+ return ret;
atomic_set(&data->interrupt_handled, 0);
enable_irq(gpio_to_irq(data->pdata->gpio_data));
@@ -524,9 +554,14 @@ static int sht15_measurement(struct sht15_data *data,
ret = wait_event_timeout(data->wait_queue,
(data->state == SHT15_READING_NOTHING),
msecs_to_jiffies(timeout_msecs));
- if (ret == 0) {/* timeout occurred */
+ if (data->state != SHT15_READING_NOTHING) { /* I/O error occurred */
+ data->state = SHT15_READING_NOTHING;
+ return -EIO;
+ } else if (ret == 0) { /* timeout occurred */
disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
- sht15_connection_reset(data);
+ ret = sht15_connection_reset(data);
+ if (ret)
+ return ret;
return -ETIME;
}
@@ -570,17 +605,17 @@ static int sht15_update_measurements(struct sht15_data *data)
data->state = SHT15_READING_HUMID;
ret = sht15_measurement(data, SHT15_MEASURE_RH, 160);
if (ret)
- goto error_ret;
+ goto unlock;
data->state = SHT15_READING_TEMP;
ret = sht15_measurement(data, SHT15_MEASURE_TEMP, 400);
if (ret)
- goto error_ret;
+ goto unlock;
data->measurements_valid = true;
data->last_measurement = jiffies;
}
-error_ret:
- mutex_unlock(&data->read_lock);
+unlock:
+ mutex_unlock(&data->read_lock);
return ret;
}
@@ -598,8 +633,8 @@ static inline int sht15_calc_temp(struct sht15_data *data)
for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
/* Find pointer to interpolate */
- if (data->supply_uV > temppoints[i - 1].vdd) {
- d1 = (data->supply_uV - temppoints[i - 1].vdd)
+ if (data->supply_uv > temppoints[i - 1].vdd) {
+ d1 = (data->supply_uv - temppoints[i - 1].vdd)
* (temppoints[i].d1 - temppoints[i - 1].d1)
/ (temppoints[i].vdd - temppoints[i - 1].vdd)
+ temppoints[i - 1].d1;
@@ -818,7 +853,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
/* Read the data back from the device */
val = sht15_read_byte(data);
val <<= 8;
- sht15_ack(data);
+ if (sht15_ack(data))
+ goto wakeup;
val |= sht15_read_byte(data);
if (data->checksumming) {
@@ -826,7 +862,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
* Ask the device for a checksum and read it back.
* Note: the device sends the checksum byte reversed.
*/
- sht15_ack(data);
+ if (sht15_ack(data))
+ goto wakeup;
dev_checksum = sht15_reverse(sht15_read_byte(data));
checksum_vals[0] = (data->state == SHT15_READING_TEMP) ?
SHT15_MEASURE_TEMP : SHT15_MEASURE_RH;
@@ -837,7 +874,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
}
/* Tell the device we are done */
- sht15_end_transmission(data);
+ if (sht15_end_transmission(data))
+ goto wakeup;
switch (data->state) {
case SHT15_READING_TEMP:
@@ -851,6 +889,7 @@ static void sht15_bh_read_data(struct work_struct *work_s)
}
data->state = SHT15_READING_NOTHING;
+wakeup:
wake_up(&data->wait_queue);
}
@@ -859,7 +898,7 @@ static void sht15_update_voltage(struct work_struct *work_s)
struct sht15_data *data
= container_of(work_s, struct sht15_data,
update_supply_work);
- data->supply_uV = regulator_get_voltage(data->reg);
+ data->supply_uv = regulator_get_voltage(data->reg);
}
/**
@@ -878,7 +917,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
struct sht15_data *data = container_of(nb, struct sht15_data, nb);
if (event == REGULATOR_EVENT_VOLTAGE_CHANGE)
- data->supply_uV_valid = false;
+ data->supply_uv_valid = false;
schedule_work(&data->update_supply_work);
return NOTIFY_OK;
@@ -906,7 +945,7 @@ static int sht15_probe(struct platform_device *pdev)
return -EINVAL;
}
data->pdata = pdev->dev.platform_data;
- data->supply_uV = data->pdata->supply_mv * 1000;
+ data->supply_uv = data->pdata->supply_mv * 1000;
if (data->pdata->checksum)
data->checksumming = true;
if (data->pdata->no_otp_reload)
@@ -924,7 +963,7 @@ static int sht15_probe(struct platform_device *pdev)
voltage = regulator_get_voltage(data->reg);
if (voltage)
- data->supply_uV = voltage;
+ data->supply_uv = voltage;
regulator_enable(data->reg);
/*
@@ -942,17 +981,17 @@ static int sht15_probe(struct platform_device *pdev)
}
/* Try requesting the GPIOs */
- ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_sck, "SHT15 sck");
+ ret = devm_gpio_request_one(&pdev->dev, data->pdata->gpio_sck,
+ GPIOF_OUT_INIT_LOW, "SHT15 sck");
if (ret) {
- dev_err(&pdev->dev, "gpio request failed\n");
+ dev_err(&pdev->dev, "clock line GPIO request failed\n");
goto err_release_reg;
}
- gpio_direction_output(data->pdata->gpio_sck, 0);
ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_data,
"SHT15 data");
if (ret) {
- dev_err(&pdev->dev, "gpio request failed\n");
+ dev_err(&pdev->dev, "data line GPIO request failed\n");
goto err_release_reg;
}
@@ -966,7 +1005,9 @@ static int sht15_probe(struct platform_device *pdev)
goto err_release_reg;
}
disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
- sht15_connection_reset(data);
+ ret = sht15_connection_reset(data);
+ if (ret)
+ goto err_release_reg;
ret = sht15_soft_reset(data);
if (ret)
goto err_release_reg;
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 06ce3c911db9..c35847a1a0a3 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -132,7 +132,7 @@ static struct platform_device *pdev;
*/
static inline u8 IN_TO_REG(unsigned long val)
{
- unsigned long nval = SENSORS_LIMIT(val, 0, 4080);
+ unsigned long nval = clamp_val(val, 0, 4080);
return (nval + 8) / 16;
}
#define IN_FROM_REG(val) ((val) * 16)
@@ -141,7 +141,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm <= 0)
return 255;
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
static inline int FAN_FROM_REG(u8 val, int div)
@@ -159,7 +159,7 @@ static inline int TEMP_FROM_REG(s8 val)
}
static inline s8 TEMP_TO_REG(int val)
{
- int nval = SENSORS_LIMIT(val, -54120, 157530) ;
+ int nval = clamp_val(val, -54120, 157530) ;
return nval < 0 ? (nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830;
}
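
The mechanical change running through the remaining drivers replaces the hwmon-private SENSORS_LIMIT() macro with the generic clamp_val() helper from the core kernel headers. clamp_val(val, lo, hi) pins val into [lo, hi] using the type of val for the comparison (it expands to clamp_t(typeof(val), val, lo, hi)), so the semantics of these conversion macros are unchanged. A minimal sketch, not taken from any of the drivers here:

	/* hypothetical values, for illustration only */
	long raw = -20;
	u8 lim = clamp_val(raw, 0, 255);	/* lim == 0; 300 would become 255, 42 stays 42 */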
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index dba0c567e7a1..6d8255ccf07a 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -326,7 +326,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute
/* Preserve fan min */
tmp = 192 - (old_div * (192 - data->fan_preload[nr])
+ new_div / 2) / new_div;
- data->fan_preload[nr] = SENSORS_LIMIT(tmp, 0, 191);
+ data->fan_preload[nr] = clamp_val(tmp, 0, 191);
smsc47m1_write_value(data, SMSC47M1_REG_FAN_PRELOAD[nr],
data->fan_preload[nr]);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 36a3478d0799..efee4c59239f 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -77,7 +77,7 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
static inline u8 IN_TO_REG(unsigned long val, int n)
{
- return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255);
+ return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
}
/*
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
*/
static inline s8 TEMP_TO_REG(int val)
{
- return SENSORS_LIMIT(SCALE(val, 1, 1000), -128000, 127000);
+ return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
}
static inline int TEMP_FROM_REG(s8 val)
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 3c2c48d904e6..4b59eb53b18a 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -134,7 +134,7 @@ static ssize_t set_analog_out(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- data->analog_out = SENSORS_LIMIT(tmp, 0, 255);
+ data->analog_out = clamp_val(tmp, 0, 255);
i2c_smbus_write_byte_data(client, THMC50_REG_ANALOG_OUT,
data->analog_out);
@@ -187,7 +187,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_min[nr] = SENSORS_LIMIT(val / 1000, -128, 127);
+ data->temp_min[nr] = clamp_val(val / 1000, -128, 127);
i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MIN[nr],
data->temp_min[nr]);
mutex_unlock(&data->update_lock);
@@ -216,7 +216,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_max[nr] = SENSORS_LIMIT(val / 1000, -128, 127);
+ data->temp_max[nr] = clamp_val(val / 1000, -128, 127);
i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MAX[nr],
data->temp_max[nr]);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index b10c3d36ccbc..523dd89ba498 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -115,7 +115,7 @@ static ssize_t tmp102_set_temp(struct device *dev,
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
- val = SENSORS_LIMIT(val, -256000, 255000);
+ val = clamp_val(val, -256000, 255000);
mutex_lock(&tmp102->lock);
tmp102->temp[sda->index] = val;
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index e62054875164..c85f6967ccc3 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -142,10 +142,10 @@ static int tmp401_register_to_temp(u16 reg, u8 config)
static u16 tmp401_temp_to_register(long temp, u8 config)
{
if (config & TMP401_CONFIG_RANGE) {
- temp = SENSORS_LIMIT(temp, -64000, 191000);
+ temp = clamp_val(temp, -64000, 191000);
temp += 64000;
} else
- temp = SENSORS_LIMIT(temp, 0, 127000);
+ temp = clamp_val(temp, 0, 127000);
return (temp * 160 + 312) / 625;
}
@@ -163,10 +163,10 @@ static int tmp401_crit_register_to_temp(u8 reg, u8 config)
static u8 tmp401_crit_temp_to_register(long temp, u8 config)
{
if (config & TMP401_CONFIG_RANGE) {
- temp = SENSORS_LIMIT(temp, -64000, 191000);
+ temp = clamp_val(temp, -64000, 191000);
temp += 64000;
} else
- temp = SENSORS_LIMIT(temp, 0, 127000);
+ temp = clamp_val(temp, 0, 127000);
return (temp + 500) / 1000;
}
@@ -417,14 +417,14 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
return -EINVAL;
if (data->config & TMP401_CONFIG_RANGE)
- val = SENSORS_LIMIT(val, -64000, 191000);
+ val = clamp_val(val, -64000, 191000);
else
- val = SENSORS_LIMIT(val, 0, 127000);
+ val = clamp_val(val, 0, 127000);
mutex_lock(&data->update_lock);
temp = tmp401_crit_register_to_temp(data->temp_crit[index],
data->config);
- val = SENSORS_LIMIT(val, temp - 255000, temp);
+ val = clamp_val(val, temp - 255000, temp);
reg = ((temp - val) + 500) / 1000;
i2c_smbus_write_byte_data(to_i2c_client(dev),
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c
index 86d7f6d858b1..d867e6bb2be1 100644
--- a/drivers/hwmon/vexpress.c
+++ b/drivers/hwmon/vexpress.c
@@ -19,6 +19,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/vexpress.h>
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index e0e14a9f1658..3123b30208c5 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -135,17 +135,14 @@ static inline u8 IN_TO_REG(long val, int inNum)
* for the constants.
*/
if (inNum <= 1)
- return (u8)
- SENSORS_LIMIT((val * 21024 - 1205000) / 250000, 0, 255);
+ return (u8) clamp_val((val * 21024 - 1205000) / 250000, 0, 255);
else if (inNum == 2)
- return (u8)
- SENSORS_LIMIT((val * 15737 - 1205000) / 250000, 0, 255);
+ return (u8) clamp_val((val * 15737 - 1205000) / 250000, 0, 255);
else if (inNum == 3)
- return (u8)
- SENSORS_LIMIT((val * 10108 - 1205000) / 250000, 0, 255);
+ return (u8) clamp_val((val * 10108 - 1205000) / 250000, 0, 255);
else
- return (u8)
- SENSORS_LIMIT((val * 41714 - 12050000) / 2500000, 0, 255);
+ return (u8) clamp_val((val * 41714 - 12050000) / 2500000, 0,
+ 255);
}
static inline long IN_FROM_REG(u8 val, int inNum)
@@ -175,8 +172,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 0;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 255);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 255);
}
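
As a reminder of what these fan conversions do (an illustration, not part of the patch): the macros follow the usual hwmon convention RPM = 1350000 / (reg * div), so FAN_TO_REG() computes the rounded inverse and clamps it to 1..254 because register values 0 and 255 read back as "no fan" (see FAN_FROM_REG just below).

/*
 * Example: rpm = 3000, div = 2
 *   (1350000 + 3000 * 2 / 2) / (3000 * 2) = 1353000 / 6000 = 225
 * and FAN_FROM_REG(225, 2) = 1350000 / (225 * 2) = 3000 RPM.
 */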
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (val) == 255 ? 0 : 1350000 / \
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 751703059fae..dcc62f80f67b 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -158,7 +158,7 @@ struct vt1211_data {
#define IN_FROM_REG(ix, reg) ((reg) < 3 ? 0 : (ix) == 5 ? \
(((reg) - 3) * 15882 + 479) / 958 : \
(((reg) - 3) * 10000 + 479) / 958)
-#define IN_TO_REG(ix, val) (SENSORS_LIMIT((ix) == 5 ? \
+#define IN_TO_REG(ix, val) (clamp_val((ix) == 5 ? \
((val) * 958 + 7941) / 15882 + 3 : \
((val) * 958 + 5000) / 10000 + 3, 0, 255))
@@ -173,7 +173,7 @@ struct vt1211_data {
(ix) == 1 ? (reg) < 51 ? 0 : \
((reg) - 51) * 1000 : \
((253 - (reg)) * 2200 + 105) / 210)
-#define TEMP_TO_REG(ix, val) SENSORS_LIMIT( \
+#define TEMP_TO_REG(ix, val) clamp_val( \
((ix) == 0 ? ((val) + 500) / 1000 : \
(ix) == 1 ? ((val) + 500) / 1000 + 51 : \
253 - ((val) * 210 + 1100) / 2200), 0, 255)
@@ -183,7 +183,7 @@ struct vt1211_data {
#define RPM_FROM_REG(reg, div) (((reg) == 0) || ((reg) == 255) ? 0 : \
1310720 / (reg) / DIV_FROM_REG(div))
#define RPM_TO_REG(val, div) ((val) == 0 ? 255 : \
- SENSORS_LIMIT((1310720 / (val) / \
+ clamp_val((1310720 / (val) / \
DIV_FROM_REG(div)), 1, 254))
/* ---------------------------------------------------------------------
@@ -687,7 +687,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
data->fan_ctl));
break;
case SHOW_SET_PWM_FREQ:
- val = 135000 / SENSORS_LIMIT(val, 135000 >> 7, 135000);
+ val = 135000 / clamp_val(val, 135000 >> 7, 135000);
/* calculate tmp = log2(val) */
tmp = 0;
for (val >>= 1; val > 0; val >>= 1)
@@ -845,7 +845,7 @@ static ssize_t set_pwm_auto_point_pwm(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- data->pwm_auto_pwm[ix][ap] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm_auto_pwm[ix][ap] = clamp_val(val, 0, 255);
vt1211_write8(data, VT1211_REG_PWM_AUTO_PWM(ix, ap),
data->pwm_auto_pwm[ix][ap]);
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index a56355cef184..988a2a796764 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -147,7 +147,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 0;
- return SENSORS_LIMIT(1310720 / (rpm * div), 1, 255);
+ return clamp_val(1310720 / (rpm * div), 1, 255);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : 1310720 / ((val) * (div)))
@@ -236,7 +236,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
+ data->in_min[nr] = clamp_val(((val * 958) / 10000) + 3, 0, 255);
vt8231_write_value(data, regvoltmin[nr], data->in_min[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -256,7 +256,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
+ data->in_max[nr] = clamp_val(((val * 958) / 10000) + 3, 0, 255);
vt8231_write_value(data, regvoltmax[nr], data->in_max[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -302,8 +302,8 @@ static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
- 0, 255);
+ data->in_min[5] = clamp_val(((val * 958 * 34) / (10000 * 54)) + 3,
+ 0, 255);
vt8231_write_value(data, regvoltmin[5], data->in_min[5]);
mutex_unlock(&data->update_lock);
return count;
@@ -321,8 +321,8 @@ static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
- 0, 255);
+ data->in_max[5] = clamp_val(((val * 958 * 34) / (10000 * 54)) + 3,
+ 0, 255);
vt8231_write_value(data, regvoltmax[5], data->in_max[5]);
mutex_unlock(&data->update_lock);
return count;
@@ -380,7 +380,7 @@ static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_max[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
+ data->temp_max[0] = clamp_val((val + 500) / 1000, 0, 255);
vt8231_write_value(data, regtempmax[0], data->temp_max[0]);
mutex_unlock(&data->update_lock);
return count;
@@ -397,7 +397,7 @@ static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_min[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
+ data->temp_min[0] = clamp_val((val + 500) / 1000, 0, 255);
vt8231_write_value(data, regtempmin[0], data->temp_min[0]);
mutex_unlock(&data->update_lock);
return count;
@@ -444,7 +444,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_max[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
+ data->temp_max[nr] = clamp_val(TEMP_MAXMIN_TO_REG(val), 0, 255);
vt8231_write_value(data, regtempmax[nr], data->temp_max[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -463,7 +463,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->temp_min[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
+ data->temp_min[nr] = clamp_val(TEMP_MAXMIN_TO_REG(val), 0, 255);
vt8231_write_value(data, regtempmin[nr], data->temp_min[nr]);
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 0e8ffd6059a0..0a89211c25f6 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -354,8 +354,8 @@ static inline unsigned int step_time_from_reg(u8 reg, u8 mode)
static inline u8 step_time_to_reg(unsigned int msec, u8 mode)
{
- return SENSORS_LIMIT((mode ? (msec + 50) / 100 :
- (msec + 200) / 400), 1, 255);
+ return clamp_val((mode ? (msec + 50) / 100 : (msec + 200) / 400),
+ 1, 255);
}
static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
@@ -414,8 +414,7 @@ static inline long in_from_reg(u8 reg, u8 nr, const u16 *scale_in)
static inline u8 in_to_reg(u32 val, u8 nr, const u16 *scale_in)
{
- return SENSORS_LIMIT(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0,
- 255);
+ return clamp_val(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0, 255);
}
/*
@@ -1267,7 +1266,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
mutex_lock(&data->update_lock);
data->temp_offset[nr] = val;
@@ -1435,7 +1434,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm[nr] = val;
@@ -1514,7 +1513,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
@@ -1540,7 +1539,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
return err;
/* Limit the temp to 0C - 15C */
- val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
mutex_lock(&data->update_lock);
if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
@@ -1639,7 +1638,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
err = kstrtoul(buf, 10, &val); \
if (err < 0) \
return err; \
- val = SENSORS_LIMIT(val, 1, 255); \
+ val = clamp_val(val, 1, 255); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
w83627ehf_write_value(data, data->REG_##REG[nr], val); \
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 81f486520cea..3b9ef2d23452 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -254,16 +254,15 @@ static const u8 BIT_SCFG2[] = { 0x10, 0x20, 0x40 };
* these macros are called: arguments may be evaluated more than once.
* Fixing this is just not worth it.
*/
-#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8)/16),0,255))
+#define IN_TO_REG(val) (clamp_val((((val) + 8) / 16), 0, 255))
#define IN_FROM_REG(val) ((val) * 16)
static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1,
- 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define TEMP_MIN (-128000)
@@ -275,9 +274,9 @@ static inline u8 FAN_TO_REG(long rpm, int div)
*/
static u8 TEMP_TO_REG(long temp)
{
- int ntemp = SENSORS_LIMIT(temp, TEMP_MIN, TEMP_MAX);
- ntemp += (ntemp<0 ? -500 : 500);
- return (u8)(ntemp / 1000);
+ int ntemp = clamp_val(temp, TEMP_MIN, TEMP_MAX);
+ ntemp += (ntemp < 0 ? -500 : 500);
+ return (u8)(ntemp / 1000);
}
static int TEMP_FROM_REG(u8 reg)
@@ -287,7 +286,7 @@ static int TEMP_FROM_REG(u8 reg)
#define FAN_FROM_REG(val,div) ((val)==0?-1:(val)==255?0:1350000/((val)*(div)))
-#define PWM_TO_REG(val) (SENSORS_LIMIT((val),0,255))
+#define PWM_TO_REG(val) (clamp_val((val), 0, 255))
static inline unsigned long pwm_freq_from_reg_627hf(u8 reg)
{
@@ -342,7 +341,7 @@ static inline u8 pwm_freq_to_reg(unsigned long val)
static inline u8 DIV_TO_REG(long val)
{
int i;
- val = SENSORS_LIMIT(val, 1, 128) >> 1;
+ val = clamp_val(val, 1, 128) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
@@ -614,8 +613,7 @@ static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *a
/* use VRM9 calculation */
data->in_min[0] =
- SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0,
- 255);
+ clamp_val(((val * 100) - 70000 + 244) / 488, 0, 255);
else
/* use VRM8 (standard) calculation */
data->in_min[0] = IN_TO_REG(val);
@@ -644,8 +642,7 @@ static ssize_t store_regs_in_max0(struct device *dev, struct device_attribute *a
/* use VRM9 calculation */
data->in_max[0] =
- SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0,
- 255);
+ clamp_val(((val * 100) - 70000 + 244) / 488, 0, 255);
else
/* use VRM8 (standard) calculation */
data->in_max[0] = IN_TO_REG(val);
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 93bd28639595..aeec5b1d81c9 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -159,7 +159,7 @@ static const u8 BIT_SCFG2[] = { 0x10, 0x20, 0x40 };
#define W83781D_DEFAULT_BETA 3435
/* Conversions */
-#define IN_TO_REG(val) SENSORS_LIMIT(((val) + 8) / 16, 0, 255)
+#define IN_TO_REG(val) clamp_val(((val) + 8) / 16, 0, 255)
#define IN_FROM_REG(val) ((val) * 16)
static inline u8
@@ -167,8 +167,8 @@ FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
static inline long
@@ -181,7 +181,7 @@ FAN_FROM_REG(u8 val, int div)
return 1350000 / (val * div);
}
-#define TEMP_TO_REG(val) SENSORS_LIMIT((val) / 1000, -127, 128)
+#define TEMP_TO_REG(val) clamp_val((val) / 1000, -127, 128)
#define TEMP_FROM_REG(val) ((val) * 1000)
#define BEEP_MASK_FROM_REG(val, type) ((type) == as99127f ? \
@@ -195,9 +195,8 @@ static inline u8
DIV_TO_REG(long val, enum chips type)
{
int i;
- val = SENSORS_LIMIT(val, 1,
- ((type == w83781d
- || type == as99127f) ? 8 : 128)) >> 1;
+ val = clamp_val(val, 1,
+ ((type == w83781d || type == as99127f) ? 8 : 128)) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
@@ -443,7 +442,7 @@ store_vrm_reg(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
- data->vrm = SENSORS_LIMIT(val, 0, 255);
+ data->vrm = clamp_val(val, 0, 255);
return count;
}
@@ -730,7 +729,7 @@ store_pwm(struct device *dev, struct device_attribute *da, const char *buf,
return err;
mutex_lock(&data->update_lock);
- data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm[nr] = clamp_val(val, 0, 255);
w83781d_write_value(data, W83781D_REG_PWM[nr], data->pwm[nr]);
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index ed397c645198..38dddddf8875 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -220,15 +220,15 @@ static inline int w83791d_write(struct i2c_client *client, u8 reg, u8 value)
* in mV as would be measured on the chip input pin, need to just
* multiply/divide by 16 to translate from/to register values.
*/
-#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8) / 16), 0, 255))
+#define IN_TO_REG(val) (clamp_val((((val) + 8) / 16), 0, 255))
#define IN_FROM_REG(val) ((val) * 16)
static u8 fan_to_reg(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -273,7 +273,7 @@ static u8 div_to_reg(int nr, long val)
int i;
/* fan divisors max out at 128 */
- val = SENSORS_LIMIT(val, 1, 128) >> 1;
+ val = clamp_val(val, 1, 128) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
@@ -747,7 +747,7 @@ static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&data->update_lock);
- data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
+ data->pwm[nr] = clamp_val(val, 0, 255);
w83791d_write(client, W83791D_REG_PWM[nr], data->pwm[nr]);
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 301942d08453..5cb83ddf2cc6 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -235,8 +235,8 @@ FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -244,16 +244,15 @@ FAN_TO_REG(long rpm, int div)
1350000 / ((val) * (div))))
/* for temp1 */
-#define TEMP1_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
- : (val)) / 1000, 0, 0xff))
+#define TEMP1_TO_REG(val) (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \
+ : (val)) / 1000, 0, 0xff))
#define TEMP1_FROM_REG(val) (((val) & 0x80 ? (val)-0x100 : (val)) * 1000)
/* for temp2 and temp3, because they need additional resolution */
#define TEMP_ADD_FROM_REG(val1, val2) \
((((val1) & 0x80 ? (val1)-0x100 \
: (val1)) * 1000) + ((val2 & 0x80) ? 500 : 0))
#define TEMP_ADD_TO_REG_HIGH(val) \
- (SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
- : (val)) / 1000, 0, 0xff))
+ (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 : (val)) / 1000, 0, 0xff))
#define TEMP_ADD_TO_REG_LOW(val) ((val%1000) ? 0x80 : 0x00)
#define DIV_FROM_REG(val) (1 << (val))
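
TEMP1_TO_REG()/TEMP1_FROM_REG() above store the temperature as an 8-bit two's-complement value in whole degrees. A quick worked example, not from the datasheet:

/*
 * -5000 millidegrees C:  (-5000 + 0x100 * 1000) / 1000 = 251 = 0xfb
 * reading 0xfb back: bit 7 is set, so (0xfb - 0x100) * 1000 = -5000
 */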
@@ -262,7 +261,7 @@ static inline u8
DIV_TO_REG(long val)
{
int i;
- val = SENSORS_LIMIT(val, 1, 128) >> 1;
+ val = clamp_val(val, 1, 128) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
@@ -397,7 +396,7 @@ static ssize_t store_in_##reg(struct device *dev, \
if (err) \
return err; \
mutex_lock(&data->update_lock); \
- data->in_##reg[nr] = SENSORS_LIMIT(IN_TO_REG(nr, val) / 4, 0, 255); \
+ data->in_##reg[nr] = clamp_val(IN_TO_REG(nr, val) / 4, 0, 255); \
w83792d_write_value(client, W83792D_REG_IN_##REG[nr], \
data->in_##reg[nr]); \
mutex_unlock(&data->update_lock); \
@@ -645,7 +644,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 255) >> 4;
+ val = clamp_val(val, 0, 255) >> 4;
mutex_lock(&data->update_lock);
val |= w83792d_read_value(client, W83792D_REG_PWM[nr]) & 0xf0;
@@ -799,7 +798,7 @@ store_thermal_cruise(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
target_mask = w83792d_read_value(client,
W83792D_REG_THERMAL[nr]) & 0x80;
- data->thermal_cruise[nr] = SENSORS_LIMIT(target_tmp, 0, 255);
+ data->thermal_cruise[nr] = clamp_val(target_tmp, 0, 255);
w83792d_write_value(client, W83792D_REG_THERMAL[nr],
(data->thermal_cruise[nr]) | target_mask);
mutex_unlock(&data->update_lock);
@@ -837,7 +836,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
tol_mask = w83792d_read_value(client,
W83792D_REG_TOLERANCE[nr]) & ((nr == 1) ? 0x0f : 0xf0);
- tol_tmp = SENSORS_LIMIT(val, 0, 15);
+ tol_tmp = clamp_val(val, 0, 15);
tol_tmp &= 0x0f;
data->tolerance[nr] = tol_tmp;
if (nr == 1)
@@ -881,7 +880,7 @@ store_sf2_point(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->sf2_points[index][nr] = SENSORS_LIMIT(val, 0, 127);
+ data->sf2_points[index][nr] = clamp_val(val, 0, 127);
mask_tmp = w83792d_read_value(client,
W83792D_REG_POINTS[index][nr]) & 0x80;
w83792d_write_value(client, W83792D_REG_POINTS[index][nr],
@@ -923,7 +922,7 @@ store_sf2_level(struct device *dev, struct device_attribute *attr,
return err;
mutex_lock(&data->update_lock);
- data->sf2_levels[index][nr] = SENSORS_LIMIT((val * 15) / 100, 0, 15);
+ data->sf2_levels[index][nr] = clamp_val((val * 15) / 100, 0, 15);
mask_tmp = w83792d_read_value(client, W83792D_REG_LEVELS[index][nr])
& ((nr == 3) ? 0xf0 : 0x0f);
if (nr == 3)
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 99799fd1d917..660427520c53 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -191,7 +191,7 @@ static inline u16 FAN_TO_REG(long rpm)
{
if (rpm <= 0)
return 0x0fff;
- return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
+ return clamp_val((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
}
static inline unsigned long TIME_FROM_REG(u8 reg)
@@ -201,7 +201,7 @@ static inline unsigned long TIME_FROM_REG(u8 reg)
static inline u8 TIME_TO_REG(unsigned long val)
{
- return SENSORS_LIMIT((val + 50) / 100, 0, 0xff);
+ return clamp_val((val + 50) / 100, 0, 0xff);
}
static inline long TEMP_FROM_REG(s8 reg)
@@ -211,7 +211,7 @@ static inline long TEMP_FROM_REG(s8 reg)
static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
{
- return SENSORS_LIMIT((val + (val < 0 ? -500 : 500)) / 1000, min, max);
+ return clamp_val((val + (val < 0 ? -500 : 500)) / 1000, min, max);
}
struct w83793_data {
@@ -558,7 +558,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
w83793_write_value(client, W83793_REG_PWM_STOP_TIME(index),
val);
} else {
- val = SENSORS_LIMIT(val, 0, 0xff) >> 2;
+ val = clamp_val(val, 0, 0xff) >> 2;
data->pwm[index][nr] =
w83793_read_value(client, W83793_REG_PWM(index, nr)) & 0xc0;
data->pwm[index][nr] |= val;
@@ -739,7 +739,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
if (nr == SETUP_PWM_DEFAULT) {
data->pwm_default =
w83793_read_value(client, W83793_REG_PWM_DEFAULT) & 0xc0;
- data->pwm_default |= SENSORS_LIMIT(val, 0, 0xff) >> 2;
+ data->pwm_default |= clamp_val(val, 0, 0xff) >> 2;
w83793_write_value(client, W83793_REG_PWM_DEFAULT,
data->pwm_default);
} else if (nr == SETUP_PWM_UPTIME) {
@@ -838,7 +838,7 @@ store_sf_ctrl(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
if (nr == TEMP_FAN_MAP) {
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
w83793_write_value(client, W83793_REG_TEMP_FAN_MAP(index), val);
data->temp_fan_map[index] = val;
} else if (nr == TEMP_PWM_ENABLE) {
@@ -907,7 +907,7 @@ store_sf2_pwm(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 0xff) >> 2;
+ val = clamp_val(val, 0, 0xff) >> 2;
mutex_lock(&data->update_lock);
data->sf2_pwm[index][nr] =
@@ -1003,9 +1003,9 @@ store_in(struct device *dev, struct device_attribute *attr,
/* fix the limit values of 5VDD and 5VSB to ALARM mechanism */
if (nr == 1 || nr == 2)
val -= scale_in_add[index] / scale_in[index];
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
} else {
- val = SENSORS_LIMIT(val, 0, 0x3FF);
+ val = clamp_val(val, 0, 0x3FF);
data->in_low_bits[nr] =
w83793_read_value(client, W83793_REG_IN_LOW_BITS[nr]);
data->in_low_bits[nr] &= ~(0x03 << (2 * index));
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 55a4f4894531..e226096148eb 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -262,7 +262,7 @@ static inline u16 fan_to_reg(long rpm)
{
if (rpm <= 0)
return 0x0fff;
- return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
+ return clamp_val((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
}
static inline unsigned long time_from_reg(u8 reg)
@@ -272,7 +272,7 @@ static inline unsigned long time_from_reg(u8 reg)
static inline u8 time_to_reg(unsigned long val)
{
- return SENSORS_LIMIT((val + 50) / 100, 0, 0xff);
+ return clamp_val((val + 50) / 100, 0, 0xff);
}
static inline long temp_from_reg(s8 reg)
@@ -282,7 +282,7 @@ static inline long temp_from_reg(s8 reg)
static inline s8 temp_to_reg(long val, s8 min, s8 max)
{
- return SENSORS_LIMIT(val / 1000, min, max);
+ return clamp_val(val / 1000, min, max);
}
static const u16 pwm_freq_cksel0[16] = {
@@ -319,7 +319,7 @@ static u8 pwm_freq_to_reg(unsigned long val, u16 clkin)
/* Best fit for cksel = 1 */
base_clock = clkin * 1000 / ((clkin == 48000) ? 384 : 256);
- reg1 = SENSORS_LIMIT(DIV_ROUND_CLOSEST(base_clock, val), 1, 128);
+ reg1 = clamp_val(DIV_ROUND_CLOSEST(base_clock, val), 1, 128);
best1 = base_clock / reg1;
reg1 = 0x80 | (reg1 - 1);
@@ -889,7 +889,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
val = pwm_freq_to_reg(val, data->clkin);
break;
default:
- val = SENSORS_LIMIT(val, 0, 0xff);
+ val = clamp_val(val, 0, 0xff);
break;
}
w83795_write(client, W83795_REG_PWM(index, nr), val);
@@ -1126,7 +1126,7 @@ store_temp_pwm_enable(struct device *dev, struct device_attribute *attr,
break;
case TEMP_PWM_FAN_MAP:
mutex_lock(&data->update_lock);
- tmp = SENSORS_LIMIT(tmp, 0, 0xff);
+ tmp = clamp_val(tmp, 0, 0xff);
w83795_write(client, W83795_REG_TFMR(index), tmp);
data->pwm_tfmr[index] = tmp;
mutex_unlock(&data->update_lock);
@@ -1177,13 +1177,13 @@ store_fanin(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
switch (nr) {
case FANIN_TARGET:
- val = fan_to_reg(SENSORS_LIMIT(val, 0, 0xfff));
+ val = fan_to_reg(clamp_val(val, 0, 0xfff));
w83795_write(client, W83795_REG_FTSH(index), val >> 4);
w83795_write(client, W83795_REG_FTSL(index), (val << 4) & 0xf0);
data->target_speed[index] = val;
break;
case FANIN_TOL:
- val = SENSORS_LIMIT(val, 0, 0x3f);
+ val = clamp_val(val, 0, 0x3f);
w83795_write(client, W83795_REG_TFTS, val);
data->tol_speed = val;
break;
@@ -1227,22 +1227,22 @@ store_temp_pwm(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
switch (nr) {
case TEMP_PWM_TTTI:
- val = SENSORS_LIMIT(val, 0, 0x7f);
+ val = clamp_val(val, 0, 0x7f);
w83795_write(client, W83795_REG_TTTI(index), val);
break;
case TEMP_PWM_CTFS:
- val = SENSORS_LIMIT(val, 0, 0x7f);
+ val = clamp_val(val, 0, 0x7f);
w83795_write(client, W83795_REG_CTFS(index), val);
break;
case TEMP_PWM_HCT:
- val = SENSORS_LIMIT(val, 0, 0x0f);
+ val = clamp_val(val, 0, 0x0f);
tmp = w83795_read(client, W83795_REG_HT(index));
tmp &= 0x0f;
tmp |= (val << 4) & 0xf0;
w83795_write(client, W83795_REG_HT(index), tmp);
break;
case TEMP_PWM_HOT:
- val = SENSORS_LIMIT(val, 0, 0x0f);
+ val = clamp_val(val, 0, 0x0f);
tmp = w83795_read(client, W83795_REG_HT(index));
tmp &= 0xf0;
tmp |= val & 0x0f;
@@ -1541,7 +1541,7 @@ store_in(struct device *dev, struct device_attribute *attr,
if ((index >= 17) &&
!((data->has_gain >> (index - 17)) & 1))
val /= 8;
- val = SENSORS_LIMIT(val, 0, 0x3FF);
+ val = clamp_val(val, 0, 0x3FF);
mutex_lock(&data->update_lock);
lsb_idx = IN_LSB_SHIFT_IDX[index][IN_LSB_IDX];
@@ -1596,7 +1596,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
switch (nr) {
case SETUP_PWM_DEFAULT:
- val = SENSORS_LIMIT(val, 0, 0xff);
+ val = clamp_val(val, 0, 0xff);
break;
case SETUP_PWM_UPTIME:
case SETUP_PWM_DOWNTIME:
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 79710bcac2f7..edb06cda5a68 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -86,8 +86,8 @@ FAN_TO_REG(long rpm, int div)
{
if (rpm == 0)
return 255;
- rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ rpm = clamp_val(rpm, 1, 1000000);
+ return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -95,9 +95,8 @@ FAN_TO_REG(long rpm, int div)
1350000 / ((val) * (div))))
/* for temp */
-#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? \
- (val) + 0x100 * 1000 \
- : (val)) / 1000, 0, 0xff))
+#define TEMP_TO_REG(val) (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \
+ : (val)) / 1000, 0, 0xff))
#define TEMP_FROM_REG(val) (((val) & 0x80 ? \
(val) - 0x100 : (val)) * 1000)
@@ -106,7 +105,7 @@ FAN_TO_REG(long rpm, int div)
* in mV as would be measured on the chip input pin, need to just
* multiply/divide by 8 to translate from/to register values.
*/
-#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 4) / 8), 0, 255))
+#define IN_TO_REG(val) (clamp_val((((val) + 4) / 8), 0, 255))
#define IN_FROM_REG(val) ((val) * 8)
#define DIV_FROM_REG(val) (1 << (val))
@@ -115,7 +114,7 @@ static inline u8
DIV_TO_REG(long val)
{
int i;
- val = SENSORS_LIMIT(val, 1, 128) >> 1;
+ val = clamp_val(val, 1, 128) >> 1;
for (i = 0; i < 7; i++) {
if (val == 0)
break;
@@ -481,7 +480,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
- val = SENSORS_LIMIT(val, 0, 255);
+ val = clamp_val(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm[nr] = val;
@@ -564,7 +563,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
tol_mask = w83l786ng_read_value(client,
W83L786NG_REG_TOLERANCE) & ((nr == 1) ? 0x0f : 0xf0);
- tol_tmp = SENSORS_LIMIT(val, 0, 15);
+ tol_tmp = clamp_val(val, 0, 15);
tol_tmp &= 0x0f;
data->tolerance[nr] = tol_tmp;
if (nr == 1)
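
For illustration only (not part of the patch): the reflowed TEMP_TO_REG() above also shows how a signed millidegree value is folded into an 8-bit two's-complement register. A standalone sketch, with clamp_val() re-implemented locally:

/* Illustrative only: encode/decode a signed temperature the way the
 * patched w83l786ng macros do. */
#include <stdio.h>

#define clamp_val(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

#define TEMP_TO_REG(val)   (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \
				       : (val)) / 1000, 0, 0xff))
#define TEMP_FROM_REG(val) ((((val) & 0x80) ? (val) - 0x100 : (val)) * 1000)

int main(void)
{
	long t = -25000;                /* -25 degC in millidegrees */
	int reg = TEMP_TO_REG(t);       /* negative values wrap into 0x80..0xff */

	printf("reg = 0x%02x, back = %ld mC\n", reg, (long)TEMP_FROM_REG(reg));
	return 0;
}
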
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 2f8c76becc6b..46cde098c11c 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -89,7 +89,7 @@ source drivers/i2c/busses/Kconfig
config I2C_STUB
tristate "I2C/SMBus Test Stub"
- depends on EXPERIMENTAL && m
+ depends on m
default 'n'
help
This module may be useful to developers of SMBus client drivers,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index bdca5111eb9d..8bb810e1900b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -22,7 +22,7 @@ config I2C_ALI1535
config I2C_ALI1563
tristate "ALI 1563"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
help
If you say yes to this option, support will be included for the SMB
Host controller on Acer Labs Inc. (ALI) M1563 South Bridges. The SMB
@@ -56,7 +56,7 @@ config I2C_AMD756
config I2C_AMD756_S4882
tristate "SMBus multiplexing on the Tyan S4882"
- depends on I2C_AMD756 && X86 && EXPERIMENTAL
+ depends on I2C_AMD756 && X86
help
Enabling this option will add specific SMBus support for the Tyan
S4882 motherboard. On this 4-CPU board, the SMBus is multiplexed
@@ -164,7 +164,7 @@ config I2C_NFORCE2
config I2C_NFORCE2_S4985
tristate "SMBus multiplexing on the Tyan S4985"
- depends on I2C_NFORCE2 && X86 && EXPERIMENTAL
+ depends on I2C_NFORCE2 && X86
help
Enabling this option will add specific SMBus support for the Tyan
S4985 motherboard. On this 4-CPU board, the SMBus is multiplexed
@@ -215,7 +215,7 @@ config I2C_SIS96X
config I2C_VIA
tristate "VIA VT82C586B"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
select I2C_ALGOBIT
help
If you say yes to this option, support will be included for the VIA
@@ -267,7 +267,7 @@ comment "Mac SMBus host controller drivers"
config I2C_HYDRA
tristate "CHRP Apple Hydra Mac I/O I2C interface"
- depends on PCI && PPC_CHRP && EXPERIMENTAL
+ depends on PCI && PPC_CHRP
select I2C_ALGOBIT
help
This supports the use of the I2C interface in the Apple Hydra Mac
@@ -293,7 +293,7 @@ comment "I2C system bus drivers (mostly embedded / system-on-chip)"
config I2C_AT91
tristate "Atmel AT91 I2C Two-Wire interface (TWI)"
- depends on ARCH_AT91 && EXPERIMENTAL
+ depends on ARCH_AT91
help
This supports the use of the I2C interface on Atmel AT91
processors.
@@ -519,7 +519,6 @@ config I2C_NUC900
config I2C_OCORES
tristate "OpenCores I2C Controller"
- depends on EXPERIMENTAL
help
If you say yes to this option, support will be included for the
OpenCores I2C controller. For details see
@@ -712,7 +711,7 @@ config I2C_OCTEON
config I2C_XILINX
tristate "Xilinx I2C Controller"
- depends on EXPERIMENTAL && HAS_IOMEM
+ depends on HAS_IOMEM
help
If you say yes to this option, support will be included for the
Xilinx I2C controller.
@@ -803,7 +802,7 @@ config I2C_PARPORT_LIGHT
config I2C_TAOS_EVM
tristate "TAOS evaluation module"
- depends on EXPERIMENTAL
+ depends on TTY
select SERIO
select SERIO_SERPORT
default n
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 2bfc04d0a1b1..ebc224154695 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -723,9 +723,9 @@ static int at91_twi_probe(struct platform_device *pdev)
if (!dev->pdata)
return -ENODEV;
- dev->base = devm_request_and_ioremap(&pdev->dev, mem);
- if (!dev->base)
- return -EBUSY;
+ dev->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0)
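
For illustration only (not part of the patch): every devm_request_and_ioremap() conversion in this series switches from a NULL-on-failure helper to devm_ioremap_resource(), which reports failure through an encoded error pointer checked with IS_ERR()/PTR_ERR(). A userspace sketch of that convention; ERR_PTR(), IS_ERR() and PTR_ERR() here are simplified local stand-ins for the versions in <linux/err.h>, and fake_ioremap_resource() is an invented placeholder:

/* Illustrative only: errors are encoded as pointer values in the top
 * (never-valid) page of the address space, so one return value can carry
 * either a valid mapping or a negative errno. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Invented stand-in for devm_ioremap_resource(): mapping or ERR_PTR(-errno). */
static void *fake_ioremap_resource(int should_fail)
{
	static char regs[64];

	if (should_fail)
		return ERR_PTR(-EBUSY);
	return regs;
}

int main(void)
{
	void *base = fake_ioremap_resource(1);

	if (IS_ERR(base)) {
		printf("probe would return %ld\n", PTR_ERR(base)); /* -EBUSY */
		return 1;
	}
	printf("mapped at %p\n", base);
	return 0;
}
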
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index cbba7db9ad59..f5258c205de5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include "i2c-designware-core.h"
/*
@@ -725,3 +726,6 @@ u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
return dw_readl(dev, DW_IC_COMP_PARAM_1);
}
EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
+
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b9734747d610..a71ece63e917 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -511,9 +511,9 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
return -ENOENT;
}
- base = devm_request_and_ioremap(&pdev->dev, res);
- if (!base)
- return -EBUSY;
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
i2c_imx = devm_kzalloc(&pdev->dev, sizeof(struct imx_i2c_struct),
GFP_KERNEL);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 1b1a936eccc9..d6abaf2cf2e3 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -127,7 +127,7 @@ struct mxs_i2c_dev {
struct device *dev;
void __iomem *regs;
struct completion cmd_complete;
- u32 cmd_err;
+ int cmd_err;
struct i2c_adapter adapter;
const struct mxs_i2c_speed_config *speed;
@@ -316,7 +316,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
if (msg->len == 0)
return -EINVAL;
- init_completion(&i2c->cmd_complete);
+ INIT_COMPLETION(i2c->cmd_complete);
i2c->cmd_err = 0;
ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
@@ -473,6 +473,8 @@ static int mxs_i2c_probe(struct platform_device *pdev)
i2c->dev = dev;
i2c->speed = &mxs_i2c_95kHz_config;
+ init_completion(&i2c->cmd_complete);
+
if (dev->of_node) {
err = mxs_i2c_get_ofdata(i2c);
if (err)
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index a873d0ad1acb..a337d08a392d 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -12,6 +12,7 @@
* kind, whether express or implied.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -364,9 +365,9 @@ static int ocores_i2c_probe(struct platform_device *pdev)
if (!i2c)
return -ENOMEM;
- i2c->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!i2c->base)
- return -EADDRNOTAVAIL;
+ i2c->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(i2c->base))
+ return PTR_ERR(i2c->base);
pdata = pdev->dev.platform_data;
if (pdata) {
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 20d41bfa7c19..3ee188679cf1 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -803,7 +803,7 @@ static int errata_omap3_i462(struct omap_i2c_dev *dev)
if (stat & OMAP_I2C_STAT_AL) {
dev_err(dev->dev, "Arbitration lost\n");
dev->cmd_err |= OMAP_I2C_STAT_AL;
- omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
+ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
}
return -EIO;
@@ -963,7 +963,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
i2c_omap_errata_i207(dev, stat);
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
- break;
+ continue;
}
if (stat & OMAP_I2C_STAT_RRDY) {
@@ -989,7 +989,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
break;
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR);
- break;
+ continue;
}
if (stat & OMAP_I2C_STAT_XRDY) {
@@ -1103,11 +1103,9 @@ omap_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
}
- dev->base = devm_request_and_ioremap(&pdev->dev, mem);
- if (!dev->base) {
- dev_err(&pdev->dev, "I2C region already claimed\n");
- return -ENOMEM;
- }
+ dev->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
if (match) {
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 9bd4d73d29e3..4ba4a95b6b26 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -642,11 +642,9 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- priv->io = devm_request_and_ioremap(dev, res);
- if (!priv->io) {
- dev_err(dev, "cannot ioremap\n");
- return -ENODEV;
- }
+ priv->io = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->io))
+ return PTR_ERR(priv->io);
priv->irq = platform_get_irq(pdev, 0);
init_waitqueue_head(&priv->wait);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index a290d089ceaf..c807a6d14f0c 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1042,11 +1042,10 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
goto err_clk;
}
- i2c->regs = devm_request_and_ioremap(&pdev->dev, res);
+ i2c->regs = devm_ioremap_resource(&pdev->dev, res);
- if (i2c->regs == NULL) {
- dev_err(&pdev->dev, "cannot map IO\n");
- ret = -ENXIO;
+ if (IS_ERR(i2c->regs)) {
+ ret = PTR_ERR(i2c->regs);
goto err_clk;
}
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 6aafa3d88ff0..c447e8d40b78 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -406,7 +406,7 @@ err:
return -EIO;
}
-static int acpi_smbus_cmi_remove(struct acpi_device *device, int type)
+static int acpi_smbus_cmi_remove(struct acpi_device *device)
{
struct acpi_smbus_cmi *smbus_cmi = acpi_driver_data(device);
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 3f1818b87974..5a7ad240bd26 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/of_i2c.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -308,10 +309,9 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
goto out;
}
- siic->base = devm_request_and_ioremap(&pdev->dev, mem_res);
- if (siic->base == NULL) {
- dev_err(&pdev->dev, "IO remap failed!\n");
- err = -ENOMEM;
+ siic->base = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(siic->base)) {
+ err = PTR_ERR(siic->base);
goto out;
}
@@ -328,6 +328,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
adap->algo = &i2c_sirfsoc_algo;
adap->algo_data = siic;
+ adap->dev.of_node = pdev->dev.of_node;
adap->dev.parent = &pdev->dev;
adap->nr = pdev->id;
@@ -371,6 +372,8 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
clk_disable(clk);
+ of_i2c_register_devices(adap);
+
dev_info(&pdev->dev, " I2C adapter ready to operate\n");
return 0;
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 580a0c04cb42..60195b590637 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -888,11 +888,11 @@ stu300_probe(struct platform_device *pdev)
if (!res)
return -ENOENT;
- dev->virtbase = devm_request_and_ioremap(&pdev->dev, res);
+ dev->virtbase = devm_ioremap_resource(&pdev->dev, res);
dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual "
"base %p\n", bus_nr, dev->virtbase);
- if (!dev->virtbase)
- return -ENOMEM;
+ if (IS_ERR(dev->virtbase))
+ return PTR_ERR(dev->virtbase);
dev->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, dev->irq, stu300_irh, 0, NAME, dev);
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 7b38877ffec1..f0d9923323ea 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -29,11 +29,10 @@
#include <linux/of_i2c.h>
#include <linux/of_device.h>
#include <linux/module.h>
+#include <linux/clk/tegra.h>
#include <asm/unaligned.h>
-#include <mach/clk.h>
-
#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000))
#define BYTES_PER_FIFO_WORD 4
@@ -669,11 +668,9 @@ static int tegra_i2c_probe(struct platform_device *pdev)
return -EINVAL;
}
- base = devm_request_and_ioremap(&pdev->dev, res);
- if (!base) {
- dev_err(&pdev->dev, "Cannot request/ioremap I2C registers\n");
- return -EADDRNOTAVAIL;
- }
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index a005265461da..93f029e98c0d 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -7,6 +7,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -225,11 +226,9 @@ static int xlr_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->iobase = devm_request_and_ioremap(&pdev->dev, res);
- if (!priv->iobase) {
- dev_err(&pdev->dev, "devm_request_and_ioremap failed\n");
- return -EBUSY;
- }
+ priv->iobase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->iobase))
+ return PTR_ERR(priv->iobase);
priv->adap.dev.parent = &pdev->dev;
priv->adap.owner = THIS_MODULE;
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index a0edd9854218..0be5b83c08fa 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -19,7 +19,6 @@ config I2C_MUX_GPIO
config I2C_MUX_PCA9541
tristate "NXP PCA9541 I2C Master Selector"
- depends on EXPERIMENTAL
help
If you say yes here you get support for the NXP PCA9541
I2C Master Selector.
@@ -29,7 +28,6 @@ config I2C_MUX_PCA9541
config I2C_MUX_PCA954x
tristate "Philips PCA954x I2C Mux/switches"
- depends on EXPERIMENTAL
help
If you say yes here you get support for the Philips PCA954x
I2C mux/switch devices.
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 1e44d04d1b22..a43c0ce5e3d8 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -167,7 +167,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
}
mux->busses = devm_kzalloc(&pdev->dev,
- sizeof(mux->busses) * mux->pdata->bus_count,
+ sizeof(*mux->busses) * mux->pdata->bus_count,
GFP_KERNEL);
if (!mux->busses) {
dev_err(&pdev->dev, "Cannot allocate busses\n");
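
For illustration only (not part of the patch): the one-character fix above sizes the allocation by sizeof(*mux->busses) (one element) instead of sizeof(mux->busses) (one pointer). A small standalone sketch of why that matters; struct bus and the sizes involved are made up for the example:

/* Illustrative only: sizeof(*p) vs sizeof(p) when sizing an allocation. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bus {
	int id;
	char name[32];   /* element is much bigger than a pointer */
};

int main(void)
{
	struct bus *busses;
	size_t count = 4;

	printf("sizeof(busses)  = %zu (size of the pointer)\n", sizeof(busses));
	printf("sizeof(*busses) = %zu (size of one element)\n", sizeof(*busses));

	/* Correct: one full element per bus.  The buggy form would have
	 * reserved only count * sizeof(void *) bytes. */
	busses = calloc(count, sizeof(*busses));
	if (!busses)
		return 1;

	busses[count - 1].id = 3;                 /* safe with the correct size */
	strcpy(busses[count - 1].name, "i2c-3");
	printf("last bus: %d %s\n", busses[count - 1].id, busses[count - 1].name);
	free(busses);
	return 0;
}
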
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 5a26584934ca..02906ca99b41 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -322,8 +322,7 @@ config BLK_DEV_GENERIC
which otherwise might not be supported.
config BLK_DEV_OPTI621
- tristate "OPTi 82C621 chipset enhanced support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "OPTi 82C621 chipset enhanced support"
select BLK_DEV_IDEPCI
help
This is a driver for the OPTi 82C621 EIDE controller.
@@ -417,7 +416,6 @@ config BLK_DEV_CY82C693
config BLK_DEV_CS5520
tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
- depends on EXPERIMENTAL
select BLK_DEV_IDEDMA_PCI
help
Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -702,11 +700,6 @@ config BLK_DEV_IDE_TX4939
depends on SOC_TX4939
select BLK_DEV_IDEDMA_SFF
-config BLK_DEV_IDE_AT91
- tristate "Atmel AT91 (SAM9, CAP9, AT572D940HF) IDE support"
- depends on ARM && ARCH_AT91 && !ARCH_AT91RM9200 && !ARCH_AT91X40
- select IDE_TIMINGS
-
config BLK_DEV_IDE_ICSIDE
tristate "ICS IDE interface support"
depends on ARM && ARCH_ACORN
@@ -761,8 +754,8 @@ config BLK_DEV_GAYLE
use Gayle IDE interfaces on the Zorro expansion bus.
config BLK_DEV_BUDDHA
- tristate "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)"
- depends on ZORRO && EXPERIMENTAL
+ tristate "Buddha/Catweasel/X-Surf IDE interface support"
+ depends on ZORRO
help
This is the IDE driver for the IDE interfaces on the Buddha, Catweasel
and X-Surf expansion boards. It supports up to two interfaces on the
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index 8489eb58a52c..4732dfc15447 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -18,7 +18,6 @@ config I7300_IDLE_IOAT_CHANNEL
config I7300_IDLE
tristate "Intel chipset idle memory power saving driver"
select I7300_IDLE_IOAT_CHANNEL
- depends on EXPERIMENTAL
help
Enable memory power savings when idle with certain Intel server
chipsets. The chipset must have I/O AT support, such as the
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index fa080ebd568f..ffeebc7e9f1c 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -75,7 +75,7 @@ static unsigned long past_skip;
static struct pci_dev *fbd_dev;
-static spinlock_t i7300_idle_lock;
+static raw_spinlock_t i7300_idle_lock;
static int i7300_idle_active;
static u8 i7300_idle_thrtctl_saved;
@@ -457,7 +457,7 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
idle_begin_time = ktime_get();
}
- spin_lock_irqsave(&i7300_idle_lock, flags);
+ raw_spin_lock_irqsave(&i7300_idle_lock, flags);
if (val == IDLE_START) {
cpumask_set_cpu(smp_processor_id(), idle_cpumask);
@@ -506,7 +506,7 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
}
}
end:
- spin_unlock_irqrestore(&i7300_idle_lock, flags);
+ raw_spin_unlock_irqrestore(&i7300_idle_lock, flags);
return 0;
}
@@ -548,7 +548,7 @@ struct debugfs_file_info {
static int __init i7300_idle_init(void)
{
- spin_lock_init(&i7300_idle_lock);
+ raw_spin_lock_init(&i7300_idle_lock);
total_us = 0;
if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 4ba384f1ab54..5d6675013864 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -74,7 +74,7 @@ static struct cpuidle_driver intel_idle_driver = {
.en_core_tk_irqen = 1,
};
/* intel_idle.max_cstate=0 disables driver */
-static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
+static int max_cstate = CPUIDLE_STATE_MAX - 1;
static unsigned int mwait_substates;
@@ -90,6 +90,7 @@ struct idle_cpu {
* Indicate which enable bits to clear here.
*/
unsigned long auto_demotion_disable_flags;
+ bool disable_promotion_to_c1e;
};
static const struct idle_cpu *icpu;
@@ -109,162 +110,206 @@ static struct cpuidle_state *cpuidle_state_table;
#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
/*
+ * MWAIT takes an 8-bit "hint" in EAX "suggesting"
+ * the C-state (top nibble) and sub-state (bottom nibble)
+ * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
+ *
+ * We store the hint at the top of our "flags" for each state.
+ */
+#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
+#define MWAIT2flg(eax) ((eax & 0xFF) << 24)
+
+/*
* States are indexed by the cstate number,
* which is also the index into the MWAIT hint array.
* Thus C0 is a dummy.
*/
-static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
- { /* MWAIT C0 */ },
- { /* MWAIT C1 */
+static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
+ {
.name = "C1-NHM",
.desc = "MWAIT 0x00",
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 3,
.target_residency = 6,
.enter = &intel_idle },
- { /* MWAIT C2 */
+ {
+ .name = "C1E-NHM",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle },
+ {
.name = "C3-NHM",
.desc = "MWAIT 0x10",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
.target_residency = 80,
.enter = &intel_idle },
- { /* MWAIT C3 */
+ {
.name = "C6-NHM",
.desc = "MWAIT 0x20",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle },
+ {
+ .enter = NULL }
};
-static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
- { /* MWAIT C0 */ },
- { /* MWAIT C1 */
+static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
+ {
.name = "C1-SNB",
.desc = "MWAIT 0x00",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 1,
- .target_residency = 1,
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle },
+ {
+ .name = "C1E-SNB",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 20,
.enter = &intel_idle },
- { /* MWAIT C2 */
+ {
.name = "C3-SNB",
.desc = "MWAIT 0x10",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 211,
.enter = &intel_idle },
- { /* MWAIT C3 */
+ {
.name = "C6-SNB",
.desc = "MWAIT 0x20",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 104,
.target_residency = 345,
.enter = &intel_idle },
- { /* MWAIT C4 */
+ {
.name = "C7-SNB",
.desc = "MWAIT 0x30",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 109,
.target_residency = 345,
.enter = &intel_idle },
+ {
+ .enter = NULL }
};
-static struct cpuidle_state ivb_cstates[MWAIT_MAX_NUM_CSTATES] = {
- { /* MWAIT C0 */ },
- { /* MWAIT C1 */
+static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
+ {
.name = "C1-IVB",
.desc = "MWAIT 0x00",
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle },
- { /* MWAIT C2 */
+ {
+ .name = "C1E-IVB",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle },
+ {
.name = "C3-IVB",
.desc = "MWAIT 0x10",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 156,
.enter = &intel_idle },
- { /* MWAIT C3 */
+ {
.name = "C6-IVB",
.desc = "MWAIT 0x20",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 300,
.enter = &intel_idle },
- { /* MWAIT C4 */
+ {
.name = "C7-IVB",
.desc = "MWAIT 0x30",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 87,
.target_residency = 300,
.enter = &intel_idle },
+ {
+ .enter = NULL }
};
-static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
- { /* MWAIT C0 */ },
- { /* MWAIT C1 */
- .name = "C1-ATM",
+static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
+ {
+ .name = "C1-HSW",
.desc = "MWAIT 0x00",
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .exit_latency = 1,
- .target_residency = 4,
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 2,
+ .target_residency = 2,
.enter = &intel_idle },
- { /* MWAIT C2 */
+ {
+ .name = "C1E-HSW",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle },
+ {
+ .name = "C3-HSW",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 33,
+ .target_residency = 100,
+ .enter = &intel_idle },
+ {
+ .name = "C6-HSW",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 133,
+ .target_residency = 400,
+ .enter = &intel_idle },
+ {
+ .name = "C7s-HSW",
+ .desc = "MWAIT 0x32",
+ .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 166,
+ .target_residency = 500,
+ .enter = &intel_idle },
+ {
+ .enter = NULL }
+};
+
+static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
+ {
+ .name = "C1E-ATM",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle },
+ {
.name = "C2-ATM",
.desc = "MWAIT 0x10",
- .flags = CPUIDLE_FLAG_TIME_VALID,
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 20,
.target_residency = 80,
.enter = &intel_idle },
- { /* MWAIT C3 */ },
- { /* MWAIT C4 */
+ {
.name = "C4-ATM",
.desc = "MWAIT 0x30",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
.enter = &intel_idle },
- { /* MWAIT C5 */ },
- { /* MWAIT C6 */
+ {
.name = "C6-ATM",
.desc = "MWAIT 0x52",
- .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
.enter = &intel_idle },
+ {
+ .enter = NULL }
};
-static long get_driver_data(int cstate)
-{
- int driver_data;
- switch (cstate) {
-
- case 1: /* MWAIT C1 */
- driver_data = 0x00;
- break;
- case 2: /* MWAIT C2 */
- driver_data = 0x10;
- break;
- case 3: /* MWAIT C3 */
- driver_data = 0x20;
- break;
- case 4: /* MWAIT C4 */
- driver_data = 0x30;
- break;
- case 5: /* MWAIT C5 */
- driver_data = 0x40;
- break;
- case 6: /* MWAIT C6 */
- driver_data = 0x52;
- break;
- default:
- driver_data = 0x00;
- }
- return driver_data;
-}
-
/**
* intel_idle
* @dev: cpuidle_device
@@ -278,8 +323,7 @@ static int intel_idle(struct cpuidle_device *dev,
{
unsigned long ecx = 1; /* break on interrupt flag */
struct cpuidle_state *state = &drv->states[index];
- struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
- unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
+ unsigned long eax = flg2MWAIT(state->flags);
unsigned int cstate;
int cpu = smp_processor_id();
@@ -362,10 +406,19 @@ static void auto_demotion_disable(void *dummy)
msr_bits &= ~(icpu->auto_demotion_disable_flags);
wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
}
+static void c1e_promotion_disable(void *dummy)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ msr_bits &= ~0x2;
+ wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
static const struct idle_cpu idle_cpu_nehalem = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
+ .disable_promotion_to_c1e = true,
};
static const struct idle_cpu idle_cpu_atom = {
@@ -379,10 +432,17 @@ static const struct idle_cpu idle_cpu_lincroft = {
static const struct idle_cpu idle_cpu_snb = {
.state_table = snb_cstates,
+ .disable_promotion_to_c1e = true,
};
static const struct idle_cpu idle_cpu_ivb = {
.state_table = ivb_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
+static const struct idle_cpu idle_cpu_hsw = {
+ .state_table = hsw_cstates,
+ .disable_promotion_to_c1e = true,
};
#define ICPU(model, cpu) \
@@ -402,6 +462,9 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x2d, idle_cpu_snb),
ICPU(0x3a, idle_cpu_ivb),
ICPU(0x3e, idle_cpu_ivb),
+ ICPU(0x3c, idle_cpu_hsw),
+ ICPU(0x3f, idle_cpu_hsw),
+ ICPU(0x45, idle_cpu_hsw),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -448,8 +511,6 @@ static int intel_idle_probe(void)
else
on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
- register_cpu_notifier(&cpu_hotplug_notifier);
-
pr_debug(PREFIX "v" INTEL_IDLE_VERSION
" model 0x%X\n", boot_cpu_data.x86_model);
@@ -486,32 +547,31 @@ static int intel_idle_cpuidle_driver_init(void)
drv->state_count = 1;
- for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
- int num_substates;
+ for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
+ int num_substates, mwait_hint, mwait_cstate, mwait_substate;
- if (cstate > max_cstate) {
+ if (cpuidle_state_table[cstate].enter == NULL)
+ break;
+
+ if (cstate + 1 > max_cstate) {
printk(PREFIX "max_cstate %d reached\n",
max_cstate);
break;
}
+ mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
+ mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
+ mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
+
/* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((cstate) * 4))
+ num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
& MWAIT_SUBSTATE_MASK;
- if (num_substates == 0)
- continue;
- /* is the state not enabled? */
- if (cpuidle_state_table[cstate].enter == NULL) {
- /* does the driver not know about the state? */
- if (*cpuidle_state_table[cstate].name == '\0')
- pr_debug(PREFIX "unaware of model 0x%x"
- " MWAIT %d please"
- " contact lenb@kernel.org\n",
- boot_cpu_data.x86_model, cstate);
+
+ /* if sub-state in table is not enumerated by CPUID */
+ if ((mwait_substate + 1) > num_substates)
continue;
- }
- if ((cstate > 2) &&
+ if (((mwait_cstate + 1) > 2) &&
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle"
" states deeper than C2");
@@ -525,6 +585,9 @@ static int intel_idle_cpuidle_driver_init(void)
if (icpu->auto_demotion_disable_flags)
on_each_cpu(auto_demotion_disable, NULL, 1);
+ if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */
+ on_each_cpu(c1e_promotion_disable, NULL, 1);
+
return 0;
}
@@ -543,25 +606,28 @@ static int intel_idle_cpu_init(int cpu)
dev->state_count = 1;
- for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
- int num_substates;
+ for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
+ int num_substates, mwait_hint, mwait_cstate, mwait_substate;
+
+ if (cpuidle_state_table[cstate].enter == NULL)
+ continue;
- if (cstate > max_cstate) {
+ if (cstate + 1 > max_cstate) {
printk(PREFIX "max_cstate %d reached\n", max_cstate);
break;
}
+ mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
+ mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
+ mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
+
/* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((cstate) * 4))
- & MWAIT_SUBSTATE_MASK;
- if (num_substates == 0)
- continue;
- /* is the state not enabled? */
- if (cpuidle_state_table[cstate].enter == NULL)
- continue;
+ num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
+ & MWAIT_SUBSTATE_MASK;
- dev->states_usage[dev->state_count].driver_data =
- (void *)get_driver_data(cstate);
+ /* if sub-state in table is not enumerated by CPUID */
+ if ((mwait_substate + 1) > num_substates)
+ continue;
dev->state_count += 1;
}
@@ -612,6 +678,7 @@ static int __init intel_idle_init(void)
return retval;
}
}
+ register_cpu_notifier(&cpu_hotplug_notifier);
return 0;
}
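
For illustration only (not part of the patch): the intel_idle rework above stops carrying the MWAIT hint in per-state driver_data and instead packs it into the top byte of each state's flags via MWAIT2flg(), recovering it with flg2MWAIT() and splitting it into C-state and sub-state nibbles. A standalone sketch of the bit packing; the HINT2CSTATE()/HINT2SUBSTATE() helpers below are assumed to be simple nibble extractions (the kernel's versions live in <asm/mwait.h>), and the flag values are placeholders:

/* Illustrative only: pack an MWAIT hint into the top byte of a flags word
 * and decode it again. */
#include <stdio.h>

#define CPUIDLE_FLAG_TIME_VALID  0x01u      /* placeholder flag bits */
#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000u

#define MWAIT2flg(eax)      (((eax) & 0xFF) << 24)
#define flg2MWAIT(flags)    (((flags) >> 24) & 0xFF)
#define HINT2CSTATE(hint)   (((hint) >> 4) & 0xF)   /* top nibble */
#define HINT2SUBSTATE(hint) ((hint) & 0xF)          /* bottom nibble */

int main(void)
{
	/* A C7s-HSW style entry: MWAIT hint 0x32 plus ordinary flag bits. */
	unsigned int flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TIME_VALID |
			     CPUIDLE_FLAG_TLB_FLUSHED;
	unsigned int hint = flg2MWAIT(flags);

	printf("flags    = 0x%08x\n", flags);
	printf("hint     = 0x%02x\n", hint);            /* 0x32 */
	printf("cstate   = %u\n", HINT2CSTATE(hint));   /* 3 */
	printf("substate = %u\n", HINT2SUBSTATE(hint)); /* 2 */
	return 0;
}
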
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index fe4bcd7c5b12..bb594963f91e 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -8,9 +8,48 @@ config HID_SENSOR_ACCEL_3D
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID Accelerometers 3D"
help
Say yes here to build support for the HID SENSOR
accelerometers 3D.
+config KXSD9
+ tristate "Kionix KXSD9 Accelerometer Driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Kionix KXSD9 accelerometer.
+ Currently this only supports the device via an SPI interface.
+
+config IIO_ST_ACCEL_3AXIS
+ tristate "STMicroelectronics accelerometers 3-Axis Driver"
+ depends on (I2C || SPI_MASTER) && SYSFS
+ select IIO_ST_SENSORS_CORE
+ select IIO_ST_ACCEL_I2C_3AXIS if (I2C)
+ select IIO_ST_ACCEL_SPI_3AXIS if (SPI_MASTER)
+ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
+ select IIO_ST_ACCEL_BUFFER if (IIO_TRIGGERED_BUFFER)
+ help
+ Say yes here to build support for STMicroelectronics accelerometers:
+ LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
+ LIS331DLH, LSM303DL, LSM303DLM, LSM330.
+
+ This driver can also be built as a module. If so, these modules
+ will be created:
+ - st_accel (core functions for the driver [mandatory]);
+ - st_accel_i2c (needed for the I2C devices [optional*]);
+ - st_accel_spi (needed for the SPI devices [optional*]);
+
+ (*) at least one of the bus modules is needed for the driver to be usable.
+
+config IIO_ST_ACCEL_I2C_3AXIS
+ tristate
+ depends on IIO_ST_ACCEL_3AXIS
+ depends on IIO_ST_SENSORS_I2C
+
+config IIO_ST_ACCEL_SPI_3AXIS
+ tristate
+ depends on IIO_ST_ACCEL_3AXIS
+ depends on IIO_ST_SENSORS_SPI
+
endmenu
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 5bc6855a973e..87d8fa264894 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -3,3 +3,12 @@
#
obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
+
+obj-$(CONFIG_IIO_ST_ACCEL_3AXIS) += st_accel.o
+st_accel-y := st_accel_core.o
+st_accel-$(CONFIG_IIO_BUFFER) += st_accel_buffer.o
+
+obj-$(CONFIG_IIO_ST_ACCEL_I2C_3AXIS) += st_accel_i2c.o
+obj-$(CONFIG_IIO_ST_ACCEL_SPI_3AXIS) += st_accel_spi.o
+
+obj-$(CONFIG_KXSD9) += kxsd9.o
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 0b0c3c66f6c0..dd8ea4284934 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -28,7 +28,6 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include "../common/hid-sensors/hid-sensor-attributes.h"
#include "../common/hid-sensors/hid-sensor-trigger.h"
/*Format: HID-SENSOR-usage_id_in_hex*/
@@ -44,7 +43,7 @@ enum accel_3d_channel {
struct accel_3d_state {
struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_iio_common common_attributes;
+ struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info accel[ACCEL_3D_CHANNEL_MAX];
u32 accel_val[ACCEL_3D_CHANNEL_MAX];
};
diff --git a/drivers/staging/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 318331f08d9c..c2229a521ab9 100644
--- a/drivers/staging/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -94,7 +94,6 @@ error_ret:
static int kxsd9_read(struct iio_dev *indio_dev, u8 address)
{
- struct spi_message msg;
int ret;
struct kxsd9_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
@@ -112,10 +111,7 @@ static int kxsd9_read(struct iio_dev *indio_dev, u8 address)
mutex_lock(&st->buf_lock);
st->tx[0] = KXSD9_READ(address);
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(st->us, &msg);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret)
return ret;
return (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
@@ -226,7 +222,7 @@ static int kxsd9_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct kxsd9_state *st;
- int ret = 0;
+ int ret;
indio_dev = iio_device_alloc(sizeof(*st));
if (indio_dev == NULL) {
@@ -245,14 +241,14 @@ static int kxsd9_probe(struct spi_device *spi)
indio_dev->info = &kxsd9_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
spi->mode = SPI_MODE_0;
spi_setup(spi);
kxsd9_power_up(st);
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_dev;
+
return 0;
error_free_dev:
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
new file mode 100644
index 000000000000..37949b94377d
--- /dev/null
+++ b/drivers/iio/accel/st_accel.h
@@ -0,0 +1,47 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ * v. 1.0.0
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_ACCEL_H
+#define ST_ACCEL_H
+
+#include <linux/types.h>
+#include <linux/iio/common/st_sensors.h>
+
+#define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel"
+#define LIS3DH_ACCEL_DEV_NAME "lis3dh"
+#define LSM330D_ACCEL_DEV_NAME "lsm330d_accel"
+#define LSM330DL_ACCEL_DEV_NAME "lsm330dl_accel"
+#define LSM330DLC_ACCEL_DEV_NAME "lsm330dlc_accel"
+#define LIS331DLH_ACCEL_DEV_NAME "lis331dlh"
+#define LSM303DL_ACCEL_DEV_NAME "lsm303dl_accel"
+#define LSM303DLH_ACCEL_DEV_NAME "lsm303dlh_accel"
+#define LSM303DLM_ACCEL_DEV_NAME "lsm303dlm_accel"
+#define LSM330_ACCEL_DEV_NAME "lsm330_accel"
+
+int st_accel_common_probe(struct iio_dev *indio_dev);
+void st_accel_common_remove(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_IIO_BUFFER
+int st_accel_allocate_ring(struct iio_dev *indio_dev);
+void st_accel_deallocate_ring(struct iio_dev *indio_dev);
+int st_accel_trig_set_state(struct iio_trigger *trig, bool state);
+#define ST_ACCEL_TRIGGER_SET_STATE (&st_accel_trig_set_state)
+#else /* CONFIG_IIO_BUFFER */
+static inline int st_accel_allocate_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void st_accel_deallocate_ring(struct iio_dev *indio_dev)
+{
+}
+#define ST_ACCEL_TRIGGER_SET_STATE NULL
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* ST_ACCEL_H */
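
For illustration only (not part of the patch): st_accel.h above declares the real buffer/trigger helpers only when CONFIG_IIO_BUFFER is set and otherwise supplies static inline no-ops plus a NULL trigger hook, so callers compile unchanged either way. A userspace sketch of the same pattern; FEATURE_BUFFER, allocate_ring() and trig_set_state() are invented stand-ins:

/* Illustrative only: "real function or static-inline stub" selected at
 * compile time, so the probe path needs no #ifdefs of its own. */
#include <stdio.h>

struct device { const char *name; };

#ifdef FEATURE_BUFFER
static int allocate_ring(struct device *dev)
{
	printf("%s: ring buffer allocated\n", dev->name);
	return 0;
}
static int trig_set_state(struct device *dev, int state)
{
	printf("%s: trigger %s\n", dev->name, state ? "on" : "off");
	return 0;
}
#define TRIGGER_SET_STATE (&trig_set_state)   /* real hook when enabled */
#else
static inline int allocate_ring(struct device *dev)
{
	(void)dev;           /* feature compiled out: harmless no-op */
	return 0;
}
#define TRIGGER_SET_STATE NULL                /* no hook when disabled */
#endif

int main(void)
{
	struct device dev = { "accel0" };
	int (*hook)(struct device *, int) = TRIGGER_SET_STATE;

	if (allocate_ring(&dev))
		return 1;
	if (hook)
		hook(&dev, 1);
	else
		printf("no trigger support built in\n");
	return 0;
}
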
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c
new file mode 100644
index 000000000000..6bd82c7f769c
--- /dev/null
+++ b/drivers/iio/accel/st_accel_buffer.c
@@ -0,0 +1,114 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_accel.h"
+
+int st_accel_trig_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = trig->private_data;
+
+ return st_sensors_set_dataready_irq(indio_dev, state);
+}
+
+static int st_accel_buffer_preenable(struct iio_dev *indio_dev)
+{
+ int err;
+
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto st_accel_set_enable_error;
+
+ err = iio_sw_buffer_preenable(indio_dev);
+
+st_accel_set_enable_error:
+ return err;
+}
+
+static int st_accel_buffer_postenable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ adata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (adata->buffer_data == NULL) {
+ err = -ENOMEM;
+ goto allocate_memory_error;
+ }
+
+ err = st_sensors_set_axis_enable(indio_dev,
+ (u8)indio_dev->active_scan_mask[0]);
+ if (err < 0)
+ goto st_accel_buffer_postenable_error;
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err < 0)
+ goto st_accel_buffer_postenable_error;
+
+ return err;
+
+st_accel_buffer_postenable_error:
+ kfree(adata->buffer_data);
+allocate_memory_error:
+ return err;
+}
+
+static int st_accel_buffer_predisable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ err = iio_triggered_buffer_predisable(indio_dev);
+ if (err < 0)
+ goto st_accel_buffer_predisable_error;
+
+ err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
+ if (err < 0)
+ goto st_accel_buffer_predisable_error;
+
+ err = st_sensors_set_enable(indio_dev, false);
+
+st_accel_buffer_predisable_error:
+ kfree(adata->buffer_data);
+ return err;
+}
+
+static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = {
+ .preenable = &st_accel_buffer_preenable,
+ .postenable = &st_accel_buffer_postenable,
+ .predisable = &st_accel_buffer_predisable,
+};
+
+int st_accel_allocate_ring(struct iio_dev *indio_dev)
+{
+ return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &st_sensors_trigger_handler, &st_accel_buffer_setup_ops);
+}
+
+void st_accel_deallocate_ring(struct iio_dev *indio_dev)
+{
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers buffer");
+MODULE_LICENSE("GPL v2");
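
For illustration only (not part of the patch): the buffer postenable/predisable callbacks above use the kernel's goto-based cleanup idiom, where each failure path releases exactly what was already acquired, in reverse order. A minimal userspace sketch of the idiom with invented step names:

/* Illustrative only: goto-based error unwinding.  Each label undoes only
 * the steps that succeeded before the failure. */
#include <stdio.h>
#include <stdlib.h>

static int step_ok(const char *what, int fail)
{
	printf("%s: %s\n", what, fail ? "failed" : "ok");
	return fail ? -1 : 0;
}

static int postenable(int fail_late)
{
	int err;
	char *buffer_data;

	buffer_data = malloc(64);            /* first resource */
	if (!buffer_data) {
		err = -1;
		goto allocate_memory_error;
	}

	err = step_ok("enable axes", 0);     /* second step */
	if (err < 0)
		goto postenable_error;

	err = step_ok("arm trigger", fail_late);
	if (err < 0)
		goto postenable_error;

	free(buffer_data);  /* demo only: a real driver keeps it until predisable */
	return 0;

postenable_error:
	free(buffer_data);                   /* undo the allocation */
allocate_memory_error:
	return err;
}

int main(void)
{
	printf("success path -> %d\n", postenable(0));
	printf("failure path -> %d\n", postenable(1));
	return 0;
}
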
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
new file mode 100644
index 000000000000..e0f5a3ceba5e
--- /dev/null
+++ b/drivers/iio/accel/st_accel_core.c
@@ -0,0 +1,500 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_accel.h"
+
+/* DEFAULT VALUE FOR SENSORS */
+#define ST_ACCEL_DEFAULT_OUT_X_L_ADDR 0x28
+#define ST_ACCEL_DEFAULT_OUT_Y_L_ADDR 0x2a
+#define ST_ACCEL_DEFAULT_OUT_Z_L_ADDR 0x2c
+
+/* FULLSCALE */
+#define ST_ACCEL_FS_AVL_2G 2
+#define ST_ACCEL_FS_AVL_4G 4
+#define ST_ACCEL_FS_AVL_6G 6
+#define ST_ACCEL_FS_AVL_8G 8
+#define ST_ACCEL_FS_AVL_16G 16
+
+/* CUSTOM VALUES FOR SENSOR 1 */
+#define ST_ACCEL_1_WAI_EXP 0x33
+#define ST_ACCEL_1_ODR_ADDR 0x20
+#define ST_ACCEL_1_ODR_MASK 0xf0
+#define ST_ACCEL_1_ODR_AVL_1HZ_VAL 0x01
+#define ST_ACCEL_1_ODR_AVL_10HZ_VAL 0x02
+#define ST_ACCEL_1_ODR_AVL_25HZ_VAL 0x03
+#define ST_ACCEL_1_ODR_AVL_50HZ_VAL 0x04
+#define ST_ACCEL_1_ODR_AVL_100HZ_VAL 0x05
+#define ST_ACCEL_1_ODR_AVL_200HZ_VAL 0x06
+#define ST_ACCEL_1_ODR_AVL_400HZ_VAL 0x07
+#define ST_ACCEL_1_ODR_AVL_1600HZ_VAL 0x08
+#define ST_ACCEL_1_FS_ADDR 0x23
+#define ST_ACCEL_1_FS_MASK 0x30
+#define ST_ACCEL_1_FS_AVL_2_VAL 0x00
+#define ST_ACCEL_1_FS_AVL_4_VAL 0x01
+#define ST_ACCEL_1_FS_AVL_8_VAL 0x02
+#define ST_ACCEL_1_FS_AVL_16_VAL 0x03
+#define ST_ACCEL_1_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
+#define ST_ACCEL_1_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
+#define ST_ACCEL_1_FS_AVL_8_GAIN IIO_G_TO_M_S_2(4000)
+#define ST_ACCEL_1_FS_AVL_16_GAIN IIO_G_TO_M_S_2(12000)
+#define ST_ACCEL_1_BDU_ADDR 0x23
+#define ST_ACCEL_1_BDU_MASK 0x80
+#define ST_ACCEL_1_DRDY_IRQ_ADDR 0x22
+#define ST_ACCEL_1_DRDY_IRQ_MASK 0x10
+#define ST_ACCEL_1_MULTIREAD_BIT true
+
+/* CUSTOM VALUES FOR SENSOR 2 */
+#define ST_ACCEL_2_WAI_EXP 0x32
+#define ST_ACCEL_2_ODR_ADDR 0x20
+#define ST_ACCEL_2_ODR_MASK 0x18
+#define ST_ACCEL_2_ODR_AVL_50HZ_VAL 0x00
+#define ST_ACCEL_2_ODR_AVL_100HZ_VAL 0x01
+#define ST_ACCEL_2_ODR_AVL_400HZ_VAL 0x02
+#define ST_ACCEL_2_ODR_AVL_1000HZ_VAL 0x03
+#define ST_ACCEL_2_PW_ADDR 0x20
+#define ST_ACCEL_2_PW_MASK 0xe0
+#define ST_ACCEL_2_FS_ADDR 0x23
+#define ST_ACCEL_2_FS_MASK 0x30
+#define ST_ACCEL_2_FS_AVL_2_VAL 0X00
+#define ST_ACCEL_2_FS_AVL_4_VAL 0X01
+#define ST_ACCEL_2_FS_AVL_8_VAL 0x03
+#define ST_ACCEL_2_FS_AVL_2_GAIN IIO_G_TO_M_S_2(1000)
+#define ST_ACCEL_2_FS_AVL_4_GAIN IIO_G_TO_M_S_2(2000)
+#define ST_ACCEL_2_FS_AVL_8_GAIN IIO_G_TO_M_S_2(3900)
+#define ST_ACCEL_2_BDU_ADDR 0x23
+#define ST_ACCEL_2_BDU_MASK 0x80
+#define ST_ACCEL_2_DRDY_IRQ_ADDR 0x22
+#define ST_ACCEL_2_DRDY_IRQ_MASK 0x02
+#define ST_ACCEL_2_MULTIREAD_BIT true
+
+/* CUSTOM VALUES FOR SENSOR 3 */
+#define ST_ACCEL_3_WAI_EXP 0x40
+#define ST_ACCEL_3_ODR_ADDR 0x20
+#define ST_ACCEL_3_ODR_MASK 0xf0
+#define ST_ACCEL_3_ODR_AVL_3HZ_VAL 0x01
+#define ST_ACCEL_3_ODR_AVL_6HZ_VAL 0x02
+#define ST_ACCEL_3_ODR_AVL_12HZ_VAL 0x03
+#define ST_ACCEL_3_ODR_AVL_25HZ_VAL 0x04
+#define ST_ACCEL_3_ODR_AVL_50HZ_VAL 0x05
+#define ST_ACCEL_3_ODR_AVL_100HZ_VAL 0x06
+#define ST_ACCEL_3_ODR_AVL_200HZ_VAL 0x07
+#define ST_ACCEL_3_ODR_AVL_400HZ_VAL 0x08
+#define ST_ACCEL_3_ODR_AVL_800HZ_VAL 0x09
+#define ST_ACCEL_3_ODR_AVL_1600HZ_VAL 0x0a
+#define ST_ACCEL_3_FS_ADDR 0x24
+#define ST_ACCEL_3_FS_MASK 0x38
+#define ST_ACCEL_3_FS_AVL_2_VAL 0X00
+#define ST_ACCEL_3_FS_AVL_4_VAL 0X01
+#define ST_ACCEL_3_FS_AVL_6_VAL 0x02
+#define ST_ACCEL_3_FS_AVL_8_VAL 0x03
+#define ST_ACCEL_3_FS_AVL_16_VAL 0x04
+#define ST_ACCEL_3_FS_AVL_2_GAIN IIO_G_TO_M_S_2(61)
+#define ST_ACCEL_3_FS_AVL_4_GAIN IIO_G_TO_M_S_2(122)
+#define ST_ACCEL_3_FS_AVL_6_GAIN IIO_G_TO_M_S_2(183)
+#define ST_ACCEL_3_FS_AVL_8_GAIN IIO_G_TO_M_S_2(244)
+#define ST_ACCEL_3_FS_AVL_16_GAIN IIO_G_TO_M_S_2(732)
+#define ST_ACCEL_3_BDU_ADDR 0x20
+#define ST_ACCEL_3_BDU_MASK 0x08
+#define ST_ACCEL_3_DRDY_IRQ_ADDR 0x23
+#define ST_ACCEL_3_DRDY_IRQ_MASK 0x80
+#define ST_ACCEL_3_IG1_EN_ADDR 0x23
+#define ST_ACCEL_3_IG1_EN_MASK 0x08
+#define ST_ACCEL_3_MULTIREAD_BIT false
+
+static const struct iio_chan_spec st_accel_12bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_X, IIO_MOD_X, IIO_LE,
+ ST_SENSORS_DEFAULT_12_REALBITS, ST_ACCEL_DEFAULT_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Y, IIO_MOD_Y, IIO_LE,
+ ST_SENSORS_DEFAULT_12_REALBITS, ST_ACCEL_DEFAULT_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Z, IIO_MOD_Z, IIO_LE,
+ ST_SENSORS_DEFAULT_12_REALBITS, ST_ACCEL_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct iio_chan_spec st_accel_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_X, IIO_MOD_X, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_ACCEL_DEFAULT_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Y, IIO_MOD_Y, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_ACCEL_DEFAULT_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ACCEL, ST_SENSORS_SCAN_Z, IIO_MOD_Z, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_ACCEL_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct st_sensors st_accel_sensors[] = {
+ {
+ .wai = ST_ACCEL_1_WAI_EXP,
+ .sensors_supported = {
+ [0] = LIS3DH_ACCEL_DEV_NAME,
+ [1] = LSM303DLHC_ACCEL_DEV_NAME,
+ [2] = LSM330D_ACCEL_DEV_NAME,
+ [3] = LSM330DL_ACCEL_DEV_NAME,
+ [4] = LSM330DLC_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_1_ODR_ADDR,
+ .mask = ST_ACCEL_1_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_ACCEL_1_ODR_AVL_1HZ_VAL, },
+ { 10, ST_ACCEL_1_ODR_AVL_10HZ_VAL, },
+ { 25, ST_ACCEL_1_ODR_AVL_25HZ_VAL, },
+ { 50, ST_ACCEL_1_ODR_AVL_50HZ_VAL, },
+ { 100, ST_ACCEL_1_ODR_AVL_100HZ_VAL, },
+ { 200, ST_ACCEL_1_ODR_AVL_200HZ_VAL, },
+ { 400, ST_ACCEL_1_ODR_AVL_400HZ_VAL, },
+ { 1600, ST_ACCEL_1_ODR_AVL_1600HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_1_ODR_ADDR,
+ .mask = ST_ACCEL_1_ODR_MASK,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_ACCEL_1_FS_ADDR,
+ .mask = ST_ACCEL_1_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = ST_ACCEL_1_FS_AVL_2_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_2_GAIN,
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = ST_ACCEL_1_FS_AVL_4_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_4_GAIN,
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = ST_ACCEL_1_FS_AVL_8_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_8_GAIN,
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = ST_ACCEL_1_FS_AVL_16_VAL,
+ .gain = ST_ACCEL_1_FS_AVL_16_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_ACCEL_1_BDU_ADDR,
+ .mask = ST_ACCEL_1_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_1_DRDY_IRQ_ADDR,
+ .mask = ST_ACCEL_1_DRDY_IRQ_MASK,
+ },
+ .multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_ACCEL_2_WAI_EXP,
+ .sensors_supported = {
+ [0] = LIS331DLH_ACCEL_DEV_NAME,
+ [1] = LSM303DL_ACCEL_DEV_NAME,
+ [2] = LSM303DLH_ACCEL_DEV_NAME,
+ [3] = LSM303DLM_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_2_ODR_ADDR,
+ .mask = ST_ACCEL_2_ODR_MASK,
+ .odr_avl = {
+ { 50, ST_ACCEL_2_ODR_AVL_50HZ_VAL, },
+ { 100, ST_ACCEL_2_ODR_AVL_100HZ_VAL, },
+ { 400, ST_ACCEL_2_ODR_AVL_400HZ_VAL, },
+ { 1000, ST_ACCEL_2_ODR_AVL_1000HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_2_PW_ADDR,
+ .mask = ST_ACCEL_2_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_ACCEL_2_FS_ADDR,
+ .mask = ST_ACCEL_2_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = ST_ACCEL_2_FS_AVL_2_VAL,
+ .gain = ST_ACCEL_2_FS_AVL_2_GAIN,
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = ST_ACCEL_2_FS_AVL_4_VAL,
+ .gain = ST_ACCEL_2_FS_AVL_4_GAIN,
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = ST_ACCEL_2_FS_AVL_8_VAL,
+ .gain = ST_ACCEL_2_FS_AVL_8_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_ACCEL_2_BDU_ADDR,
+ .mask = ST_ACCEL_2_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_2_DRDY_IRQ_ADDR,
+ .mask = ST_ACCEL_2_DRDY_IRQ_MASK,
+ },
+ .multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_ACCEL_3_WAI_EXP,
+ .sensors_supported = {
+ [0] = LSM330_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_16bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_3_ODR_ADDR,
+ .mask = ST_ACCEL_3_ODR_MASK,
+ .odr_avl = {
+ { 3, ST_ACCEL_3_ODR_AVL_3HZ_VAL },
+ { 6, ST_ACCEL_3_ODR_AVL_6HZ_VAL, },
+ { 12, ST_ACCEL_3_ODR_AVL_12HZ_VAL, },
+ { 25, ST_ACCEL_3_ODR_AVL_25HZ_VAL, },
+ { 50, ST_ACCEL_3_ODR_AVL_50HZ_VAL, },
+ { 100, ST_ACCEL_3_ODR_AVL_100HZ_VAL, },
+ { 200, ST_ACCEL_3_ODR_AVL_200HZ_VAL, },
+ { 400, ST_ACCEL_3_ODR_AVL_400HZ_VAL, },
+ { 800, ST_ACCEL_3_ODR_AVL_800HZ_VAL, },
+ { 1600, ST_ACCEL_3_ODR_AVL_1600HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_3_ODR_ADDR,
+ .mask = ST_ACCEL_3_ODR_MASK,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_ACCEL_3_FS_ADDR,
+ .mask = ST_ACCEL_3_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = ST_ACCEL_3_FS_AVL_2_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_2_GAIN,
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = ST_ACCEL_3_FS_AVL_4_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_4_GAIN,
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_6G,
+ .value = ST_ACCEL_3_FS_AVL_6_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_6_GAIN,
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = ST_ACCEL_3_FS_AVL_8_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_8_GAIN,
+ },
+ [4] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = ST_ACCEL_3_FS_AVL_16_VAL,
+ .gain = ST_ACCEL_3_FS_AVL_16_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_ACCEL_3_BDU_ADDR,
+ .mask = ST_ACCEL_3_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_3_DRDY_IRQ_ADDR,
+ .mask = ST_ACCEL_3_DRDY_IRQ_MASK,
+ .ig1 = {
+ .en_addr = ST_ACCEL_3_IG1_EN_ADDR,
+ .en_mask = ST_ACCEL_3_IG1_EN_MASK,
+ },
+ },
+ .multi_read_bit = ST_ACCEL_3_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+};
+
+static int st_accel_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val,
+ int *val2, long mask)
+{
+ int err;
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = st_sensors_read_info_raw(indio_dev, ch, val);
+ if (err < 0)
+ goto read_error;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = adata->current_fullscale->gain;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+
+read_error:
+ return err;
+}
+
+static int st_accel_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static ST_SENSOR_DEV_ATTR_SAMP_FREQ();
+static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
+static ST_SENSORS_DEV_ATTR_SCALE_AVAIL(in_accel_scale_available);
+
+static struct attribute *st_accel_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_scale_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group st_accel_attribute_group = {
+ .attrs = st_accel_attributes,
+};
+
+static const struct iio_info accel_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &st_accel_attribute_group,
+ .read_raw = &st_accel_read_raw,
+ .write_raw = &st_accel_write_raw,
+};
+
+#ifdef CONFIG_IIO_TRIGGER
+static const struct iio_trigger_ops st_accel_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
+};
+#define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops)
+#else
+#define ST_ACCEL_TRIGGER_OPS NULL
+#endif
+
+int st_accel_common_probe(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &accel_info;
+
+ err = st_sensors_check_device_support(indio_dev,
+ ARRAY_SIZE(st_accel_sensors), st_accel_sensors);
+ if (err < 0)
+ goto st_accel_common_probe_error;
+
+ adata->multiread_bit = adata->sensor->multi_read_bit;
+ indio_dev->channels = adata->sensor->ch;
+ indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+
+ adata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &adata->sensor->fs.fs_avl[0];
+ adata->odr = adata->sensor->odr.odr_avl[0].hz;
+
+ err = st_sensors_init_sensor(indio_dev);
+ if (err < 0)
+ goto st_accel_common_probe_error;
+
+ if (adata->get_irq_data_ready(indio_dev) > 0) {
+ err = st_accel_allocate_ring(indio_dev);
+ if (err < 0)
+ goto st_accel_common_probe_error;
+
+ err = st_sensors_allocate_trigger(indio_dev,
+ ST_ACCEL_TRIGGER_OPS);
+ if (err < 0)
+ goto st_accel_probe_trigger_error;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto st_accel_device_register_error;
+
+ return err;
+
+st_accel_device_register_error:
+ if (adata->get_irq_data_ready(indio_dev) > 0)
+ st_sensors_deallocate_trigger(indio_dev);
+st_accel_probe_trigger_error:
+ if (adata->get_irq_data_ready(indio_dev) > 0)
+ st_accel_deallocate_ring(indio_dev);
+st_accel_common_probe_error:
+ return err;
+}
+EXPORT_SYMBOL(st_accel_common_probe);
+
+void st_accel_common_remove(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *adata = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (adata->get_irq_data_ready(indio_dev) > 0) {
+ st_sensors_deallocate_trigger(indio_dev);
+ st_accel_deallocate_ring(indio_dev);
+ }
+ iio_device_free(indio_dev);
+}
+EXPORT_SYMBOL(st_accel_common_remove);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers driver");
+MODULE_LICENSE("GPL v2");
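As a side note on the read_raw() path above: scale is reported as val = 0, val2 = gain with IIO_VAL_INT_PLUS_MICRO, so the IIO core exposes it to user space as a fractional number that is simply multiplied by the raw count. Below is a minimal user-space sketch of that conversion; the sysfs paths and the iio:device0 index are assumptions for illustration, not part of this patch.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical paths; the iio:deviceN index depends on probe order. */
#define RAW_PATH   "/sys/bus/iio/devices/iio:device0/in_accel_x_raw"
#define SCALE_PATH "/sys/bus/iio/devices/iio:device0/in_accel_x_scale"

static double read_sysfs_double(const char *path)
{
	FILE *f = fopen(path, "r");
	double v;

	if (!f || fscanf(f, "%lf", &v) != 1) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fclose(f);
	return v;
}

int main(void)
{
	double raw = read_sysfs_double(RAW_PATH);
	double scale = read_sysfs_double(SCALE_PATH);	/* fractional gain from IIO_VAL_INT_PLUS_MICRO */

	/* Physical value = raw count * scale. */
	printf("accel x = %f\n", raw * scale);
	return 0;
}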
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
new file mode 100644
index 000000000000..ffc9d097e484
--- /dev/null
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -0,0 +1,86 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_i2c.h>
+#include "st_accel.h"
+
+static int st_accel_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *adata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*adata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ adata = iio_priv(indio_dev);
+ adata->dev = &client->dev;
+
+ st_sensors_i2c_configure(indio_dev, client, adata);
+
+ err = st_accel_common_probe(indio_dev);
+ if (err < 0)
+ goto st_accel_common_probe_error;
+
+ return 0;
+
+st_accel_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_accel_i2c_remove(struct i2c_client *client)
+{
+ st_accel_common_remove(i2c_get_clientdata(client));
+
+ return 0;
+}
+
+static const struct i2c_device_id st_accel_id_table[] = {
+ { LSM303DLH_ACCEL_DEV_NAME },
+ { LSM303DLHC_ACCEL_DEV_NAME },
+ { LIS3DH_ACCEL_DEV_NAME },
+ { LSM330D_ACCEL_DEV_NAME },
+ { LSM330DL_ACCEL_DEV_NAME },
+ { LSM330DLC_ACCEL_DEV_NAME },
+ { LIS331DLH_ACCEL_DEV_NAME },
+ { LSM303DL_ACCEL_DEV_NAME },
+ { LSM303DLM_ACCEL_DEV_NAME },
+ { LSM330_ACCEL_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
+
+static struct i2c_driver st_accel_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-accel-i2c",
+ },
+ .probe = st_accel_i2c_probe,
+ .remove = st_accel_i2c_remove,
+ .id_table = st_accel_id_table,
+};
+module_i2c_driver(st_accel_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
new file mode 100644
index 000000000000..22b35bfea7d2
--- /dev/null
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -0,0 +1,85 @@
+/*
+ * STMicroelectronics accelerometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_spi.h>
+#include "st_accel.h"
+
+static int st_accel_spi_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *adata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*adata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ adata = iio_priv(indio_dev);
+ adata->dev = &spi->dev;
+
+ st_sensors_spi_configure(indio_dev, spi, adata);
+
+ err = st_accel_common_probe(indio_dev);
+ if (err < 0)
+ goto st_accel_common_probe_error;
+
+ return 0;
+
+st_accel_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_accel_spi_remove(struct spi_device *spi)
+{
+ st_accel_common_remove(spi_get_drvdata(spi));
+
+ return 0;
+}
+
+static const struct spi_device_id st_accel_id_table[] = {
+ { LSM303DLH_ACCEL_DEV_NAME },
+ { LSM303DLHC_ACCEL_DEV_NAME },
+ { LIS3DH_ACCEL_DEV_NAME },
+ { LSM330D_ACCEL_DEV_NAME },
+ { LSM330DL_ACCEL_DEV_NAME },
+ { LSM330DLC_ACCEL_DEV_NAME },
+ { LIS331DLH_ACCEL_DEV_NAME },
+ { LSM303DL_ACCEL_DEV_NAME },
+ { LSM303DLM_ACCEL_DEV_NAME },
+ { LSM330_ACCEL_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, st_accel_id_table);
+
+static struct spi_driver st_accel_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-accel-spi",
+ },
+ .probe = st_accel_spi_probe,
+ .remove = st_accel_spi_remove,
+ .id_table = st_accel_id_table,
+};
+module_spi_driver(st_accel_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics accelerometers spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index fe822a14d130..e372257a8494 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -100,10 +100,8 @@ config LP8788_ADC
config MAX1363
tristate "Maxim max1363 ADC driver"
depends on I2C
- select IIO_TRIGGER
- select MAX1363_RING_BUFFER
select IIO_BUFFER
- select IIO_KFIFO_BUF
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for many Maxim i2c analog to digital
converters (ADC). (max1361, max1362, max1363, max1364, max1036,
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 4a5f639bc684..bbad9b94cd75 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -411,7 +411,11 @@ static int ad7266_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- st->vref_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ st->vref_uv = ret;
} else {
/* Use internal reference */
st->vref_uv = 2500000;
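The ad7266 hunk above (and the matching hunks in several DAC probes later in this log) applies one recurring fix: regulator_get_voltage() returns either the voltage in microvolts or a negative errno, so the return value must be checked before it is cached. A minimal sketch of the checked pattern; the "vref" supply name and function name are only illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_vref_uv(struct device *dev, int *vref_uv)
{
	struct regulator *vref;
	int ret;

	vref = devm_regulator_get(dev, "vref");	/* supply name is illustrative */
	if (IS_ERR(vref))
		return PTR_ERR(vref);

	ret = regulator_enable(vref);
	if (ret)
		return ret;

	ret = regulator_get_voltage(vref);	/* microvolts, or a negative errno */
	if (ret < 0) {
		regulator_disable(vref);
		return ret;
	}

	*vref_uv = ret;
	return 0;
}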
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 04b013561f0f..83c836ba600f 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -80,7 +80,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
*timestamp = pf->timestamp;
}
- iio_push_to_buffers(indio_dev, (u8 *)st->buffer);
+ iio_push_to_buffers(idev, (u8 *)st->buffer);
iio_trigger_notify_done(idev->trig);
@@ -557,9 +557,9 @@ static int at91_adc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- st->reg_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!st->reg_base) {
- ret = -ENOMEM;
+ st->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(st->reg_base)) {
+ ret = PTR_ERR(st->reg_base);
goto error_free_device;
}
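The at91_adc hunk above swaps devm_request_and_ioremap(), which reported failure with NULL, for devm_ioremap_resource(), which returns an ERR_PTR()-encoded errno and prints its own diagnostics. A minimal sketch of the new contract; the probe function and message are illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Never returns NULL: failures come back as ERR_PTR(-errno). */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_info(&pdev->dev, "registers mapped at %pR\n", res);
	return 0;
}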
diff --git a/drivers/iio/adc/lp8788_adc.c b/drivers/iio/adc/lp8788_adc.c
index 72955e45e9e0..763f57565ee4 100644
--- a/drivers/iio/adc/lp8788_adc.c
+++ b/drivers/iio/adc/lp8788_adc.c
@@ -179,7 +179,7 @@ static int lp8788_iio_map_register(struct iio_dev *indio_dev,
ret = iio_map_array_register(indio_dev, map);
if (ret) {
- dev_err(adc->lp->dev, "iio map err: %d\n", ret);
+ dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
return ret;
}
@@ -187,12 +187,6 @@ static int lp8788_iio_map_register(struct iio_dev *indio_dev,
return 0;
}
-static inline void lp8788_iio_map_unregister(struct iio_dev *indio_dev,
- struct lp8788_adc *adc)
-{
- iio_map_array_unregister(indio_dev, adc->map);
-}
-
static int lp8788_adc_probe(struct platform_device *pdev)
{
struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
@@ -208,13 +202,14 @@ static int lp8788_adc_probe(struct platform_device *pdev)
adc->lp = lp;
platform_set_drvdata(pdev, indio_dev);
+ indio_dev->dev.of_node = pdev->dev.of_node;
ret = lp8788_iio_map_register(indio_dev, lp->pdata, adc);
if (ret)
goto err_iio_map;
mutex_init(&adc->lock);
- indio_dev->dev.parent = lp->dev;
+ indio_dev->dev.parent = &pdev->dev;
indio_dev->name = pdev->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &lp8788_adc_info;
@@ -223,14 +218,14 @@ static int lp8788_adc_probe(struct platform_device *pdev)
ret = iio_device_register(indio_dev);
if (ret) {
- dev_err(lp->dev, "iio dev register err: %d\n", ret);
+ dev_err(&pdev->dev, "iio dev register err: %d\n", ret);
goto err_iio_device;
}
return 0;
err_iio_device:
- lp8788_iio_map_unregister(indio_dev, adc);
+ iio_map_array_unregister(indio_dev);
err_iio_map:
iio_device_free(indio_dev);
return ret;
@@ -239,10 +234,9 @@ err_iio_map:
static int lp8788_adc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct lp8788_adc *adc = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- lp8788_iio_map_unregister(indio_dev, adc);
+ iio_map_array_unregister(indio_dev);
iio_device_free(indio_dev);
return 0;
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index b5669be6f396..6c1cfb74bdfc 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -39,6 +39,7 @@
#include <linux/iio/driver.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#define MAX1363_SETUP_BYTE(a) ((a) | 0x80)
@@ -55,7 +56,7 @@
#define MAX1363_SETUP_POWER_UP_INT_REF 0x10
#define MAX1363_SETUP_POWER_DOWN_INT_REF 0x00
-/* think about includeing max11600 etc - more settings */
+/* think about including max11600 etc - more settings */
#define MAX1363_SETUP_EXT_CLOCK 0x08
#define MAX1363_SETUP_INT_CLOCK 0x00
#define MAX1363_SETUP_UNIPOLAR 0x00
@@ -86,7 +87,7 @@
/* max123{6-9} only */
#define MAX1236_SCAN_MID_TO_CHANNEL 0x40
-/* max1363 only - merely part of channel selects or don't care for others*/
+/* max1363 only - merely part of channel selects or don't care for others */
#define MAX1363_CONFIG_EN_MON_MODE_READ 0x18
#define MAX1363_CHANNEL_SEL(a) ((a) << 1)
@@ -133,7 +134,7 @@ enum max1363_modes {
* @mode_list: array of available scan modes
* @default_mode: the scan mode in which the chip starts up
* @int_vref_mv: the internal reference voltage
- * @num_channels: number of channels
+ * @num_modes: number of modes
* @bits: accuracy of the adc in bits
*/
struct max1363_chip_info {
@@ -152,7 +153,7 @@ struct max1363_chip_info {
* @client: i2c_client
* @setupbyte: cache of current device setup byte
* @configbyte: cache of current device config byte
- * @chip_info: chip model specific constants, available modes etc
+ * @chip_info: chip model specific constants, available modes, etc.
* @current_mode: the scan mode of this chip
* @requestedmask: a valid requested set of channels
* @reg: supply regulator
@@ -162,6 +163,8 @@ struct max1363_chip_info {
* @mask_low: bitmask for enabled low thresholds
* @thresh_high: high threshold values
* @thresh_low: low threshold values
+ * @vref: Reference voltage regulator
+ * @vref_uv: Actual (external or internal) reference voltage
*/
struct max1363_state {
struct i2c_client *client;
@@ -181,6 +184,8 @@ struct max1363_state {
 	/* 4x unipolar first then the four bipolar ones */
s16 thresh_high[8];
s16 thresh_low[8];
+ struct regulator *vref;
+ u32 vref_uv;
};
#define MAX1363_MODE_SINGLE(_num, _mask) { \
@@ -293,7 +298,7 @@ static const struct max1363_mode max1363_mode_table[] = {
static const struct max1363_mode
*max1363_match_mode(const unsigned long *mask,
-const struct max1363_chip_info *ci)
+ const struct max1363_chip_info *ci)
{
int i;
if (mask)
@@ -334,7 +339,7 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
{
int ret = 0;
s32 data;
- char rxbuf[2];
+ u8 rxbuf[2];
struct max1363_state *st = iio_priv(indio_dev);
struct i2c_client *client = st->client;
@@ -366,7 +371,8 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
ret = data;
goto error_ret;
}
- data = (s32)(rxbuf[1]) | ((s32)(rxbuf[0] & 0x0F)) << 8;
+ data = (rxbuf[1] | rxbuf[0] << 8) &
+ ((1 << st->chip_info->bits) - 1);
} else {
/* Get reading */
data = i2c_master_recv(client, rxbuf, 1);
@@ -391,6 +397,8 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
{
struct max1363_state *st = iio_priv(indio_dev);
int ret;
+ unsigned long scale_uv;
+
switch (m) {
case IIO_CHAN_INFO_RAW:
ret = max1363_read_single_chan(indio_dev, chan, val, m);
@@ -398,16 +406,10 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- if ((1 << (st->chip_info->bits + 1)) >
- st->chip_info->int_vref_mv) {
- *val = 0;
- *val2 = 500000;
- return IIO_VAL_INT_PLUS_MICRO;
- } else {
- *val = (st->chip_info->int_vref_mv)
- >> st->chip_info->bits;
- return IIO_VAL_INT;
- }
+ scale_uv = st->vref_uv >> st->chip_info->bits;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -1388,13 +1390,17 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
static int max1363_initial_setup(struct max1363_state *st)
{
- st->setupbyte = MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_VDD
- | MAX1363_SETUP_POWER_UP_INT_REF
- | MAX1363_SETUP_INT_CLOCK
+ st->setupbyte = MAX1363_SETUP_INT_CLOCK
| MAX1363_SETUP_UNIPOLAR
| MAX1363_SETUP_NORESET;
- /* Set scan mode writes the config anyway so wait until then*/
+ if (st->vref)
+ st->setupbyte |= MAX1363_SETUP_AIN3_IS_REF_EXT_TO_REF;
+ else
+ st->setupbyte |= MAX1363_SETUP_POWER_UP_INT_REF
+ | MAX1363_SETUP_AIN3_IS_AIN3_REF_IS_INT;
+
+ /* Set scan mode writes the config anyway so wait until then */
st->setupbyte = MAX1363_SETUP_BYTE(st->setupbyte);
st->current_mode = &max1363_mode_table[st->chip_info->default_mode];
st->configbyte = MAX1363_CONFIG_BYTE(st->configbyte);
@@ -1408,8 +1414,9 @@ static int max1363_alloc_scan_masks(struct iio_dev *indio_dev)
unsigned long *masks;
int i;
- masks = kzalloc(BITS_TO_LONGS(MAX1363_MAX_CHANNELS)*sizeof(long)*
- (st->chip_info->num_modes + 1), GFP_KERNEL);
+ masks = devm_kzalloc(&indio_dev->dev,
+ BITS_TO_LONGS(MAX1363_MAX_CHANNELS) * sizeof(long) *
+ (st->chip_info->num_modes + 1), GFP_KERNEL);
if (!masks)
return -ENOMEM;
@@ -1423,7 +1430,6 @@ static int max1363_alloc_scan_masks(struct iio_dev *indio_dev)
return 0;
}
-
static irqreturn_t max1363_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -1483,54 +1489,13 @@ static const struct iio_buffer_setup_ops max1363_buffered_setup_ops = {
.predisable = &iio_triggered_buffer_predisable,
};
-static int max1363_register_buffered_funcs_and_init(struct iio_dev *indio_dev)
-{
- struct max1363_state *st = iio_priv(indio_dev);
- int ret = 0;
-
- indio_dev->buffer = iio_kfifo_allocate(indio_dev);
- if (!indio_dev->buffer) {
- ret = -ENOMEM;
- goto error_ret;
- }
- indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
- &max1363_trigger_handler,
- IRQF_ONESHOT,
- indio_dev,
- "%s_consumer%d",
- st->client->name,
- indio_dev->id);
- if (indio_dev->pollfunc == NULL) {
- ret = -ENOMEM;
- goto error_deallocate_sw_rb;
- }
- /* Buffer functions - here trigger setup related */
- indio_dev->setup_ops = &max1363_buffered_setup_ops;
-
- /* Flag that polled buffering is possible */
- indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
-
- return 0;
-
-error_deallocate_sw_rb:
- iio_kfifo_free(indio_dev->buffer);
-error_ret:
- return ret;
-}
-
-static void max1363_buffer_cleanup(struct iio_dev *indio_dev)
-{
- /* ensure that the trigger has been detached */
- iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_kfifo_free(indio_dev->buffer);
-}
-
static int max1363_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret;
struct max1363_state *st;
struct iio_dev *indio_dev;
+ struct regulator *vref;
indio_dev = iio_device_alloc(sizeof(struct max1363_state));
if (indio_dev == NULL) {
@@ -1538,13 +1503,14 @@ static int max1363_probe(struct i2c_client *client,
goto error_out;
}
+ indio_dev->dev.of_node = client->dev.of_node;
ret = iio_map_array_register(indio_dev, client->dev.platform_data);
if (ret < 0)
goto error_free_device;
st = iio_priv(indio_dev);
- st->reg = regulator_get(&client->dev, "vcc");
+ st->reg = devm_regulator_get(&client->dev, "vcc");
if (IS_ERR(st->reg)) {
ret = PTR_ERR(st->reg);
goto error_unregister_map;
@@ -1552,7 +1518,7 @@ static int max1363_probe(struct i2c_client *client,
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ goto error_unregister_map;
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
@@ -1560,35 +1526,45 @@ static int max1363_probe(struct i2c_client *client,
st->chip_info = &max1363_chip_info_tbl[id->driver_data];
st->client = client;
+ st->vref_uv = st->chip_info->int_vref_mv * 1000;
+ vref = devm_regulator_get(&client->dev, "vref");
+ if (!IS_ERR(vref)) {
+ int vref_uv;
+
+ ret = regulator_enable(vref);
+ if (ret)
+ goto error_disable_reg;
+ st->vref = vref;
+ vref_uv = regulator_get_voltage(vref);
+ if (vref_uv <= 0) {
+ ret = -EINVAL;
+ goto error_disable_reg;
+ }
+ st->vref_uv = vref_uv;
+ }
+
ret = max1363_alloc_scan_masks(indio_dev);
if (ret)
goto error_disable_reg;
- /* Estabilish that the iio_dev is a child of the i2c device */
+ /* Establish that the iio_dev is a child of the i2c device */
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
indio_dev->info = st->chip_info->info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = st->chip_info->channels;
- indio_dev->num_channels = st->chip_info->num_channels;
ret = max1363_initial_setup(st);
if (ret < 0)
- goto error_free_available_scan_masks;
-
- ret = max1363_register_buffered_funcs_and_init(indio_dev);
- if (ret)
- goto error_free_available_scan_masks;
+ goto error_disable_reg;
- ret = iio_buffer_register(indio_dev,
- st->chip_info->channels,
- st->chip_info->num_channels);
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ &max1363_trigger_handler, &max1363_buffered_setup_ops);
if (ret)
- goto error_cleanup_buffer;
+ goto error_disable_reg;
if (client->irq) {
- ret = request_threaded_irq(st->client->irq,
+ ret = devm_request_threaded_irq(&client->dev, st->client->irq,
NULL,
&max1363_event_handler,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -1601,23 +1577,18 @@ static int max1363_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0)
- goto error_free_irq;
+ goto error_uninit_buffer;
return 0;
-error_free_irq:
- free_irq(st->client->irq, indio_dev);
+
error_uninit_buffer:
- iio_buffer_unregister(indio_dev);
-error_cleanup_buffer:
- max1363_buffer_cleanup(indio_dev);
-error_free_available_scan_masks:
- kfree(indio_dev->available_scan_masks);
-error_unregister_map:
- iio_map_array_unregister(indio_dev, client->dev.platform_data);
+ iio_triggered_buffer_cleanup(indio_dev);
error_disable_reg:
+ if (st->vref)
+ regulator_disable(st->vref);
regulator_disable(st->reg);
-error_put_reg:
- regulator_put(st->reg);
+error_unregister_map:
+ iio_map_array_unregister(indio_dev);
error_free_device:
iio_device_free(indio_dev);
error_out:
@@ -1630,16 +1601,11 @@ static int max1363_remove(struct i2c_client *client)
struct max1363_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (client->irq)
- free_irq(st->client->irq, indio_dev);
- iio_buffer_unregister(indio_dev);
- max1363_buffer_cleanup(indio_dev);
- kfree(indio_dev->available_scan_masks);
- if (!IS_ERR(st->reg)) {
- regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_map_array_unregister(indio_dev, client->dev.platform_data);
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (st->vref)
+ regulator_disable(st->vref);
+ regulator_disable(st->reg);
+ iio_map_array_unregister(indio_dev);
iio_device_free(indio_dev);
return 0;
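Taken together, the max1363 hunks above replace the hand-rolled kfifo and poll-function setup (and its matching cleanup helpers) with iio_triggered_buffer_setup(), undone by a single iio_triggered_buffer_cleanup() on the error and remove paths. A minimal sketch of the resulting pairing, with names invented for illustration:

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

/* Illustrative threaded handler; a real driver pushes its samples here. */
static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	iio_trigger_notify_done(pf->indio_dev->trig);
	return IRQ_HANDLED;
}

static const struct iio_buffer_setup_ops example_setup_ops = {
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};

static int example_register(struct iio_dev *indio_dev)
{
	int ret;

	/* Allocates the software buffer and pollfunc, flags triggered mode. */
	ret = iio_triggered_buffer_setup(indio_dev, NULL,
					 example_trigger_handler,
					 &example_setup_ops);
	if (ret)
		return ret;

	ret = iio_device_register(indio_dev);
	if (ret)
		iio_triggered_buffer_cleanup(indio_dev);

	return ret;
}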
diff --git a/drivers/iio/buffer_cb.c b/drivers/iio/buffer_cb.c
index 4d40e24f3721..9201022945e9 100644
--- a/drivers/iio/buffer_cb.c
+++ b/drivers/iio/buffer_cb.c
@@ -25,7 +25,7 @@ static struct iio_buffer_access_funcs iio_cb_access = {
.store_to = &iio_buffer_cb_store_to,
};
-struct iio_cb_buffer *iio_channel_get_all_cb(const char *name,
+struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
int (*cb)(u8 *data,
void *private),
void *private)
@@ -46,7 +46,7 @@ struct iio_cb_buffer *iio_channel_get_all_cb(const char *name,
cb_buff->buffer.access = &iio_cb_access;
INIT_LIST_HEAD(&cb_buff->buffer.demux_list);
- cb_buff->channels = iio_channel_get_all(name);
+ cb_buff->channels = iio_channel_get_all(dev);
if (IS_ERR(cb_buff->channels)) {
ret = PTR_ERR(cb_buff->channels);
goto error_free_cb_buff;
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
index ed45ee54500c..0b6e97d18fa0 100644
--- a/drivers/iio/common/Kconfig
+++ b/drivers/iio/common/Kconfig
@@ -3,3 +3,4 @@
#
source "drivers/iio/common/hid-sensors/Kconfig"
+source "drivers/iio/common/st_sensors/Kconfig"
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index 81584009b21b..c2352beb5d97 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -7,3 +7,4 @@
#
obj-y += hid-sensors/
+obj-y += st_sensors/
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
index ae10778da7aa..1178121b55b0 100644
--- a/drivers/iio/common/hid-sensors/Kconfig
+++ b/drivers/iio/common/hid-sensors/Kconfig
@@ -6,7 +6,7 @@ menu "Hid Sensor IIO Common"
config HID_SENSOR_IIO_COMMON
tristate "Common modules for all HID Sensor IIO drivers"
depends on HID_SENSOR_HUB
- select IIO_TRIGGER if IIO_BUFFER
+ select HID_SENSOR_IIO_TRIGGER if IIO_BUFFER
help
Say yes here to build support for HID sensor to use
HID sensor common processing for attributes and IIO triggers.
@@ -14,6 +14,17 @@ config HID_SENSOR_IIO_COMMON
HID sensor drivers, this module contains processing for those
attributes.
+config HID_SENSOR_IIO_TRIGGER
+ tristate "Common module (trigger) for all HID Sensor IIO drivers"
+ depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON
+ select IIO_TRIGGER
+ help
+ Say yes here to build trigger support for HID sensors.
+	  Triggers will be sent when all requested attributes have been read.
+
+ If this driver is compiled as a module, it will be named
+ hid-sensor-trigger.
+
config HID_SENSOR_ENUM_BASE_QUIRKS
bool "ENUM base quirks for HID Sensor IIO drivers"
depends on HID_SENSOR_IIO_COMMON
diff --git a/drivers/iio/common/hid-sensors/Makefile b/drivers/iio/common/hid-sensors/Makefile
index 1f463e00c242..22e7c5a82325 100644
--- a/drivers/iio/common/hid-sensors/Makefile
+++ b/drivers/iio/common/hid-sensors/Makefile
@@ -3,4 +3,5 @@
#
obj-$(CONFIG_HID_SENSOR_IIO_COMMON) += hid-sensor-iio-common.o
-hid-sensor-iio-common-y := hid-sensor-attributes.o hid-sensor-trigger.o
+obj-$(CONFIG_HID_SENSOR_IIO_TRIGGER) += hid-sensor-trigger.o
+hid-sensor-iio-common-y := hid-sensor-attributes.o
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 75374955caba..75b54730a963 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -25,7 +25,6 @@
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include "hid-sensor-attributes.h"
static int pow_10(unsigned power)
{
@@ -114,7 +113,7 @@ static u32 convert_to_vtf_format(int size, int exp, int val1, int val2)
return value;
}
-int hid_sensor_read_samp_freq_value(struct hid_sensor_iio_common *st,
+int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
int *val1, int *val2)
{
s32 value;
@@ -141,7 +140,7 @@ int hid_sensor_read_samp_freq_value(struct hid_sensor_iio_common *st,
}
EXPORT_SYMBOL(hid_sensor_read_samp_freq_value);
-int hid_sensor_write_samp_freq_value(struct hid_sensor_iio_common *st,
+int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
int val1, int val2)
{
s32 value;
@@ -169,7 +168,7 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_iio_common *st,
}
EXPORT_SYMBOL(hid_sensor_write_samp_freq_value);
-int hid_sensor_read_raw_hyst_value(struct hid_sensor_iio_common *st,
+int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
int *val1, int *val2)
{
s32 value;
@@ -191,7 +190,7 @@ int hid_sensor_read_raw_hyst_value(struct hid_sensor_iio_common *st,
}
EXPORT_SYMBOL(hid_sensor_read_raw_hyst_value);
-int hid_sensor_write_raw_hyst_value(struct hid_sensor_iio_common *st,
+int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
int val1, int val2)
{
s32 value;
@@ -212,7 +211,7 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
- struct hid_sensor_iio_common *st)
+ struct hid_sensor_common *st)
{
sensor_hub_input_get_attribute_info(hsdev,
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.h b/drivers/iio/common/hid-sensors/hid-sensor-attributes.h
deleted file mode 100644
index a4676a0c3de5..000000000000
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * HID Sensors Driver
- * Copyright (c) 2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#ifndef _HID_SENSORS_ATTRIBUTES_H
-#define _HID_SENSORS_ATTRIBUTES_H
-
-/* Common hid sensor iio structure */
-struct hid_sensor_iio_common {
- struct hid_sensor_hub_device *hsdev;
- struct platform_device *pdev;
- unsigned usage_id;
- bool data_ready;
- struct hid_sensor_hub_attribute_info poll;
- struct hid_sensor_hub_attribute_info report_state;
- struct hid_sensor_hub_attribute_info power_state;
- struct hid_sensor_hub_attribute_info sensitivity;
-};
-
-/*Convert from hid unit expo to regular exponent*/
-static inline int hid_sensor_convert_exponent(int unit_expo)
-{
- if (unit_expo < 0x08)
- return unit_expo;
- else if (unit_expo <= 0x0f)
- return -(0x0f-unit_expo+1);
- else
- return 0;
-}
-
-int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
- u32 usage_id,
- struct hid_sensor_iio_common *st);
-int hid_sensor_write_raw_hyst_value(struct hid_sensor_iio_common *st,
- int val1, int val2);
-int hid_sensor_read_raw_hyst_value(struct hid_sensor_iio_common *st,
- int *val1, int *val2);
-int hid_sensor_write_samp_freq_value(struct hid_sensor_iio_common *st,
- int val1, int val2);
-int hid_sensor_read_samp_freq_value(struct hid_sensor_iio_common *st,
- int *val1, int *val2);
-
-#endif
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index d60198a6ca29..7a525a91105d 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -26,13 +26,12 @@
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/sysfs.h>
-#include "hid-sensor-attributes.h"
#include "hid-sensor-trigger.h"
static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
- struct hid_sensor_iio_common *st = trig->private_data;
+ struct hid_sensor_common *st = trig->private_data;
int state_val;
state_val = state ? 1 : 0;
@@ -64,7 +63,7 @@ static const struct iio_trigger_ops hid_sensor_trigger_ops = {
};
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
- struct hid_sensor_iio_common *attrb)
+ struct hid_sensor_common *attrb)
{
int ret;
struct iio_trigger *trig;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
index fd982971b1b8..9a8731478eda 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
@@ -20,7 +20,7 @@
#define _HID_SENSOR_TRIGGER_H
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
- struct hid_sensor_iio_common *attrb);
+ struct hid_sensor_common *attrb);
void hid_sensor_remove_trigger(struct iio_dev *indio_dev);
#endif
diff --git a/drivers/iio/common/st_sensors/Kconfig b/drivers/iio/common/st_sensors/Kconfig
new file mode 100644
index 000000000000..865f1ca33eb9
--- /dev/null
+++ b/drivers/iio/common/st_sensors/Kconfig
@@ -0,0 +1,14 @@
+#
+# STMicroelectronics sensors common library
+#
+
+config IIO_ST_SENSORS_I2C
+ tristate
+
+config IIO_ST_SENSORS_SPI
+ tristate
+
+config IIO_ST_SENSORS_CORE
+ tristate
+ select IIO_ST_SENSORS_I2C if I2C
+ select IIO_ST_SENSORS_SPI if SPI_MASTER
diff --git a/drivers/iio/common/st_sensors/Makefile b/drivers/iio/common/st_sensors/Makefile
new file mode 100644
index 000000000000..9f3e24f3024b
--- /dev/null
+++ b/drivers/iio/common/st_sensors/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the STMicroelectronics sensor common modules.
+#
+
+obj-$(CONFIG_IIO_ST_SENSORS_I2C) += st_sensors_i2c.o
+obj-$(CONFIG_IIO_ST_SENSORS_SPI) += st_sensors_spi.o
+obj-$(CONFIG_IIO_ST_SENSORS_CORE) += st_sensors.o
+st_sensors-y := st_sensors_core.o
+st_sensors-$(CONFIG_IIO_BUFFER) += st_sensors_buffer.o
+st_sensors-$(CONFIG_IIO_TRIGGER) += st_sensors_trigger.o
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
new file mode 100644
index 000000000000..09b236d6ee89
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -0,0 +1,116 @@
+/*
+ * STMicroelectronics sensors buffer library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/irqreturn.h>
+
+#include <linux/iio/common/st_sensors.h>
+
+
+int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
+{
+ int i, n = 0, len;
+ u8 addr[ST_SENSORS_NUMBER_DATA_CHANNELS];
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ for (i = 0; i < ST_SENSORS_NUMBER_DATA_CHANNELS; i++) {
+ if (test_bit(i, indio_dev->active_scan_mask)) {
+ addr[n] = indio_dev->channels[i].address;
+ n++;
+ }
+ }
+ switch (n) {
+ case 1:
+ len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+ addr[0], ST_SENSORS_BYTE_FOR_CHANNEL, buf,
+ sdata->multiread_bit);
+ break;
+ case 2:
+ if ((addr[1] - addr[0]) == ST_SENSORS_BYTE_FOR_CHANNEL) {
+ len = sdata->tf->read_multiple_byte(&sdata->tb,
+ sdata->dev, addr[0],
+ ST_SENSORS_BYTE_FOR_CHANNEL*n,
+ buf, sdata->multiread_bit);
+ } else {
+ u8 rx_array[ST_SENSORS_BYTE_FOR_CHANNEL*
+ ST_SENSORS_NUMBER_DATA_CHANNELS];
+ len = sdata->tf->read_multiple_byte(&sdata->tb,
+ sdata->dev, addr[0],
+ ST_SENSORS_BYTE_FOR_CHANNEL*
+ ST_SENSORS_NUMBER_DATA_CHANNELS,
+ rx_array, sdata->multiread_bit);
+ if (len < 0)
+ goto read_data_channels_error;
+
+ for (i = 0; i < n * ST_SENSORS_NUMBER_DATA_CHANNELS;
+ i++) {
+ if (i < n)
+ buf[i] = rx_array[i];
+ else
+ buf[i] = rx_array[n + i];
+ }
+ len = ST_SENSORS_BYTE_FOR_CHANNEL*n;
+ }
+ break;
+ case 3:
+ len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+ addr[0], ST_SENSORS_BYTE_FOR_CHANNEL*
+ ST_SENSORS_NUMBER_DATA_CHANNELS,
+ buf, sdata->multiread_bit);
+ break;
+ default:
+ len = -EINVAL;
+ goto read_data_channels_error;
+ }
+ if (len != ST_SENSORS_BYTE_FOR_CHANNEL*n) {
+ len = -EIO;
+ goto read_data_channels_error;
+ }
+
+read_data_channels_error:
+ return len;
+}
+EXPORT_SYMBOL(st_sensors_get_buffer_element);
+
+irqreturn_t st_sensors_trigger_handler(int irq, void *p)
+{
+ int len;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
+ if (len < 0)
+ goto st_sensors_get_buffer_element_error;
+
+ if (indio_dev->scan_timestamp)
+ *(s64 *)((u8 *)sdata->buffer_data +
+ ALIGN(len, sizeof(s64))) = pf->timestamp;
+
+ iio_push_to_buffers(indio_dev, sdata->buffer_data);
+
+st_sensors_get_buffer_element_error:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(st_sensors_trigger_handler);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors buffer");
+MODULE_LICENSE("GPL v2");
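st_sensors_trigger_handler() above follows the usual IIO layout of appending the 64-bit timestamp after the sample bytes, rounded up to an 8-byte boundary with ALIGN(). A standalone sketch of just that offset computation for a 3-axis, 16-bit scan; the values are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same rounding as the kernel's ALIGN(x, a) for a power-of-two 'a'. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint8_t scan[16] = { 0 };
	size_t len = 3 * 2;			/* x, y, z as 16-bit samples */
	int64_t ts = 1234567890123456789LL;

	/* The timestamp starts at the next 8-byte boundary: offset 8, not 6. */
	memcpy(scan + ALIGN_UP(len, sizeof(int64_t)), &ts, sizeof(ts));
	printf("timestamp offset = %zu\n", ALIGN_UP(len, sizeof(int64_t)));
	return 0;
}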
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
new file mode 100644
index 000000000000..0198324a8b0c
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -0,0 +1,446 @@
+/*
+ * STMicroelectronics sensors core library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <asm/unaligned.h>
+
+#include <linux/iio/common/st_sensors.h>
+
+
+#define ST_SENSORS_WAI_ADDRESS 0x0f
+
+static int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
+ u8 reg_addr, u8 mask, u8 data)
+{
+ int err;
+ u8 new_data;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev, reg_addr, &new_data);
+ if (err < 0)
+ goto st_sensors_write_data_with_mask_error;
+
+ new_data = ((new_data & (~mask)) | ((data << __ffs(mask)) & mask));
+ err = sdata->tf->write_byte(&sdata->tb, sdata->dev, reg_addr, new_data);
+
+st_sensors_write_data_with_mask_error:
+ return err;
+}
+
+static int st_sensors_match_odr(struct st_sensors *sensor,
+ unsigned int odr, struct st_sensor_odr_avl *odr_out)
+{
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
+ if (sensor->odr.odr_avl[i].hz == 0)
+ goto st_sensors_match_odr_error;
+
+ if (sensor->odr.odr_avl[i].hz == odr) {
+ odr_out->hz = sensor->odr.odr_avl[i].hz;
+ odr_out->value = sensor->odr.odr_avl[i].value;
+ ret = 0;
+ break;
+ }
+ }
+
+st_sensors_match_odr_error:
+ return ret;
+}
+
+int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
+{
+ int err;
+ struct st_sensor_odr_avl odr_out;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = st_sensors_match_odr(sdata->sensor, odr, &odr_out);
+ if (err < 0)
+ goto st_sensors_match_odr_error;
+
+ if ((sdata->sensor->odr.addr == sdata->sensor->pw.addr) &&
+ (sdata->sensor->odr.mask == sdata->sensor->pw.mask)) {
+ if (sdata->enabled == true) {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->odr.addr,
+ sdata->sensor->odr.mask,
+ odr_out.value);
+ } else {
+ err = 0;
+ }
+ } else {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->odr.addr, sdata->sensor->odr.mask,
+ odr_out.value);
+ }
+ if (err >= 0)
+ sdata->odr = odr_out.hz;
+
+st_sensors_match_odr_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_odr);
+
+static int st_sensors_match_fs(struct st_sensors *sensor,
+ unsigned int fs, int *index_fs_avl)
+{
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
+ if (sensor->fs.fs_avl[i].num == 0)
+ goto st_sensors_match_odr_error;
+
+ if (sensor->fs.fs_avl[i].num == fs) {
+ *index_fs_avl = i;
+ ret = 0;
+ break;
+ }
+ }
+
+st_sensors_match_odr_error:
+ return ret;
+}
+
+static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
+{
+ int err, i;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = st_sensors_match_fs(sdata->sensor, fs, &i);
+ if (err < 0)
+ goto st_accel_set_fullscale_error;
+
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->fs.addr,
+ sdata->sensor->fs.mask,
+ sdata->sensor->fs.fs_avl[i].value);
+ if (err < 0)
+ goto st_accel_set_fullscale_error;
+
+ sdata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &sdata->sensor->fs.fs_avl[i];
+ return err;
+
+st_accel_set_fullscale_error:
+ dev_err(&indio_dev->dev, "failed to set new fullscale.\n");
+ return err;
+}
+
+int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable)
+{
+ bool found;
+ u8 tmp_value;
+ int err = -EINVAL;
+ struct st_sensor_odr_avl odr_out;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ if (enable) {
+ found = false;
+ tmp_value = sdata->sensor->pw.value_on;
+ if ((sdata->sensor->odr.addr == sdata->sensor->pw.addr) &&
+ (sdata->sensor->odr.mask == sdata->sensor->pw.mask)) {
+ err = st_sensors_match_odr(sdata->sensor,
+ sdata->odr, &odr_out);
+ if (err < 0)
+ goto set_enable_error;
+ tmp_value = odr_out.value;
+ found = true;
+ }
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->pw.addr,
+ sdata->sensor->pw.mask, tmp_value);
+ if (err < 0)
+ goto set_enable_error;
+
+ sdata->enabled = true;
+
+ if (found)
+ sdata->odr = odr_out.hz;
+ } else {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->pw.addr,
+ sdata->sensor->pw.mask,
+ sdata->sensor->pw.value_off);
+ if (err < 0)
+ goto set_enable_error;
+
+ sdata->enabled = false;
+ }
+
+set_enable_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_enable);
+
+int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ return st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->enable_axis.addr,
+ sdata->sensor->enable_axis.mask, axis_enable);
+}
+EXPORT_SYMBOL(st_sensors_set_axis_enable);
+
+int st_sensors_init_sensor(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_init(&sdata->tb.buf_lock);
+
+ err = st_sensors_set_enable(indio_dev, false);
+ if (err < 0)
+ goto init_error;
+
+ err = st_sensors_set_fullscale(indio_dev,
+ sdata->current_fullscale->num);
+ if (err < 0)
+ goto init_error;
+
+ err = st_sensors_set_odr(indio_dev, sdata->odr);
+ if (err < 0)
+ goto init_error;
+
+ /* set BDU */
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->bdu.addr, sdata->sensor->bdu.mask, true);
+ if (err < 0)
+ goto init_error;
+
+ err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
+
+init_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_init_sensor);
+
+int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
+{
+ int err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ /* Enable/Disable the interrupt generator 1. */
+ if (sdata->sensor->drdy_irq.ig1.en_addr > 0) {
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->drdy_irq.ig1.en_addr,
+ sdata->sensor->drdy_irq.ig1.en_mask, (int)enable);
+ if (err < 0)
+ goto st_accel_set_dataready_irq_error;
+ }
+
+ /* Enable/Disable the interrupt generator for data ready. */
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor->drdy_irq.addr,
+ sdata->sensor->drdy_irq.mask, (int)enable);
+
+st_accel_set_dataready_irq_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_dataready_irq);
+
+int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale)
+{
+ int err = -EINVAL, i;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
+ if ((sdata->sensor->fs.fs_avl[i].gain == scale) &&
+ (sdata->sensor->fs.fs_avl[i].gain != 0)) {
+ err = 0;
+ break;
+ }
+ }
+ if (err < 0)
+ goto st_sensors_match_scale_error;
+
+ err = st_sensors_set_fullscale(indio_dev,
+ sdata->sensor->fs.fs_avl[i].num);
+
+st_sensors_match_scale_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_set_fullscale_by_gain);
+
+static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
+ u8 ch_addr, int *data)
+{
+ int err;
+ u8 outdata[ST_SENSORS_BYTE_FOR_CHANNEL];
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+ ch_addr, ST_SENSORS_BYTE_FOR_CHANNEL,
+ outdata, sdata->multiread_bit);
+ if (err < 0)
+ goto read_error;
+
+ *data = (s16)get_unaligned_le16(outdata);
+
+read_error:
+ return err;
+}
+
+int st_sensors_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val)
+{
+ int err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ err = -EBUSY;
+ goto read_error;
+ } else {
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto read_error;
+
+ msleep((sdata->sensor->bootime * 1000) / sdata->odr);
+ err = st_sensors_read_axis_data(indio_dev, ch->address, val);
+ if (err < 0)
+ goto read_error;
+
+ *val = *val >> ch->scan_type.shift;
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+ return err;
+
+read_error:
+ mutex_unlock(&indio_dev->mlock);
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_read_info_raw);
+
+int st_sensors_check_device_support(struct iio_dev *indio_dev,
+ int num_sensors_list, const struct st_sensors *sensors)
+{
+ u8 wai;
+ int i, n, err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ ST_SENSORS_DEFAULT_WAI_ADDRESS, &wai);
+ if (err < 0) {
+ dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n");
+ goto read_wai_error;
+ }
+
+ for (i = 0; i < num_sensors_list; i++) {
+ if (sensors[i].wai == wai)
+ break;
+ }
+ if (i == num_sensors_list)
+ goto device_not_supported;
+
+ for (n = 0; n < ARRAY_SIZE(sensors[i].sensors_supported); n++) {
+ if (strcmp(indio_dev->name,
+ &sensors[i].sensors_supported[n][0]) == 0)
+ break;
+ }
+ if (n == ARRAY_SIZE(sensors[i].sensors_supported)) {
+ dev_err(&indio_dev->dev, "device name and WhoAmI mismatch.\n");
+ goto sensor_name_mismatch;
+ }
+
+ sdata->sensor = (struct st_sensors *)&sensors[i];
+
+ return i;
+
+device_not_supported:
+ dev_err(&indio_dev->dev, "device not supported: WhoAmI (0x%x).\n", wai);
+sensor_name_mismatch:
+ err = -ENODEV;
+read_wai_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_check_device_support);
+
+ssize_t st_sensors_sysfs_get_sampling_frequency(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_sensor_data *adata = iio_priv(dev_get_drvdata(dev));
+
+ return sprintf(buf, "%d\n", adata->odr);
+}
+EXPORT_SYMBOL(st_sensors_sysfs_get_sampling_frequency);
+
+ssize_t st_sensors_sysfs_set_sampling_frequency(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ int err;
+ unsigned int odr;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+ err = kstrtoint(buf, 10, &odr);
+ if (err < 0)
+ goto conversion_error;
+
+ mutex_lock(&indio_dev->mlock);
+ err = st_sensors_set_odr(indio_dev, odr);
+ mutex_unlock(&indio_dev->mlock);
+
+conversion_error:
+ return err < 0 ? err : size;
+}
+EXPORT_SYMBOL(st_sensors_sysfs_set_sampling_frequency);
+
+ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, len = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ for (i = 0; i < ST_SENSORS_ODR_LIST_MAX; i++) {
+ if (sdata->sensor->odr.odr_avl[i].hz == 0)
+ break;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+ sdata->sensor->odr.odr_avl[i].hz);
+ }
+ mutex_unlock(&indio_dev->mlock);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail);
+
+ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, len = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ for (i = 0; i < ST_SENSORS_FULLSCALE_AVL_MAX; i++) {
+ if (sdata->sensor->fs.fs_avl[i].num == 0)
+ break;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
+ sdata->sensor->fs.fs_avl[i].gain);
+ }
+ mutex_unlock(&indio_dev->mlock);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+EXPORT_SYMBOL(st_sensors_sysfs_scale_avail);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors core");
+MODULE_LICENSE("GPL v2");
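st_sensors_write_data_with_mask() above is a plain read-modify-write: the new field value is shifted into position with __ffs(mask) (the index of the mask's lowest set bit) and merged under the mask. A standalone user-space sketch of just the bit manipulation, using __builtin_ctz in place of the kernel's __ffs(); the register values are made up:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Merge 'value' into the register field selected by 'mask'. */
static uint8_t write_field(uint8_t reg, uint8_t mask, uint8_t value)
{
	unsigned int shift = __builtin_ctz(mask);	/* lowest set bit, like __ffs() */

	return (reg & ~mask) | ((value << shift) & mask);
}

int main(void)
{
	/* e.g. a 4-bit ODR field occupying bits 7:4 of a control register */
	uint8_t reg = 0x27;

	reg = write_field(reg, 0xf0, 0x5);
	assert(reg == 0x57);
	printf("0x%02x\n", reg);
	return 0;
}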
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
new file mode 100644
index 000000000000..38af9440c103
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -0,0 +1,81 @@
+/*
+ * STMicroelectronics sensors i2c library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors_i2c.h>
+
+
+#define ST_SENSORS_I2C_MULTIREAD 0x80
+
+static unsigned int st_sensors_i2c_get_irq(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ return to_i2c_client(sdata->dev)->irq;
+}
+
+static int st_sensors_i2c_read_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 *res_byte)
+{
+ int err;
+
+ err = i2c_smbus_read_byte_data(to_i2c_client(dev), reg_addr);
+ if (err < 0)
+ goto st_accel_i2c_read_byte_error;
+
+ *res_byte = err & 0xff;
+
+st_accel_i2c_read_byte_error:
+ return err < 0 ? err : 0;
+}
+
+static int st_sensors_i2c_read_multiple_byte(
+ struct st_sensor_transfer_buffer *tb, struct device *dev,
+ u8 reg_addr, int len, u8 *data, bool multiread_bit)
+{
+ if (multiread_bit)
+ reg_addr |= ST_SENSORS_I2C_MULTIREAD;
+
+ return i2c_smbus_read_i2c_block_data(to_i2c_client(dev),
+ reg_addr, len, data);
+}
+
+static int st_sensors_i2c_write_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 data)
+{
+ return i2c_smbus_write_byte_data(to_i2c_client(dev), reg_addr, data);
+}
+
+static const struct st_sensor_transfer_function st_sensors_tf_i2c = {
+ .read_byte = st_sensors_i2c_read_byte,
+ .write_byte = st_sensors_i2c_write_byte,
+ .read_multiple_byte = st_sensors_i2c_read_multiple_byte,
+};
+
+void st_sensors_i2c_configure(struct iio_dev *indio_dev,
+ struct i2c_client *client, struct st_sensor_data *sdata)
+{
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = client->name;
+
+ sdata->tf = &st_sensors_tf_i2c;
+ sdata->get_irq_data_ready = st_sensors_i2c_get_irq;
+}
+EXPORT_SYMBOL(st_sensors_i2c_configure);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
new file mode 100644
index 000000000000..f0aa2f105222
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -0,0 +1,128 @@
+/*
+ * STMicroelectronics sensors spi library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors_spi.h>
+
+
+#define ST_SENSORS_SPI_MULTIREAD 0xc0
+#define ST_SENSORS_SPI_READ 0x80
+
+static unsigned int st_sensors_spi_get_irq(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ return to_spi_device(sdata->dev)->irq;
+}
+
+static int st_sensors_spi_read(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, int len, u8 *data, bool multiread_bit)
+{
+ struct spi_message msg;
+ int err;
+
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = tb->tx_buf,
+ .bits_per_word = 8,
+ .len = 1,
+ },
+ {
+ .rx_buf = tb->rx_buf,
+ .bits_per_word = 8,
+ .len = len,
+ }
+ };
+
+ mutex_lock(&tb->buf_lock);
+ if ((multiread_bit) && (len > 1))
+ tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_MULTIREAD;
+ else
+ tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_READ;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ err = spi_sync(to_spi_device(dev), &msg);
+ if (err)
+ goto acc_spi_read_error;
+
+ memcpy(data, tb->rx_buf, len*sizeof(u8));
+ mutex_unlock(&tb->buf_lock);
+ return len;
+
+acc_spi_read_error:
+ mutex_unlock(&tb->buf_lock);
+ return err;
+}
+
+static int st_sensors_spi_read_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 *res_byte)
+{
+ return st_sensors_spi_read(tb, dev, reg_addr, 1, res_byte, false);
+}
+
+static int st_sensors_spi_read_multiple_byte(
+ struct st_sensor_transfer_buffer *tb, struct device *dev,
+ u8 reg_addr, int len, u8 *data, bool multiread_bit)
+{
+ return st_sensors_spi_read(tb, dev, reg_addr, len, data, multiread_bit);
+}
+
+static int st_sensors_spi_write_byte(struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 data)
+{
+ struct spi_message msg;
+ int err;
+
+ struct spi_transfer xfers = {
+ .tx_buf = tb->tx_buf,
+ .bits_per_word = 8,
+ .len = 2,
+ };
+
+ mutex_lock(&tb->buf_lock);
+ tb->tx_buf[0] = reg_addr;
+ tb->tx_buf[1] = data;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers, &msg);
+ err = spi_sync(to_spi_device(dev), &msg);
+ mutex_unlock(&tb->buf_lock);
+
+ return err;
+}
+
+static const struct st_sensor_transfer_function st_sensors_tf_spi = {
+ .read_byte = st_sensors_spi_read_byte,
+ .write_byte = st_sensors_spi_write_byte,
+ .read_multiple_byte = st_sensors_spi_read_multiple_byte,
+};
+
+void st_sensors_spi_configure(struct iio_dev *indio_dev,
+ struct spi_device *spi, struct st_sensor_data *sdata)
+{
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi->modalias;
+
+ sdata->tf = &st_sensors_tf_spi;
+ sdata->get_irq_data_ready = st_sensors_spi_get_irq;
+}
+EXPORT_SYMBOL(st_sensors_spi_configure);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors spi driver");
+MODULE_LICENSE("GPL v2");
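Both transfer back-ends above encode the register address the way the code shows: the I2C path sets bit 7 (ST_SENSORS_I2C_MULTIREAD, 0x80) of the sub-address when a multi-byte, auto-incrementing read is requested, while the SPI path sets bit 7 for any read and bits 7:6 (0xc0) for a multi-byte read. A small standalone sketch of the address-byte construction, reusing the constants from the two files; the 0x28 example register is an assumption:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ST_SENSORS_I2C_MULTIREAD	0x80
#define ST_SENSORS_SPI_READ		0x80
#define ST_SENSORS_SPI_MULTIREAD	0xc0

static uint8_t st_i2c_addr(uint8_t reg, bool multiread)
{
	return multiread ? reg | ST_SENSORS_I2C_MULTIREAD : reg;
}

static uint8_t st_spi_addr(uint8_t reg, bool multiread, int len)
{
	if (multiread && len > 1)
		return reg | ST_SENSORS_SPI_MULTIREAD;
	return reg | ST_SENSORS_SPI_READ;
}

int main(void)
{
	/* e.g. a burst read starting at a hypothetical register 0x28 */
	printf("i2c 0x%02x, spi 0x%02x\n",
	       st_i2c_addr(0x28, true), st_spi_addr(0x28, true, 6));
	return 0;
}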
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
new file mode 100644
index 000000000000..139ed030abb0
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -0,0 +1,77 @@
+/*
+ * STMicroelectronics sensors trigger library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/interrupt.h>
+
+#include <linux/iio/common/st_sensors.h>
+
+
+int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ const struct iio_trigger_ops *trigger_ops)
+{
+ int err;
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ sdata->trig = iio_trigger_alloc("%s-trigger", indio_dev->name);
+ if (sdata->trig == NULL) {
+ err = -ENOMEM;
+ dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
+ goto iio_trigger_alloc_error;
+ }
+
+ err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+ iio_trigger_generic_data_rdy_poll,
+ NULL,
+ IRQF_TRIGGER_RISING,
+ sdata->trig->name,
+ sdata->trig);
+ if (err)
+ goto request_irq_error;
+
+ sdata->trig->private_data = indio_dev;
+ sdata->trig->ops = trigger_ops;
+ sdata->trig->dev.parent = sdata->dev;
+
+ err = iio_trigger_register(sdata->trig);
+ if (err < 0) {
+ dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
+ goto iio_trigger_register_error;
+ }
+ indio_dev->trig = sdata->trig;
+
+ return 0;
+
+iio_trigger_register_error:
+ free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
+request_irq_error:
+ iio_trigger_free(sdata->trig);
+iio_trigger_alloc_error:
+ return err;
+}
+EXPORT_SYMBOL(st_sensors_allocate_trigger);
+
+void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ iio_trigger_unregister(sdata->trig);
+ free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
+ iio_trigger_free(sdata->trig);
+}
+EXPORT_SYMBOL(st_sensors_deallocate_trigger);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 54b46fd3aede..92771217f665 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -213,7 +213,6 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
unsigned int addr)
{
struct ad5360_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
struct spi_transfer t[] = {
{
@@ -226,10 +225,6 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
mutex_lock(&indio_dev->mlock);
st->data[0].d32 = cpu_to_be32(AD5360_CMD(AD5360_CMD_SPECIAL_FUNCTION) |
@@ -237,7 +232,7 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
AD5360_READBACK_TYPE(type) |
AD5360_READBACK_ADDR(addr));
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
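This ad5360 hunk, and the matching ones in the other DAC drivers below, drop the open-coded spi_message_init()/spi_message_add_tail()/spi_sync() sequence in favour of spi_sync_transfer(), which builds the message around an array of transfers internally. A minimal sketch of a two-transfer readback using the helper; the buffers, function name and cs_change choice are illustrative:

#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

static int example_spi_readback(struct spi_device *spi,
				u8 *cmd, u8 *rsp, size_t len)
{
	struct spi_transfer t[] = {
		{
			.tx_buf = cmd,		/* send the readback command */
			.len = len,
			.cs_change = 1,		/* many of these DACs latch on CS toggling */
		}, {
			.rx_buf = rsp,		/* clock the answer back out */
			.len = len,
		},
	};

	/* Equivalent to spi_message_init() + two add_tail calls + spi_sync(). */
	return spi_sync_transfer(spi, t, ARRAY_SIZE(t));
}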
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 6c7898c765d9..483fc379a2da 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -406,7 +406,11 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
goto error_free_reg;
}
- st->vref = regulator_get_voltage(st->vref_reg);
+ ret = regulator_get_voltage(st->vref_reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ st->vref = ret;
} else {
st->vref = st->chip_info->int_vref;
ctrl |= AD5380_CTRL_INT_VREF_EN;
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 43be948db83e..6b86a638dad0 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -127,7 +127,6 @@ static int ad5421_write(struct iio_dev *indio_dev, unsigned int reg,
static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
{
struct ad5421_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
struct spi_transfer t[] = {
{
@@ -140,15 +139,11 @@ static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
mutex_lock(&indio_dev->mlock);
st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 29f653dab2f7..f5583aedfb59 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -226,7 +226,11 @@ static int ad5446_probe(struct device *dev, const char *name,
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(reg);
+ ret = regulator_get_voltage(reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
indio_dev = iio_device_alloc(sizeof(*st));
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index b2a31a0468ed..e5e59749f109 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -85,11 +85,7 @@ static int ad5504_spi_read(struct spi_device *spi, u8 addr)
.rx_buf = &val,
.len = 2,
};
- struct spi_message m;
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
- ret = spi_sync(spi, &m);
+ ret = spi_sync_transfer(spi, &t, 1);
if (ret < 0)
return ret;
@@ -296,7 +292,11 @@ static int ad5504_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(reg);
+ ret = regulator_get_voltage(reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
spi_set_drvdata(spi, indio_dev);
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index e9947969f9fe..f6e116627b71 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -238,7 +238,11 @@ static int ad5624r_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
spi_set_drvdata(spi, indio_dev);
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 36e51382ae52..5e554af21703 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -117,18 +117,13 @@ static int ad5686_spi_read(struct ad5686_state *st, u8 addr)
.len = 3,
},
};
- struct spi_message m;
int ret;
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
st->data[0].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_READBACK_ENABLE) |
AD5686_ADDR(addr));
st->data[1].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP));
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret < 0)
return ret;
@@ -332,7 +327,11 @@ static int ad5686_probe(struct spi_device *spi)
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ goto error_disable_reg;
+
+ voltage_uv = ret;
}
st->chip_info =
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 0869bbd27d30..71faabc6b14e 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -153,7 +153,6 @@ static int ad5755_write_ctrl(struct iio_dev *indio_dev, unsigned int channel,
static int ad5755_read(struct iio_dev *indio_dev, unsigned int addr)
{
struct ad5755_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
struct spi_transfer t[] = {
{
@@ -167,16 +166,12 @@ static int ad5755_read(struct iio_dev *indio_dev, unsigned int addr)
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
mutex_lock(&indio_dev->mlock);
st->data[0].d32 = cpu_to_be32(AD5755_READ_FLAG | (addr << 16));
st->data[1].d32 = cpu_to_be32(AD5755_NOOP);
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index 7f9045e6daa4..5b7acd3a2c77 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -135,7 +135,6 @@ static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
unsigned int *val)
{
struct ad5764_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
struct spi_transfer t[] = {
{
@@ -148,15 +147,11 @@ static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
mutex_lock(&indio_dev->mlock);
st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret >= 0)
*val = be32_to_cpu(st->data[1].d32) & 0xffff;
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index c84180f23139..8dfd3da8a07b 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -125,7 +125,6 @@ static int ad5791_spi_read(struct spi_device *spi, u8 addr, u32 *val)
u8 d8[4];
} data[3];
int ret;
- struct spi_message msg;
struct spi_transfer xfers[] = {
{
.tx_buf = &data[0].d8[1],
@@ -144,10 +143,7 @@ static int ad5791_spi_read(struct spi_device *spi, u8 addr, u32 *val)
AD5791_ADDR(addr));
data[1].d32 = cpu_to_be32(AD5791_ADDR(AD5791_ADDR_NOOP));
- spi_message_init(&msg);
- spi_message_add_tail(&xfers[0], &msg);
- spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(spi, &msg);
+ ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
*val = be32_to_cpu(data[2].d32);
@@ -365,7 +361,11 @@ static int ad5791_probe(struct spi_device *spi)
if (ret)
goto error_put_reg_pos;
- pos_voltage_uv = regulator_get_voltage(st->reg_vdd);
+ ret = regulator_get_voltage(st->reg_vdd);
+ if (ret < 0)
+ goto error_disable_reg_pos;
+
+ pos_voltage_uv = ret;
}
st->reg_vss = regulator_get(&spi->dev, "vss");
@@ -374,7 +374,11 @@ static int ad5791_probe(struct spi_device *spi)
if (ret)
goto error_put_reg_neg;
- neg_voltage_uv = regulator_get_voltage(st->reg_vss);
+ ret = regulator_get_voltage(st->reg_vss);
+ if (ret < 0)
+ goto error_disable_reg_neg;
+
+ neg_voltage_uv = ret;
}
st->pwr_down = true;
@@ -428,6 +432,7 @@ error_put_reg_neg:
if (!IS_ERR(st->reg_vss))
regulator_put(st->reg_vss);
+error_disable_reg_pos:
if (!IS_ERR(st->reg_vdd))
regulator_disable(st->reg_vdd);
error_put_reg_pos:
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 80307473e3a9..1ea132e239ea 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -287,7 +287,6 @@ struct ad9523_state {
static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
{
struct ad9523_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
/* We encode the register size 1..3 bytes into the register address.
@@ -305,15 +304,11 @@ static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
st->data[0].d32 = cpu_to_be32(AD9523_READ |
AD9523_CNT(AD9523_TRANSF_LEN(addr)) |
AD9523_ADDR(addr));
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret < 0)
dev_err(&indio_dev->dev, "read failed (%d)", ret);
else
@@ -326,7 +321,6 @@ static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
static int ad9523_write(struct iio_dev *indio_dev, unsigned addr, unsigned val)
{
struct ad9523_state *st = iio_priv(indio_dev);
- struct spi_message m;
int ret;
struct spi_transfer t[] = {
{
@@ -338,16 +332,12 @@ static int ad9523_write(struct iio_dev *indio_dev, unsigned addr, unsigned val)
},
};
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
st->data[0].d32 = cpu_to_be32(AD9523_WRITE |
AD9523_CNT(AD9523_TRANSF_LEN(addr)) |
AD9523_ADDR(addr));
st->data[1].d32 = cpu_to_be32(val);
- ret = spi_sync(st->spi, &m);
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
if (ret < 0)
dev_err(&indio_dev->dev, "write failed (%d)", ret);
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index e5033b4cfba0..a884252ac66b 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -173,7 +173,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
} while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt);
} while (r_cnt == 0);
- tmp = freq * (u64)st->r1_mod + (st->fpfd > 1);
+ tmp = freq * (u64)st->r1_mod + (st->fpfd >> 1);
do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */
st->r0_fract = do_div(tmp, st->r1_mod);
st->r0_int = tmp;
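The adf4350 change replaces a mistyped `>` with the intended `>>`: adding half of the divisor before do_div() is the standard round-to-nearest integer division, (n + d/2) / d, as the comment says. A tiny standalone check of the idiom (plain C, not driver code):

	#include <stdio.h>

	/* round-to-nearest integer division: (n + d/2) / d */
	static unsigned long long div_round_closest(unsigned long long n,
						    unsigned long long d)
	{
		return (n + d / 2) / d;
	}

	int main(void)
	{
		/* 7/2 -> 4 (3.5 rounds up), 9/4 -> 2 (2.25 rounds down) */
		printf("%llu %llu\n", div_round_closest(7, 2),
		       div_round_closest(9, 4));
		return 0;
	}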
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 48ed1483ff27..6be4628faffe 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -3,6 +3,13 @@
#
menu "Digital gyroscope sensors"
+config ADIS16080
+ tristate "Analog Devices ADIS16080/100 Yaw Rate Gyroscope with SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADIS16080, ADIS16100 Yaw
+ Rate Gyroscope with SPI.
+
config ADIS16136
tristate "Analog devices ADIS16136 and similar gyroscopes driver"
depends on SPI_MASTER
@@ -12,14 +19,63 @@ config ADIS16136
Say yes here to build support for the Analog Devices ADIS16133, ADIS16135,
ADIS16136 gyroscope devices.
+config ADXRS450
+ tristate "Analog Devices ADXRS450/3 Digital Output Gyroscope SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices ADXRS450 and ADXRS453
+ programmable digital output gyroscope.
+
+ This driver can also be built as a module. If so, the module
+ will be called adxrs450.
+
config HID_SENSOR_GYRO_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID Gyroscope 3D"
help
Say yes here to build support for the HID SENSOR
Gyroscope 3D.
+config IIO_ST_GYRO_3AXIS
+ tristate "STMicroelectronics gyroscopes 3-Axis Driver"
+ depends on (I2C || SPI_MASTER) && SYSFS
+ select IIO_ST_SENSORS_CORE
+ select IIO_ST_GYRO_I2C_3AXIS if (I2C)
+ select IIO_ST_GYRO_SPI_3AXIS if (SPI_MASTER)
+ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
+ select IIO_ST_GYRO_BUFFER if (IIO_TRIGGERED_BUFFER)
+ help
+ Say yes here to build support for STMicroelectronics gyroscopes:
+ L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330.
+
+ This driver can also be built as a module. If so, these modules
+ will be created:
+ - st_gyro (core functions for the driver [mandatory]);
+ - st_gyro_i2c (needed for I2C devices [optional*]);
+ - st_gyro_spi (needed for SPI devices [optional*]);
+
+ (*) at least one of the two bus modules is needed for the driver to work.
+
+config IIO_ST_GYRO_I2C_3AXIS
+ tristate
+ depends on IIO_ST_GYRO_3AXIS
+ depends on IIO_ST_SENSORS_I2C
+
+config IIO_ST_GYRO_SPI_3AXIS
+ tristate
+ depends on IIO_ST_GYRO_3AXIS
+ depends on IIO_ST_SENSORS_SPI
+
+config ITG3200
+ tristate "InvenSense ITG3200 Digital 3-Axis Gyroscope I2C driver"
+ depends on I2C
+ select IIO_TRIGGERED_BUFFER if IIO_BUFFER
+ help
+ Say yes here to add support for the InvenSense ITG3200 digital
+ 3-axis gyroscope sensor.
+
endmenu
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index 702a058907e3..225d289082e6 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -2,5 +2,19 @@
# Makefile for industrial I/O gyroscope sensor drivers
#
+obj-$(CONFIG_ADIS16080) += adis16080.o
obj-$(CONFIG_ADIS16136) += adis16136.o
+obj-$(CONFIG_ADXRS450) += adxrs450.o
+
obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o
+
+itg3200-y := itg3200_core.o
+itg3200-$(CONFIG_IIO_BUFFER) += itg3200_buffer.o
+obj-$(CONFIG_ITG3200) += itg3200.o
+
+obj-$(CONFIG_IIO_ST_GYRO_3AXIS) += st_gyro.o
+st_gyro-y := st_gyro_core.o
+st_gyro-$(CONFIG_IIO_BUFFER) += st_gyro_buffer.o
+
+obj-$(CONFIG_IIO_ST_GYRO_I2C_3AXIS) += st_gyro_i2c.o
+obj-$(CONFIG_IIO_ST_GYRO_SPI_3AXIS) += st_gyro_spi.o
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/iio/gyro/adis16080.c
index 3525a68d6a75..1861287911f1 100644
--- a/drivers/staging/iio/gyro/adis16080_core.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -29,48 +29,50 @@
#define ADIS16080_DIN_WRITE (1 << 15)
+struct adis16080_chip_info {
+ int scale_val;
+ int scale_val2;
+};
+
/**
* struct adis16080_state - device instance specific data
* @us: actual spi_device to write data
+ * @info: chip specific parameters
* @buf: transmit or receive buffer
- * @buf_lock: mutex to protect tx and rx
**/
struct adis16080_state {
struct spi_device *us;
- struct mutex buf_lock;
+ const struct adis16080_chip_info *info;
- u8 buf[2] ____cacheline_aligned;
+ __be16 buf ____cacheline_aligned;
};
-static int adis16080_spi_write(struct iio_dev *indio_dev,
- u16 val)
+static int adis16080_read_sample(struct iio_dev *indio_dev,
+ u16 addr, int *val)
{
- int ret;
struct adis16080_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->buf[0] = val >> 8;
- st->buf[1] = val;
-
- ret = spi_write(st->us, st->buf, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int adis16080_spi_read(struct iio_dev *indio_dev,
- u16 *val)
-{
+ struct spi_message m;
int ret;
- struct adis16080_state *st = iio_priv(indio_dev);
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->buf,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->buf,
+ .len = 2,
+ },
+ };
- mutex_lock(&st->buf_lock);
+ st->buf = cpu_to_be16(addr | ADIS16080_DIN_WRITE);
- ret = spi_read(st->us, st->buf, 2);
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+ ret = spi_sync(st->us, &m);
if (ret == 0)
- *val = ((st->buf[0] & 0xF) << 8) | st->buf[1];
- mutex_unlock(&st->buf_lock);
+ *val = sign_extend32(be16_to_cpu(st->buf), 11);
return ret;
}
@@ -81,28 +83,52 @@ static int adis16080_read_raw(struct iio_dev *indio_dev,
int *val2,
long mask)
{
- int ret = -EINVAL;
- u16 ut = 0;
- /* Take the iio_dev status lock */
+ struct adis16080_state *st = iio_priv(indio_dev);
+ int ret;
- mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = adis16080_spi_write(indio_dev,
- chan->address |
- ADIS16080_DIN_WRITE);
- if (ret < 0)
- break;
- ret = adis16080_spi_read(indio_dev, &ut);
- if (ret < 0)
- break;
- *val = ut;
- ret = IIO_VAL_INT;
+ mutex_lock(&indio_dev->mlock);
+ ret = adis16080_read_sample(indio_dev, chan->address, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret ? ret : IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = st->info->scale_val;
+ *val2 = st->info->scale_val2;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_VOLTAGE:
+ /* VREF = 5V, 12 bits */
+ *val = 5000;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ /* 85 C = 585, 25 C = 0 */
+ *val = 85000 - 25000;
+ *val2 = 585;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ /* 2.5 V = 0 */
+ *val = 2048;
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ /* 85 C = 585, 25 C = 0 */
+ *val = DIV_ROUND_CLOSEST(25 * 585, 85 - 25);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
break;
}
- mutex_unlock(&indio_dev->mlock);
- return ret;
+ return -EINVAL;
}
static const struct iio_chan_spec adis16080_channels[] = {
@@ -110,25 +136,32 @@ static const struct iio_chan_spec adis16080_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = ADIS16080_DIN_GYRO,
}, {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
.address = ADIS16080_DIN_AIN1,
}, {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
.address = ADIS16080_DIN_AIN2,
}, {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
.address = ADIS16080_DIN_TEMP,
}
};
@@ -138,8 +171,27 @@ static const struct iio_info adis16080_info = {
.driver_module = THIS_MODULE,
};
+enum {
+ ID_ADIS16080,
+ ID_ADIS16100,
+};
+
+static const struct adis16080_chip_info adis16080_chip_info[] = {
+ [ID_ADIS16080] = {
+ /* 80 degree = 819, 819 rad = 46925 degree */
+ .scale_val = 80,
+ .scale_val2 = 46925,
+ },
+ [ID_ADIS16100] = {
+ /* 300 degree = 1230, 1230 rad = 70474 degree */
+ .scale_val = 300,
+ .scale_val2 = 70474,
+ },
+};
+
static int adis16080_probe(struct spi_device *spi)
{
+ const struct spi_device_id *id = spi_get_device_id(spi);
int ret;
struct adis16080_state *st;
struct iio_dev *indio_dev;
@@ -156,7 +208,7 @@ static int adis16080_probe(struct spi_device *spi)
/* Allocate the comms buffers */
st->us = spi;
- mutex_init(&st->buf_lock);
+ st->info = &adis16080_chip_info[id->driver_data];
indio_dev->name = spi->dev.driver->name;
indio_dev->channels = adis16080_channels;
@@ -176,7 +228,6 @@ error_ret:
return ret;
}
-/* fixme, confirm ordering in this function */
static int adis16080_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
@@ -185,6 +236,13 @@ static int adis16080_remove(struct spi_device *spi)
return 0;
}
+static const struct spi_device_id adis16080_ids[] = {
+ { "adis16080", ID_ADIS16080 },
+ { "adis16100", ID_ADIS16100 },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, adis16080_ids);
+
static struct spi_driver adis16080_driver = {
.driver = {
.name = "adis16080",
@@ -192,10 +250,10 @@ static struct spi_driver adis16080_driver = {
},
.probe = adis16080_probe,
.remove = adis16080_remove,
+ .id_table = adis16080_ids,
};
module_spi_driver(adis16080_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16080/100 Yaw Rate Gyroscope Driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16080");
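With IIO_VAL_FRACTIONAL the adis16080 scale read back by userspace is scale_val / scale_val2, i.e. 80/46925 rad/s per LSB for the ADIS16080: per the comment in the hunk, 80 °/s corresponds to 819 counts, and since 819 rad ≈ 46925°, the same ratio expressed in radians is 80/46925. A standalone sanity check of that arithmetic (the 819-count figure is taken from the driver comment, not re-derived from the datasheet):

	#include <stdio.h>
	#include <math.h>

	int main(void)
	{
		/* ADIS16080: 80 deg/s corresponds to 819 counts (driver comment) */
		double rad_per_lsb = (80.0 / 819.0) * M_PI / 180.0;

		/* what IIO_VAL_FRACTIONAL reports: scale_val / scale_val2 */
		printf("%.7f vs %.7f\n", rad_per_lsb, 80.0 / 46925.0);
		return 0;
	}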
diff --git a/drivers/staging/iio/gyro/adxrs450_core.c b/drivers/iio/gyro/adxrs450.c
index f0ce81da8aca..5b79953f7011 100644
--- a/drivers/staging/iio/gyro/adxrs450_core.c
+++ b/drivers/iio/gyro/adxrs450.c
@@ -21,45 +21,110 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include "adxrs450.h"
+#define ADXRS450_STARTUP_DELAY 50 /* ms */
+
+/* The MSB for the spi commands */
+#define ADXRS450_SENSOR_DATA (0x20 << 24)
+#define ADXRS450_WRITE_DATA (0x40 << 24)
+#define ADXRS450_READ_DATA (0x80 << 24)
+
+#define ADXRS450_RATE1 0x00 /* Rate Registers */
+#define ADXRS450_TEMP1 0x02 /* Temperature Registers */
+#define ADXRS450_LOCST1 0x04 /* Low CST Memory Registers */
+#define ADXRS450_HICST1 0x06 /* High CST Memory Registers */
+#define ADXRS450_QUAD1 0x08 /* Quad Memory Registers */
+#define ADXRS450_FAULT1 0x0A /* Fault Registers */
+#define ADXRS450_PID1 0x0C /* Part ID Register 1 */
+#define ADXRS450_SNH 0x0E /* Serial Number Registers, 4 bytes */
+#define ADXRS450_SNL 0x10
+#define ADXRS450_DNC1 0x12 /* Dynamic Null Correction Registers */
+/* Check bits */
+#define ADXRS450_P 0x01
+#define ADXRS450_CHK 0x02
+#define ADXRS450_CST 0x04
+#define ADXRS450_PWR 0x08
+#define ADXRS450_POR 0x10
+#define ADXRS450_NVM 0x20
+#define ADXRS450_Q 0x40
+#define ADXRS450_PLL 0x80
+#define ADXRS450_UV 0x100
+#define ADXRS450_OV 0x200
+#define ADXRS450_AMP 0x400
+#define ADXRS450_FAIL 0x800
+
+#define ADXRS450_WRERR_MASK (0x7 << 29)
+
+#define ADXRS450_MAX_RX 4
+#define ADXRS450_MAX_TX 4
+
+#define ADXRS450_GET_ST(a) ((a >> 26) & 0x3)
+
+enum {
+ ID_ADXRS450,
+ ID_ADXRS453,
+};
+
+/**
+ * struct adxrs450_state - device instance specific data
+ * @us: actual spi_device
+ * @buf_lock: mutex to protect tx and rx
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ **/
+struct adxrs450_state {
+ struct spi_device *us;
+ struct mutex buf_lock;
+ __be32 tx ____cacheline_aligned;
+ __be32 rx;
+
+};
/**
* adxrs450_spi_read_reg_16() - read 2 bytes from a register pair
- * @dev: device associated with child of actual iio_dev
- * @reg_address: the address of the lower of the two registers,which should be an even address,
- * Second register's address is reg_address + 1.
+ * @indio_dev: device associated with child of actual iio_dev
+ * @reg_address: the address of the lower of the two registers, which should be
+ * an even address, the second register's address is reg_address + 1.
* @val: somewhere to pass back the value read
**/
static int adxrs450_spi_read_reg_16(struct iio_dev *indio_dev,
u8 reg_address,
u16 *val)
{
+ struct spi_message msg;
struct adxrs450_state *st = iio_priv(indio_dev);
+ u32 tx;
int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->tx,
+ .bits_per_word = 8,
+ .len = sizeof(st->tx),
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->rx,
+ .bits_per_word = 8,
+ .len = sizeof(st->rx),
+ },
+ };
mutex_lock(&st->buf_lock);
- st->tx[0] = ADXRS450_READ_DATA | (reg_address >> 7);
- st->tx[1] = reg_address << 1;
- st->tx[2] = 0;
- st->tx[3] = 0;
+ tx = ADXRS450_READ_DATA | (reg_address << 17);
- if (!(hweight32(be32_to_cpu(*(u32 *)st->tx)) & 1))
- st->tx[3] |= ADXRS450_P;
+ if (!(hweight32(tx) & 1))
+ tx |= ADXRS450_P;
- ret = spi_write(st->us, st->tx, 4);
- if (ret) {
- dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n",
- reg_address);
- goto error_ret;
- }
- ret = spi_read(st->us, st->rx, 4);
+ st->tx = cpu_to_be32(tx);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
if (ret) {
dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n",
reg_address);
goto error_ret;
}
- *val = (be32_to_cpu(*(u32 *)st->rx) >> 5) & 0xFFFF;
+ *val = (be32_to_cpu(st->rx) >> 5) & 0xFFFF;
error_ret:
mutex_unlock(&st->buf_lock);
@@ -68,9 +133,9 @@ error_ret:
/**
* adxrs450_spi_write_reg_16() - write 2 bytes data to a register pair
- * @dev: device associated with child of actual actual iio_dev
- * @reg_address: the address of the lower of the two registers,which should be an even address,
- * Second register's address is reg_address + 1.
+ * @indio_dev: device associated with child of actual iio_dev
+ * @reg_address: the address of the lower of the two registers,which should be
+ * an even address, the second register's address is reg_address + 1.
* @val: value to be written.
**/
static int adxrs450_spi_write_reg_16(struct iio_dev *indio_dev,
@@ -78,55 +143,61 @@ static int adxrs450_spi_write_reg_16(struct iio_dev *indio_dev,
u16 val)
{
struct adxrs450_state *st = iio_priv(indio_dev);
+ u32 tx;
int ret;
mutex_lock(&st->buf_lock);
- st->tx[0] = ADXRS450_WRITE_DATA | reg_address >> 7;
- st->tx[1] = reg_address << 1 | val >> 15;
- st->tx[2] = val >> 7;
- st->tx[3] = val << 1;
+ tx = ADXRS450_WRITE_DATA | (reg_address << 17) | (val << 1);
- if (!(hweight32(be32_to_cpu(*(u32 *)st->tx)) & 1))
- st->tx[3] |= ADXRS450_P;
+ if (!(hweight32(tx) & 1))
+ tx |= ADXRS450_P;
- ret = spi_write(st->us, st->tx, 4);
+ st->tx = cpu_to_be32(tx);
+ ret = spi_write(st->us, &st->tx, sizeof(st->tx));
if (ret)
dev_err(&st->us->dev, "problem while writing 16 bit register 0x%02x\n",
reg_address);
- msleep(1); /* enforce sequential transfer delay 0.1ms */
+ usleep_range(100, 1000); /* enforce sequential transfer delay 0.1ms */
mutex_unlock(&st->buf_lock);
return ret;
}
/**
* adxrs450_spi_sensor_data() - read 2 bytes sensor data
- * @dev: device associated with child of actual iio_dev
+ * @indio_dev: device associated with child of actual iio_dev
* @val: somewhere to pass back the value read
**/
static int adxrs450_spi_sensor_data(struct iio_dev *indio_dev, s16 *val)
{
+ struct spi_message msg;
struct adxrs450_state *st = iio_priv(indio_dev);
int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->tx,
+ .bits_per_word = 8,
+ .len = sizeof(st->tx),
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->rx,
+ .bits_per_word = 8,
+ .len = sizeof(st->rx),
+ },
+ };
mutex_lock(&st->buf_lock);
- st->tx[0] = ADXRS450_SENSOR_DATA;
- st->tx[1] = 0;
- st->tx[2] = 0;
- st->tx[3] = 0;
-
- ret = spi_write(st->us, st->tx, 4);
- if (ret) {
- dev_err(&st->us->dev, "Problem while reading sensor data\n");
- goto error_ret;
- }
+ st->tx = cpu_to_be32(ADXRS450_SENSOR_DATA);
- ret = spi_read(st->us, st->rx, 4);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfers[0], &msg);
+ spi_message_add_tail(&xfers[1], &msg);
+ ret = spi_sync(st->us, &msg);
if (ret) {
dev_err(&st->us->dev, "Problem while reading sensor data\n");
goto error_ret;
}
- *val = (be32_to_cpu(*(u32 *)st->rx) >> 10) & 0xFFFF;
+ *val = (be32_to_cpu(st->rx) >> 10) & 0xFFFF;
error_ret:
mutex_unlock(&st->buf_lock);
@@ -137,35 +208,32 @@ error_ret:
* adxrs450_spi_initial() - use for initializing procedure.
* @st: device instance specific data
* @val: somewhere to pass back the value read
+ * @chk: Whether to perform fault check
**/
static int adxrs450_spi_initial(struct adxrs450_state *st,
u32 *val, char chk)
{
- struct spi_message msg;
int ret;
+ u32 tx;
struct spi_transfer xfers = {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
+ .tx_buf = &st->tx,
+ .rx_buf = &st->rx,
.bits_per_word = 8,
- .len = 4,
+ .len = sizeof(st->tx),
};
mutex_lock(&st->buf_lock);
- st->tx[0] = ADXRS450_SENSOR_DATA;
- st->tx[1] = 0;
- st->tx[2] = 0;
- st->tx[3] = 0;
+ tx = ADXRS450_SENSOR_DATA;
if (chk)
- st->tx[3] |= (ADXRS450_CHK | ADXRS450_P);
- spi_message_init(&msg);
- spi_message_add_tail(&xfers, &msg);
- ret = spi_sync(st->us, &msg);
+ tx |= (ADXRS450_CHK | ADXRS450_P);
+ st->tx = cpu_to_be32(tx);
+ ret = spi_sync_transfer(st->us, &xfers, 1);
if (ret) {
dev_err(&st->us->dev, "Problem while reading initializing data\n");
goto error_ret;
}
- *val = be32_to_cpu(*(u32 *)st->rx);
+ *val = be32_to_cpu(st->rx);
error_ret:
mutex_unlock(&st->buf_lock);
@@ -185,8 +253,7 @@ static int adxrs450_initial_setup(struct iio_dev *indio_dev)
if (ret)
return ret;
if (t != 0x01)
- dev_warn(&st->us->dev, "The initial power on response "
- "is not correct! Restart without reset?\n");
+ dev_warn(&st->us->dev, "The initial power on response is not correct! Restart without reset?\n");
msleep(ADXRS450_STARTUP_DELAY);
ret = adxrs450_spi_initial(st, &t, 0);
@@ -217,20 +284,6 @@ static int adxrs450_initial_setup(struct iio_dev *indio_dev)
dev_err(&st->us->dev, "The device is not in normal status!\n");
return -EINVAL;
}
- ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_PID1, &data);
- if (ret)
- return ret;
- dev_info(&st->us->dev, "The Part ID is 0x%x\n", data);
-
- ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_SNL, &data);
- if (ret)
- return ret;
- t = data;
- ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_SNH, &data);
- if (ret)
- return ret;
- t |= data << 16;
- dev_info(&st->us->dev, "The Serial Number is 0x%x\n", t);
return 0;
}
@@ -244,9 +297,10 @@ static int adxrs450_write_raw(struct iio_dev *indio_dev,
int ret;
switch (mask) {
case IIO_CHAN_INFO_CALIBBIAS:
+ if (val < -0x400 || val >= 0x400)
+ return -EINVAL;
ret = adxrs450_spi_write_reg_16(indio_dev,
- ADXRS450_DNC1,
- val & 0x3FF);
+ ADXRS450_DNC1, val);
break;
default:
ret = -EINVAL;
@@ -312,7 +366,7 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_DNC1, &t);
if (ret)
break;
- *val = t;
+ *val = sign_extend32(t, 9);
ret = IIO_VAL_INT;
break;
default:
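The calibration-bias read above now passes the 10-bit DNC value through sign_extend32(t, 9), so a raw 0x3ff comes back as -1 rather than 1023, matching the -0x400..0x3ff range enforced in write_raw. A standalone sketch mirroring the kernel helper's semantics (the second argument is the 0-based index of the sign bit):

	#include <stdio.h>
	#include <stdint.h>

	/* same idea as the kernel's sign_extend32(value, index) */
	static int32_t sign_extend32_sketch(uint32_t value, int index)
	{
		uint8_t shift = 31 - index;

		return (int32_t)(value << shift) >> shift;
	}

	int main(void)
	{
		/* 10-bit field: 0x3ff -> -1, 0x200 -> -512, 0x1ff -> 511 */
		printf("%d %d %d\n",
		       sign_extend32_sketch(0x3ff, 9),
		       sign_extend32_sketch(0x200, 9),
		       sign_extend32_sketch(0x1ff, 9));
		return 0;
	}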
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 06e7cc35450c..fcfc83a9f861 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -28,7 +28,6 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include "../common/hid-sensors/hid-sensor-attributes.h"
#include "../common/hid-sensors/hid-sensor-trigger.h"
/*Format: HID-SENSOR-usage_id_in_hex*/
@@ -44,7 +43,7 @@ enum gyro_3d_channel {
struct gyro_3d_state {
struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_iio_common common_attributes;
+ struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info gyro[GYRO_3D_CHANNEL_MAX];
u32 gyro_val[GYRO_3D_CHANNEL_MAX];
};
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
new file mode 100644
index 000000000000..f667d2c8c00f
--- /dev/null
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -0,0 +1,156 @@
+/*
+ * itg3200_buffer.c -- support InvenSense ITG3200
+ * Digital 3-Axis Gyroscope driver
+ *
+ * Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de>
+ * Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/gyro/itg3200.h>
+
+
+static int itg3200_read_all_channels(struct i2c_client *i2c, __be16 *buf)
+{
+ u8 tx = 0x80 | ITG3200_REG_TEMP_OUT_H;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = i2c->addr,
+ .flags = i2c->flags,
+ .len = 1,
+ .buf = &tx,
+ },
+ {
+ .addr = i2c->addr,
+ .flags = i2c->flags | I2C_M_RD,
+ .len = ITG3200_SCAN_ELEMENTS * sizeof(s16),
+ .buf = (char *)buf,
+ },
+ };
+
+ return i2c_transfer(i2c->adapter, msg, 2);
+}
+
+static irqreturn_t itg3200_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct itg3200 *st = iio_priv(indio_dev);
+ __be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)];
+
+ int ret = itg3200_read_all_channels(st->i2c, buf);
+ if (ret < 0)
+ goto error_ret;
+
+ if (indio_dev->scan_timestamp)
+ memcpy((u8 *)buf + indio_dev->scan_bytes - sizeof(s64),
+ &pf->timestamp, sizeof(pf->timestamp));
+
+ iio_push_to_buffers(indio_dev, (u8 *)buf);
+ iio_trigger_notify_done(indio_dev->trig);
+
+error_ret:
+ return IRQ_HANDLED;
+}
+
+int itg3200_buffer_configure(struct iio_dev *indio_dev)
+{
+ return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ itg3200_trigger_handler, NULL);
+}
+
+void itg3200_buffer_unconfigure(struct iio_dev *indio_dev)
+{
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+
+
+static int itg3200_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = trig->private_data;
+ int ret;
+ u8 msc;
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_IRQ_CONFIG, &msc);
+ if (ret)
+ goto error_ret;
+
+ if (state)
+ msc |= ITG3200_IRQ_DATA_RDY_ENABLE;
+ else
+ msc &= ~ITG3200_IRQ_DATA_RDY_ENABLE;
+
+ ret = itg3200_write_reg_8(indio_dev, ITG3200_REG_IRQ_CONFIG, msc);
+ if (ret)
+ goto error_ret;
+
+error_ret:
+ return ret;
+
+}
+
+static const struct iio_trigger_ops itg3200_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &itg3200_data_rdy_trigger_set_state,
+};
+
+int itg3200_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct itg3200 *st = iio_priv(indio_dev);
+
+ st->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
+ indio_dev->id);
+ if (!st->trig)
+ return -ENOMEM;
+
+ ret = request_irq(st->i2c->irq,
+ &iio_trigger_generic_data_rdy_poll,
+ IRQF_TRIGGER_RISING,
+ "itg3200_data_rdy",
+ st->trig);
+ if (ret)
+ goto error_free_trig;
+
+
+ st->trig->dev.parent = &st->i2c->dev;
+ st->trig->ops = &itg3200_trigger_ops;
+ st->trig->private_data = indio_dev;
+ ret = iio_trigger_register(st->trig);
+ if (ret)
+ goto error_free_irq;
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+
+ return 0;
+
+error_free_irq:
+ free_irq(st->i2c->irq, st->trig);
+error_free_trig:
+ iio_trigger_free(st->trig);
+ return ret;
+}
+
+void itg3200_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+
+ iio_trigger_unregister(st->trig);
+ free_irq(st->i2c->irq, st->trig);
+ iio_trigger_free(st->trig);
+}
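With all channels enabled, the handler above pushes one scan consisting of four big-endian 16-bit words read in a single burst starting at ITG3200_REG_TEMP_OUT_H, followed by the 8-byte-aligned timestamp. A hedged sketch of that layout in plain C (the temp, x, y, z ordering is inferred from the scan indices and the register map, not spelled out in this hunk):

	#include <stdint.h>

	/* one scan as pushed by itg3200_trigger_handler(), all channels on */
	struct itg3200_scan_sketch {
		uint16_t temp;		/* big-endian on the wire */
		uint16_t gyro_x;
		uint16_t gyro_y;
		uint16_t gyro_z;
		int64_t timestamp;	/* starts at an 8-byte boundary */
	};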
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
new file mode 100644
index 000000000000..df2e6aa5d73b
--- /dev/null
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -0,0 +1,401 @@
+/*
+ * itg3200_core.c -- support InvenSense ITG3200
+ * Digital 3-Axis Gyroscope driver
+ *
+ * Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de>
+ * Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO:
+ * - Support digital low pass filter
+ * - Support power management
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/gyro/itg3200.h>
+
+
+int itg3200_write_reg_8(struct iio_dev *indio_dev,
+ u8 reg_address, u8 val)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+
+ return i2c_smbus_write_byte_data(st->i2c, 0x80 | reg_address, val);
+}
+
+int itg3200_read_reg_8(struct iio_dev *indio_dev,
+ u8 reg_address, u8 *val)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(st->i2c, reg_address);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+}
+
+static int itg3200_read_reg_s16(struct iio_dev *indio_dev, u8 lower_reg_address,
+ int *val)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+ struct i2c_client *client = st->i2c;
+ int ret;
+ s16 out;
+
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = client->flags,
+ .len = 1,
+ .buf = (char *)&lower_reg_address,
+ },
+ {
+ .addr = client->addr,
+ .flags = client->flags | I2C_M_RD,
+ .len = 2,
+ .buf = (char *)&out,
+ },
+ };
+
+ lower_reg_address |= 0x80;
+ ret = i2c_transfer(client->adapter, msg, 2);
+ be16_to_cpus(&out);
+ *val = out;
+
+ return (ret == 2) ? 0 : ret;
+}
+
+static int itg3200_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ int ret = 0;
+ u8 reg;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ reg = (u8)chan->address;
+ ret = itg3200_read_reg_s16(indio_dev, reg, val);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ if (chan->type == IIO_TEMP)
+ *val2 = 1000000000/280;
+ else
+ *val2 = 1214142; /* (1 / 14.375) * (PI / 180) */
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only the temperature channel has an offset */
+ *val = 23000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t itg3200_read_frequency(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ int ret, sps;
+ u8 val;
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_DLPF, &val);
+ if (ret)
+ return ret;
+
+ sps = (val & ITG3200_DLPF_CFG_MASK) ? 1000 : 8000;
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_SAMPLE_RATE_DIV, &val);
+ if (ret)
+ return ret;
+
+ sps /= val + 1;
+
+ return sprintf(buf, "%d\n", sps);
+}
+
+static ssize_t itg3200_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ unsigned val;
+ int ret;
+ u8 t;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_DLPF, &t);
+ if (ret)
+ goto err_ret;
+
+ if (val == 0) {
+ ret = -EINVAL;
+ goto err_ret;
+ }
+ t = ((t & ITG3200_DLPF_CFG_MASK) ? 1000u : 8000u) / val - 1;
+
+ ret = itg3200_write_reg_8(indio_dev, ITG3200_REG_SAMPLE_RATE_DIV, t);
+
+err_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+/*
+ * Reset device and internal registers to the power-up-default settings
+ * Use the gyro clock as reference, as suggested by the datasheet
+ */
+static int itg3200_reset(struct iio_dev *indio_dev)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+ int ret;
+
+ dev_dbg(&st->i2c->dev, "reset device");
+
+ ret = itg3200_write_reg_8(indio_dev,
+ ITG3200_REG_POWER_MANAGEMENT,
+ ITG3200_RESET);
+ if (ret) {
+ dev_err(&st->i2c->dev, "error resetting device");
+ goto error_ret;
+ }
+
+ /* Wait for PLL (1ms according to datasheet) */
+ udelay(1500);
+
+ ret = itg3200_write_reg_8(indio_dev,
+ ITG3200_REG_IRQ_CONFIG,
+ ITG3200_IRQ_ACTIVE_HIGH |
+ ITG3200_IRQ_PUSH_PULL |
+ ITG3200_IRQ_LATCH_50US_PULSE |
+ ITG3200_IRQ_LATCH_CLEAR_ANY);
+
+ if (ret)
+ dev_err(&st->i2c->dev, "error init device");
+
+error_ret:
+ return ret;
+}
+
+/* itg3200_enable_full_scale() - Disables the digital low pass filter */
+static int itg3200_enable_full_scale(struct iio_dev *indio_dev)
+{
+ u8 val;
+ int ret;
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_DLPF, &val);
+ if (ret)
+ goto err_ret;
+
+ val |= ITG3200_DLPF_FS_SEL_2000;
+ return itg3200_write_reg_8(indio_dev, ITG3200_REG_DLPF, val);
+
+err_ret:
+ return ret;
+}
+
+static int itg3200_initial_setup(struct iio_dev *indio_dev)
+{
+ struct itg3200 *st = iio_priv(indio_dev);
+ int ret;
+ u8 val;
+
+ ret = itg3200_read_reg_8(indio_dev, ITG3200_REG_ADDRESS, &val);
+ if (ret)
+ goto err_ret;
+
+ if (((val >> 1) & 0x3f) != 0x34) {
+ dev_err(&st->i2c->dev, "invalid reg value 0x%02x", val);
+ ret = -ENXIO;
+ goto err_ret;
+ }
+
+ ret = itg3200_reset(indio_dev);
+ if (ret)
+ goto err_ret;
+
+ ret = itg3200_enable_full_scale(indio_dev);
+err_ret:
+ return ret;
+}
+
+#define ITG3200_TEMP_INFO_MASK (IIO_CHAN_INFO_OFFSET_SHARED_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_RAW_SEPARATE_BIT)
+#define ITG3200_GYRO_INFO_MASK (IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_RAW_SEPARATE_BIT)
+
+#define ITG3200_ST \
+ { .sign = 's', .realbits = 16, .storagebits = 16, .endianness = IIO_BE }
+
+#define ITG3200_GYRO_CHAN(_mod) { \
+ .type = IIO_ANGL_VEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## _mod, \
+ .info_mask = ITG3200_GYRO_INFO_MASK, \
+ .address = ITG3200_REG_GYRO_ ## _mod ## OUT_H, \
+ .scan_index = ITG3200_SCAN_GYRO_ ## _mod, \
+ .scan_type = ITG3200_ST, \
+}
+
+static const struct iio_chan_spec itg3200_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .channel2 = IIO_NO_MOD,
+ .info_mask = ITG3200_TEMP_INFO_MASK,
+ .address = ITG3200_REG_TEMP_OUT_H,
+ .scan_index = ITG3200_SCAN_TEMP,
+ .scan_type = ITG3200_ST,
+ },
+ ITG3200_GYRO_CHAN(X),
+ ITG3200_GYRO_CHAN(Y),
+ ITG3200_GYRO_CHAN(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(ITG3200_SCAN_ELEMENTS),
+};
+
+/* IIO device attributes */
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, itg3200_read_frequency,
+ itg3200_write_frequency);
+
+static struct attribute *itg3200_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group itg3200_attribute_group = {
+ .attrs = itg3200_attributes,
+};
+
+static const struct iio_info itg3200_info = {
+ .attrs = &itg3200_attribute_group,
+ .read_raw = &itg3200_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const unsigned long itg3200_available_scan_masks[] = { 0xffffffff, 0x0 };
+
+static int itg3200_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct itg3200 *st;
+ struct iio_dev *indio_dev;
+
+ dev_dbg(&client->dev, "probe I2C dev with IRQ %i", client->irq);
+
+ indio_dev = iio_device_alloc(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ st = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ st->i2c = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = client->dev.driver->name;
+ indio_dev->channels = itg3200_channels;
+ indio_dev->num_channels = ARRAY_SIZE(itg3200_channels);
+ indio_dev->available_scan_masks = itg3200_available_scan_masks;
+ indio_dev->info = &itg3200_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = itg3200_buffer_configure(indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ if (client->irq) {
+ ret = itg3200_probe_trigger(indio_dev);
+ if (ret)
+ goto error_unconfigure_buffer;
+ }
+
+ ret = itg3200_initial_setup(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_trigger;
+
+ return 0;
+
+error_remove_trigger:
+ if (client->irq)
+ itg3200_remove_trigger(indio_dev);
+error_unconfigure_buffer:
+ itg3200_buffer_unconfigure(indio_dev);
+error_free_dev:
+ iio_device_free(indio_dev);
+error_ret:
+ return ret;
+}
+
+static int itg3200_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ if (client->irq)
+ itg3200_remove_trigger(indio_dev);
+
+ itg3200_buffer_unconfigure(indio_dev);
+
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id itg3200_id[] = {
+ { "itg3200", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, itg3200_id);
+
+static struct i2c_driver itg3200_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "itg3200",
+ },
+ .id_table = itg3200_id,
+ .probe = itg3200_probe,
+ .remove = itg3200_remove,
+};
+
+module_i2c_driver(itg3200_driver);
+
+MODULE_AUTHOR("Christian Strobel <christian.strobel@iis.fraunhofer.de>");
+MODULE_DESCRIPTION("ITG3200 Gyroscope I2C driver");
+MODULE_LICENSE("GPL v2");
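The hard-coded scale and offset in itg3200_read_raw() follow from the usual ITG-3200 constants: 14.375 LSB per °/s for the rate outputs, and 280 LSB per °C with 35 °C corresponding to -13200 counts for the temperature sensor (the temperature figures are an assumption inferred from the 23000 offset, not stated in this patch). A quick standalone check:

	#include <stdio.h>
	#include <math.h>

	int main(void)
	{
		/* (1 / 14.375 deg/s per LSB) converted to rad/s, in nano units */
		double nrad = (1.0 / 14.375) * (M_PI / 180.0) * 1e9;

		/* offset so that (raw + offset) / 280 gives 35 at raw = -13200 */
		int offset = 35 * 280 + 13200;

		printf("gyro scale %.0f nrad/s (driver: 1214142)\n", nrad);
		printf("temp offset %d (driver: 23000)\n", offset);
		return 0;
	}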
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
new file mode 100644
index 000000000000..3ad9907bb154
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro.h
@@ -0,0 +1,45 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ * v. 1.0.0
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_GYRO_H
+#define ST_GYRO_H
+
+#include <linux/types.h>
+#include <linux/iio/common/st_sensors.h>
+
+#define L3G4200D_GYRO_DEV_NAME "l3g4200d"
+#define LSM330D_GYRO_DEV_NAME "lsm330d_gyro"
+#define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
+#define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
+#define L3GD20_GYRO_DEV_NAME "l3gd20"
+#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
+#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
+#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
+
+int st_gyro_common_probe(struct iio_dev *indio_dev);
+void st_gyro_common_remove(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_IIO_BUFFER
+int st_gyro_allocate_ring(struct iio_dev *indio_dev);
+void st_gyro_deallocate_ring(struct iio_dev *indio_dev);
+int st_gyro_trig_set_state(struct iio_trigger *trig, bool state);
+#define ST_GYRO_TRIGGER_SET_STATE (&st_gyro_trig_set_state)
+#else /* CONFIG_IIO_BUFFER */
+static inline int st_gyro_allocate_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void st_gyro_deallocate_ring(struct iio_dev *indio_dev)
+{
+}
+#define ST_GYRO_TRIGGER_SET_STATE NULL
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* ST_GYRO_H */
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c
new file mode 100644
index 000000000000..da4d122ec7dc
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_buffer.c
@@ -0,0 +1,114 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_gyro.h"
+
+int st_gyro_trig_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = trig->private_data;
+
+ return st_sensors_set_dataready_irq(indio_dev, state);
+}
+
+static int st_gyro_buffer_preenable(struct iio_dev *indio_dev)
+{
+ int err;
+
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto st_gyro_set_enable_error;
+
+ err = iio_sw_buffer_preenable(indio_dev);
+
+st_gyro_set_enable_error:
+ return err;
+}
+
+static int st_gyro_buffer_postenable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+ gdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (gdata->buffer_data == NULL) {
+ err = -ENOMEM;
+ goto allocate_memory_error;
+ }
+
+ err = st_sensors_set_axis_enable(indio_dev,
+ (u8)indio_dev->active_scan_mask[0]);
+ if (err < 0)
+ goto st_gyro_buffer_postenable_error;
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err < 0)
+ goto st_gyro_buffer_postenable_error;
+
+ return err;
+
+st_gyro_buffer_postenable_error:
+ kfree(gdata->buffer_data);
+allocate_memory_error:
+ return err;
+}
+
+static int st_gyro_buffer_predisable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+ err = iio_triggered_buffer_predisable(indio_dev);
+ if (err < 0)
+ goto st_gyro_buffer_predisable_error;
+
+ err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
+ if (err < 0)
+ goto st_gyro_buffer_predisable_error;
+
+ err = st_sensors_set_enable(indio_dev, false);
+
+st_gyro_buffer_predisable_error:
+ kfree(gdata->buffer_data);
+ return err;
+}
+
+static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
+ .preenable = &st_gyro_buffer_preenable,
+ .postenable = &st_gyro_buffer_postenable,
+ .predisable = &st_gyro_buffer_predisable,
+};
+
+int st_gyro_allocate_ring(struct iio_dev *indio_dev)
+{
+ return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops);
+}
+
+void st_gyro_deallocate_ring(struct iio_dev *indio_dev)
+{
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes buffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
new file mode 100644
index 000000000000..fa9b24219987
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -0,0 +1,368 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_gyro.h"
+
+/* DEFAULT VALUE FOR SENSORS */
+#define ST_GYRO_DEFAULT_OUT_X_L_ADDR 0x28
+#define ST_GYRO_DEFAULT_OUT_Y_L_ADDR 0x2a
+#define ST_GYRO_DEFAULT_OUT_Z_L_ADDR 0x2c
+
+/* FULLSCALE */
+#define ST_GYRO_FS_AVL_250DPS 250
+#define ST_GYRO_FS_AVL_500DPS 500
+#define ST_GYRO_FS_AVL_2000DPS 2000
+
+/* CUSTOM VALUES FOR SENSOR 1 */
+#define ST_GYRO_1_WAI_EXP 0xd3
+#define ST_GYRO_1_ODR_ADDR 0x20
+#define ST_GYRO_1_ODR_MASK 0xc0
+#define ST_GYRO_1_ODR_AVL_100HZ_VAL 0x00
+#define ST_GYRO_1_ODR_AVL_200HZ_VAL 0x01
+#define ST_GYRO_1_ODR_AVL_400HZ_VAL 0x02
+#define ST_GYRO_1_ODR_AVL_800HZ_VAL 0x03
+#define ST_GYRO_1_PW_ADDR 0x20
+#define ST_GYRO_1_PW_MASK 0x08
+#define ST_GYRO_1_FS_ADDR 0x23
+#define ST_GYRO_1_FS_MASK 0x30
+#define ST_GYRO_1_FS_AVL_250_VAL 0x00
+#define ST_GYRO_1_FS_AVL_500_VAL 0x01
+#define ST_GYRO_1_FS_AVL_2000_VAL 0x02
+#define ST_GYRO_1_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
+#define ST_GYRO_1_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
+#define ST_GYRO_1_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
+#define ST_GYRO_1_BDU_ADDR 0x23
+#define ST_GYRO_1_BDU_MASK 0x80
+#define ST_GYRO_1_DRDY_IRQ_ADDR 0x22
+#define ST_GYRO_1_DRDY_IRQ_MASK 0x08
+#define ST_GYRO_1_MULTIREAD_BIT true
+
+/* CUSTOM VALUES FOR SENSOR 2 */
+#define ST_GYRO_2_WAI_EXP 0xd4
+#define ST_GYRO_2_ODR_ADDR 0x20
+#define ST_GYRO_2_ODR_MASK 0xc0
+#define ST_GYRO_2_ODR_AVL_95HZ_VAL 0x00
+#define ST_GYRO_2_ODR_AVL_190HZ_VAL 0x01
+#define ST_GYRO_2_ODR_AVL_380HZ_VAL 0x02
+#define ST_GYRO_2_ODR_AVL_760HZ_VAL 0x03
+#define ST_GYRO_2_PW_ADDR 0x20
+#define ST_GYRO_2_PW_MASK 0x08
+#define ST_GYRO_2_FS_ADDR 0x23
+#define ST_GYRO_2_FS_MASK 0x30
+#define ST_GYRO_2_FS_AVL_250_VAL 0x00
+#define ST_GYRO_2_FS_AVL_500_VAL 0x01
+#define ST_GYRO_2_FS_AVL_2000_VAL 0x02
+#define ST_GYRO_2_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
+#define ST_GYRO_2_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
+#define ST_GYRO_2_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
+#define ST_GYRO_2_BDU_ADDR 0x23
+#define ST_GYRO_2_BDU_MASK 0x80
+#define ST_GYRO_2_DRDY_IRQ_ADDR 0x22
+#define ST_GYRO_2_DRDY_IRQ_MASK 0x08
+#define ST_GYRO_2_MULTIREAD_BIT true
+
+static const struct iio_chan_spec st_gyro_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL, ST_SENSORS_SCAN_X,
+ IIO_MOD_X, IIO_LE, ST_SENSORS_DEFAULT_16_REALBITS,
+ ST_GYRO_DEFAULT_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL, ST_SENSORS_SCAN_Y,
+ IIO_MOD_Y, IIO_LE, ST_SENSORS_DEFAULT_16_REALBITS,
+ ST_GYRO_DEFAULT_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL, ST_SENSORS_SCAN_Z,
+ IIO_MOD_Z, IIO_LE, ST_SENSORS_DEFAULT_16_REALBITS,
+ ST_GYRO_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct st_sensors st_gyro_sensors[] = {
+ {
+ .wai = ST_GYRO_1_WAI_EXP,
+ .sensors_supported = {
+ [0] = L3G4200D_GYRO_DEV_NAME,
+ [1] = LSM330DL_GYRO_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
+ .odr = {
+ .addr = ST_GYRO_1_ODR_ADDR,
+ .mask = ST_GYRO_1_ODR_MASK,
+ .odr_avl = {
+ { 100, ST_GYRO_1_ODR_AVL_100HZ_VAL, },
+ { 200, ST_GYRO_1_ODR_AVL_200HZ_VAL, },
+ { 400, ST_GYRO_1_ODR_AVL_400HZ_VAL, },
+ { 800, ST_GYRO_1_ODR_AVL_800HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_GYRO_1_PW_ADDR,
+ .mask = ST_GYRO_1_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_GYRO_1_FS_ADDR,
+ .mask = ST_GYRO_1_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_GYRO_FS_AVL_250DPS,
+ .value = ST_GYRO_1_FS_AVL_250_VAL,
+ .gain = ST_GYRO_1_FS_AVL_250_GAIN,
+ },
+ [1] = {
+ .num = ST_GYRO_FS_AVL_500DPS,
+ .value = ST_GYRO_1_FS_AVL_500_VAL,
+ .gain = ST_GYRO_1_FS_AVL_500_GAIN,
+ },
+ [2] = {
+ .num = ST_GYRO_FS_AVL_2000DPS,
+ .value = ST_GYRO_1_FS_AVL_2000_VAL,
+ .gain = ST_GYRO_1_FS_AVL_2000_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_GYRO_1_BDU_ADDR,
+ .mask = ST_GYRO_1_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_GYRO_1_DRDY_IRQ_ADDR,
+ .mask = ST_GYRO_1_DRDY_IRQ_MASK,
+ },
+ .multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_GYRO_2_WAI_EXP,
+ .sensors_supported = {
+ [0] = L3GD20_GYRO_DEV_NAME,
+ [1] = L3GD20H_GYRO_DEV_NAME,
+ [2] = LSM330D_GYRO_DEV_NAME,
+ [3] = LSM330DLC_GYRO_DEV_NAME,
+ [4] = L3G4IS_GYRO_DEV_NAME,
+ [5] = LSM330_GYRO_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
+ .odr = {
+ .addr = ST_GYRO_2_ODR_ADDR,
+ .mask = ST_GYRO_2_ODR_MASK,
+ .odr_avl = {
+ { 95, ST_GYRO_2_ODR_AVL_95HZ_VAL, },
+ { 190, ST_GYRO_2_ODR_AVL_190HZ_VAL, },
+ { 380, ST_GYRO_2_ODR_AVL_380HZ_VAL, },
+ { 760, ST_GYRO_2_ODR_AVL_760HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_GYRO_2_PW_ADDR,
+ .mask = ST_GYRO_2_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_GYRO_2_FS_ADDR,
+ .mask = ST_GYRO_2_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_GYRO_FS_AVL_250DPS,
+ .value = ST_GYRO_2_FS_AVL_250_VAL,
+ .gain = ST_GYRO_2_FS_AVL_250_GAIN,
+ },
+ [1] = {
+ .num = ST_GYRO_FS_AVL_500DPS,
+ .value = ST_GYRO_2_FS_AVL_500_VAL,
+ .gain = ST_GYRO_2_FS_AVL_500_GAIN,
+ },
+ [2] = {
+ .num = ST_GYRO_FS_AVL_2000DPS,
+ .value = ST_GYRO_2_FS_AVL_2000_VAL,
+ .gain = ST_GYRO_2_FS_AVL_2000_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_GYRO_2_BDU_ADDR,
+ .mask = ST_GYRO_2_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_GYRO_2_DRDY_IRQ_ADDR,
+ .mask = ST_GYRO_2_DRDY_IRQ_MASK,
+ },
+ .multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+};
+
+static int st_gyro_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val,
+ int *val2, long mask)
+{
+ int err;
+ struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = st_sensors_read_info_raw(indio_dev, ch, val);
+ if (err < 0)
+ goto read_error;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = gdata->current_fullscale->gain;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+
+read_error:
+ return err;
+}
+
+static int st_gyro_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static ST_SENSOR_DEV_ATTR_SAMP_FREQ();
+static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
+static ST_SENSORS_DEV_ATTR_SCALE_AVAIL(in_anglvel_scale_available);
+
+static struct attribute *st_gyro_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_anglvel_scale_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group st_gyro_attribute_group = {
+ .attrs = st_gyro_attributes,
+};
+
+static const struct iio_info gyro_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &st_gyro_attribute_group,
+ .read_raw = &st_gyro_read_raw,
+ .write_raw = &st_gyro_write_raw,
+};
+
+#ifdef CONFIG_IIO_TRIGGER
+static const struct iio_trigger_ops st_gyro_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
+};
+#define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops)
+#else
+#define ST_GYRO_TRIGGER_OPS NULL
+#endif
+
+int st_gyro_common_probe(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &gyro_info;
+
+ err = st_sensors_check_device_support(indio_dev,
+ ARRAY_SIZE(st_gyro_sensors), st_gyro_sensors);
+ if (err < 0)
+ goto st_gyro_common_probe_error;
+
+ gdata->multiread_bit = gdata->sensor->multi_read_bit;
+ indio_dev->channels = gdata->sensor->ch;
+ indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+
+ gdata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &gdata->sensor->fs.fs_avl[0];
+ gdata->odr = gdata->sensor->odr.odr_avl[0].hz;
+
+ err = st_sensors_init_sensor(indio_dev);
+ if (err < 0)
+ goto st_gyro_common_probe_error;
+
+ if (gdata->get_irq_data_ready(indio_dev) > 0) {
+ err = st_gyro_allocate_ring(indio_dev);
+ if (err < 0)
+ goto st_gyro_common_probe_error;
+
+ err = st_sensors_allocate_trigger(indio_dev,
+ ST_GYRO_TRIGGER_OPS);
+ if (err < 0)
+ goto st_gyro_probe_trigger_error;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto st_gyro_device_register_error;
+
+ return err;
+
+st_gyro_device_register_error:
+ if (gdata->get_irq_data_ready(indio_dev) > 0)
+ st_sensors_deallocate_trigger(indio_dev);
+st_gyro_probe_trigger_error:
+ if (gdata->get_irq_data_ready(indio_dev) > 0)
+ st_gyro_deallocate_ring(indio_dev);
+st_gyro_common_probe_error:
+ return err;
+}
+EXPORT_SYMBOL(st_gyro_common_probe);
+
+void st_gyro_common_remove(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *gdata = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (gdata->get_irq_data_ready(indio_dev) > 0) {
+ st_sensors_deallocate_trigger(indio_dev);
+ st_gyro_deallocate_ring(indio_dev);
+ }
+ iio_device_free(indio_dev);
+}
+EXPORT_SYMBOL(st_gyro_common_remove);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes driver");
+MODULE_LICENSE("GPL v2");
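The full-scale gains above are wrapped in IIO_DEGREE_TO_RAD(), so 8750, 17500 and 70000 are sensitivities in micro-degrees/s per digit (8.75, 17.5 and 70 mdps) converted to micro-radians/s for the IIO_VAL_INT_PLUS_MICRO reporting in st_gyro_read_raw(); the mdps figures are read off the constants here, not from a datasheet. A quick check of the 250 dps case:

	#include <stdio.h>
	#include <math.h>

	int main(void)
	{
		/* 8750 micro-deg/s per digit at the 250 dps full scale */
		double urad = 8750.0 * M_PI / 180.0;

		/* roughly what IIO_DEGREE_TO_RAD(8750) evaluates to */
		printf("%.1f urad/s per digit\n", urad);
		return 0;
	}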
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
new file mode 100644
index 000000000000..8a310500573d
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -0,0 +1,84 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_i2c.h>
+#include "st_gyro.h"
+
+static int st_gyro_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *gdata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*gdata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ gdata = iio_priv(indio_dev);
+ gdata->dev = &client->dev;
+
+ st_sensors_i2c_configure(indio_dev, client, gdata);
+
+ err = st_gyro_common_probe(indio_dev);
+ if (err < 0)
+ goto st_gyro_common_probe_error;
+
+ return 0;
+
+st_gyro_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_gyro_i2c_remove(struct i2c_client *client)
+{
+ st_gyro_common_remove(i2c_get_clientdata(client));
+
+ return 0;
+}
+
+static const struct i2c_device_id st_gyro_id_table[] = {
+ { L3G4200D_GYRO_DEV_NAME },
+ { LSM330D_GYRO_DEV_NAME },
+ { LSM330DL_GYRO_DEV_NAME },
+ { LSM330DLC_GYRO_DEV_NAME },
+ { L3GD20_GYRO_DEV_NAME },
+ { L3GD20H_GYRO_DEV_NAME },
+ { L3G4IS_GYRO_DEV_NAME },
+ { LSM330_GYRO_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, st_gyro_id_table);
+
+static struct i2c_driver st_gyro_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-gyro-i2c",
+ },
+ .probe = st_gyro_i2c_probe,
+ .remove = st_gyro_i2c_remove,
+ .id_table = st_gyro_id_table,
+};
+module_i2c_driver(st_gyro_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
new file mode 100644
index 000000000000..f3540390eb22
--- /dev/null
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -0,0 +1,83 @@
+/*
+ * STMicroelectronics gyroscopes driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_spi.h>
+#include "st_gyro.h"
+
+static int st_gyro_spi_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *gdata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*gdata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ gdata = iio_priv(indio_dev);
+ gdata->dev = &spi->dev;
+
+ st_sensors_spi_configure(indio_dev, spi, gdata);
+
+ err = st_gyro_common_probe(indio_dev);
+ if (err < 0)
+ goto st_gyro_common_probe_error;
+
+ return 0;
+
+st_gyro_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_gyro_spi_remove(struct spi_device *spi)
+{
+ st_gyro_common_remove(spi_get_drvdata(spi));
+
+ return 0;
+}
+
+static const struct spi_device_id st_gyro_id_table[] = {
+ { L3G4200D_GYRO_DEV_NAME },
+ { LSM330D_GYRO_DEV_NAME },
+ { LSM330DL_GYRO_DEV_NAME },
+ { LSM330DLC_GYRO_DEV_NAME },
+ { L3GD20_GYRO_DEV_NAME },
+ { L3GD20H_GYRO_DEV_NAME },
+ { L3G4IS_GYRO_DEV_NAME },
+ { LSM330_GYRO_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
+
+static struct spi_driver st_gyro_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-gyro-spi",
+ },
+ .probe = st_gyro_spi_probe,
+ .remove = st_gyro_spi_remove,
+ .id_table = st_gyro_id_table,
+};
+module_spi_driver(st_gyro_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics gyroscopes spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 3d79a40e916b..4f40a10cb74f 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -3,6 +3,17 @@
#
menu "Inertial measurement units"
+config ADIS16400
+ tristate "Analog Devices ADIS16400 and similar IMU SPI driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Analog Devices adis16300, adis16344,
+ adis16350, adis16354, adis16355, adis16360, adis16362, adis16364,
+ adis16365, adis16400 and adis16405 triaxial inertial sensors
+ (adis16400 series also have magnetometers).
+
config ADIS16480
tristate "Analog Devices ADIS16480 and similar IMU driver"
depends on SPI
@@ -25,3 +36,5 @@ config IIO_ADIS_LIB_BUFFER
help
A set of buffer helper functions for the Analog Devices ADIS* device
family.
+
+source "drivers/iio/imu/inv_mpu6050/Kconfig"
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index cfe57638f6f9..f2f56ceaed26 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -2,9 +2,14 @@
# Makefile for Inertial Measurement Units
#
+adis16400-y := adis16400_core.o
+adis16400-$(CONFIG_IIO_BUFFER) += adis16400_buffer.o
+obj-$(CONFIG_ADIS16400) += adis16400.o
obj-$(CONFIG_ADIS16480) += adis16480.o
adis_lib-y += adis.o
adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_trigger.o
adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
+
+obj-y += inv_mpu6050/
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 7a105e966464..2f8f9d632386 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -17,12 +17,11 @@
#ifndef SPI_ADIS16400_H_
#define SPI_ADIS16400_H_
+#include <linux/iio/imu/adis.h>
+
#define ADIS16400_STARTUP_DELAY 290 /* ms */
#define ADIS16400_MTEST_DELAY 90 /* ms */
-#define ADIS16400_READ_REG(a) a
-#define ADIS16400_WRITE_REG(a) ((a) | 0x80)
-
#define ADIS16400_FLASH_CNT 0x00 /* Flash memory write count */
#define ADIS16400_SUPPLY_OUT 0x02 /* Power supply measurement */
#define ADIS16400_XGYRO_OUT 0x04 /* X-axis gyroscope output */
@@ -45,6 +44,9 @@
#define ADIS16300_ROLL_OUT 0x14 /* Y axis inclinometer output measurement */
#define ADIS16300_AUX_ADC 0x16 /* Auxiliary ADC measurement */
+#define ADIS16448_BARO_OUT 0x16 /* Barometric pressure output */
+#define ADIS16448_TEMP_OUT 0x18 /* Temperature output */
+
/* Calibration parameters */
#define ADIS16400_XGYRO_OFF 0x1A /* X-axis gyroscope bias offset factor */
#define ADIS16400_YGYRO_OFF 0x1C /* Y-axis gyroscope bias offset factor */
@@ -75,7 +77,10 @@
#define ADIS16400_ALM_CTRL 0x48 /* Alarm control */
#define ADIS16400_AUX_DAC 0x4A /* Auxiliary DAC data */
+#define ADIS16334_LOT_ID1 0x52 /* Lot identification code 1 */
+#define ADIS16334_LOT_ID2 0x54 /* Lot identification code 2 */
#define ADIS16400_PRODUCT_ID 0x56 /* Product identifier */
+#define ADIS16334_SERIAL_NUMBER 0x58 /* Serial number, lot specific */
#define ADIS16400_ERROR_ACTIVE (1<<14)
#define ADIS16400_NEW_DATA (1<<14)
@@ -96,21 +101,21 @@
#define ADIS16400_SMPL_PRD_DIV_MASK 0x7F
/* DIAG_STAT */
-#define ADIS16400_DIAG_STAT_ZACCL_FAIL (1<<15)
-#define ADIS16400_DIAG_STAT_YACCL_FAIL (1<<14)
-#define ADIS16400_DIAG_STAT_XACCL_FAIL (1<<13)
-#define ADIS16400_DIAG_STAT_XGYRO_FAIL (1<<12)
-#define ADIS16400_DIAG_STAT_YGYRO_FAIL (1<<11)
-#define ADIS16400_DIAG_STAT_ZGYRO_FAIL (1<<10)
-#define ADIS16400_DIAG_STAT_ALARM2 (1<<9)
-#define ADIS16400_DIAG_STAT_ALARM1 (1<<8)
-#define ADIS16400_DIAG_STAT_FLASH_CHK (1<<6)
-#define ADIS16400_DIAG_STAT_SELF_TEST (1<<5)
-#define ADIS16400_DIAG_STAT_OVERFLOW (1<<4)
-#define ADIS16400_DIAG_STAT_SPI_FAIL (1<<3)
-#define ADIS16400_DIAG_STAT_FLASH_UPT (1<<2)
-#define ADIS16400_DIAG_STAT_POWER_HIGH (1<<1)
-#define ADIS16400_DIAG_STAT_POWER_LOW (1<<0)
+#define ADIS16400_DIAG_STAT_ZACCL_FAIL 15
+#define ADIS16400_DIAG_STAT_YACCL_FAIL 14
+#define ADIS16400_DIAG_STAT_XACCL_FAIL 13
+#define ADIS16400_DIAG_STAT_XGYRO_FAIL 12
+#define ADIS16400_DIAG_STAT_YGYRO_FAIL 11
+#define ADIS16400_DIAG_STAT_ZGYRO_FAIL 10
+#define ADIS16400_DIAG_STAT_ALARM2 9
+#define ADIS16400_DIAG_STAT_ALARM1 8
+#define ADIS16400_DIAG_STAT_FLASH_CHK 6
+#define ADIS16400_DIAG_STAT_SELF_TEST 5
+#define ADIS16400_DIAG_STAT_OVERFLOW 4
+#define ADIS16400_DIAG_STAT_SPI_FAIL 3
+#define ADIS16400_DIAG_STAT_FLASH_UPT 2
+#define ADIS16400_DIAG_STAT_POWER_HIGH 1
+#define ADIS16400_DIAG_STAT_POWER_LOW 0
/* GLOB_CMD */
#define ADIS16400_GLOB_CMD_SW_RESET (1<<7)
@@ -126,9 +131,6 @@
#define ADIS16334_RATE_DIV_SHIFT 8
#define ADIS16334_RATE_INT_CLK BIT(0)
-#define ADIS16400_MAX_TX 24
-#define ADIS16400_MAX_RX 24
-
#define ADIS16400_SPI_SLOW (u32)(300 * 1000)
#define ADIS16400_SPI_BURST (u32)(1000 * 1000)
#define ADIS16400_SPI_FAST (u32)(2000 * 1000)
@@ -136,6 +138,9 @@
#define ADIS16400_HAS_PROD_ID BIT(0)
#define ADIS16400_NO_BURST BIT(1)
#define ADIS16400_HAS_SLOW_MODE BIT(2)
+#define ADIS16400_HAS_SERIAL_NUMBER BIT(3)
+
+struct adis16400_state;
struct adis16400_chip_info {
const struct iio_chan_spec *channels;
@@ -145,95 +150,63 @@ struct adis16400_chip_info {
unsigned int accel_scale_micro;
int temp_scale_nano;
int temp_offset;
- unsigned long default_scan_mask;
- int (*set_freq)(struct iio_dev *indio_dev, unsigned int freq);
- int (*get_freq)(struct iio_dev *indio_dev);
+ int (*set_freq)(struct adis16400_state *st, unsigned int freq);
+ int (*get_freq)(struct adis16400_state *st);
};
/**
* struct adis16400_state - device instance specific data
- * @us: actual spi_device
- * @trig: data ready trigger registered with iio
- * @tx: transmit buffer
- * @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
- * @filt_int: integer part of requested filter frequency
+ * @variant: chip variant info
+ * @filt_int: integer part of requested filter frequency
+ * @adis: adis device
**/
struct adis16400_state {
- struct spi_device *us;
- struct iio_trigger *trig;
- struct mutex buf_lock;
struct adis16400_chip_info *variant;
int filt_int;
- u8 tx[ADIS16400_MAX_TX] ____cacheline_aligned;
- u8 rx[ADIS16400_MAX_RX] ____cacheline_aligned;
+ struct adis adis;
};
-int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
-
/* At the moment triggers are only used for ring buffer
* filling. This may change!
*/
-#define ADIS16400_SCAN_SUPPLY 0
-#define ADIS16400_SCAN_GYRO_X 1
-#define ADIS16400_SCAN_GYRO_Y 2
-#define ADIS16400_SCAN_GYRO_Z 3
-#define ADIS16400_SCAN_ACC_X 4
-#define ADIS16400_SCAN_ACC_Y 5
-#define ADIS16400_SCAN_ACC_Z 6
-#define ADIS16400_SCAN_MAGN_X 7
-#define ADIS16350_SCAN_TEMP_X 7
-#define ADIS16400_SCAN_MAGN_Y 8
-#define ADIS16350_SCAN_TEMP_Y 8
-#define ADIS16400_SCAN_MAGN_Z 9
-#define ADIS16350_SCAN_TEMP_Z 9
-#define ADIS16400_SCAN_TEMP 10
-#define ADIS16350_SCAN_ADC_0 10
-#define ADIS16400_SCAN_ADC_0 11
-#define ADIS16300_SCAN_INCLI_X 12
-#define ADIS16300_SCAN_INCLI_Y 13
+enum {
+ ADIS16400_SCAN_SUPPLY,
+ ADIS16400_SCAN_GYRO_X,
+ ADIS16400_SCAN_GYRO_Y,
+ ADIS16400_SCAN_GYRO_Z,
+ ADIS16400_SCAN_ACC_X,
+ ADIS16400_SCAN_ACC_Y,
+ ADIS16400_SCAN_ACC_Z,
+ ADIS16400_SCAN_MAGN_X,
+ ADIS16400_SCAN_MAGN_Y,
+ ADIS16400_SCAN_MAGN_Z,
+ ADIS16400_SCAN_BARO,
+ ADIS16350_SCAN_TEMP_X,
+ ADIS16350_SCAN_TEMP_Y,
+ ADIS16350_SCAN_TEMP_Z,
+ ADIS16300_SCAN_INCLI_X,
+ ADIS16300_SCAN_INCLI_Y,
+ ADIS16400_SCAN_ADC,
+};
#ifdef CONFIG_IIO_BUFFER
-void adis16400_remove_trigger(struct iio_dev *indio_dev);
-int adis16400_probe_trigger(struct iio_dev *indio_dev);
ssize_t adis16400_read_data_from_ring(struct device *dev,
struct device_attribute *attr,
char *buf);
-int adis16400_configure_ring(struct iio_dev *indio_dev);
-void adis16400_unconfigure_ring(struct iio_dev *indio_dev);
+int adis16400_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+irqreturn_t adis16400_trigger_handler(int irq, void *p);
#else /* CONFIG_IIO_BUFFER */
-static inline void adis16400_remove_trigger(struct iio_dev *indio_dev)
-{
-}
-
-static inline int adis16400_probe_trigger(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline ssize_t
-adis16400_read_data_from_ring(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return 0;
-}
-
-static int adis16400_configure_ring(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
-{
-}
+#define adis16400_update_scan_mode NULL
+#define adis16400_trigger_handler NULL
#endif /* CONFIG_IIO_BUFFER */
+
#endif /* SPI_ADIS16400_H_ */
diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
new file mode 100644
index 000000000000..054c01d6e73c
--- /dev/null
+++ b/drivers/iio/imu/adis16400_buffer.c
@@ -0,0 +1,96 @@
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#include "adis16400.h"
+
+int adis16400_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ uint16_t *tx, *rx;
+
+ if (st->variant->flags & ADIS16400_NO_BURST)
+ return adis_update_scan_mode(indio_dev, scan_mask);
+
+ kfree(adis->xfer);
+ kfree(adis->buffer);
+
+ adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
+ if (!adis->xfer)
+ return -ENOMEM;
+
+ adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16),
+ GFP_KERNEL);
+ if (!adis->buffer)
+ return -ENOMEM;
+
+ rx = adis->buffer;
+ tx = adis->buffer + indio_dev->scan_bytes;
+
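+	/* A burst read is started by issuing a read of GLOB_CMD; the
+	   device then clocks out its output registers in one transfer */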
+ tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
+ tx[1] = 0;
+
+ adis->xfer[0].tx_buf = tx;
+ adis->xfer[0].bits_per_word = 8;
+ adis->xfer[0].len = 2;
+ adis->xfer[1].tx_buf = tx;
+ adis->xfer[1].bits_per_word = 8;
+ adis->xfer[1].len = indio_dev->scan_bytes;
+
+ spi_message_init(&adis->msg);
+ spi_message_add_tail(&adis->xfer[0], &adis->msg);
+ spi_message_add_tail(&adis->xfer[1], &adis->msg);
+
+ return 0;
+}
+
+irqreturn_t adis16400_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16400_state *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ u32 old_speed_hz = st->adis.spi->max_speed_hz;
+ int ret;
+
+ if (!adis->buffer)
+ return -ENOMEM;
+
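+	/* Burst transfers are limited to a lower SPI clock; temporarily cap the speed */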
+ if (!(st->variant->flags & ADIS16400_NO_BURST) &&
+ st->adis.spi->max_speed_hz > ADIS16400_SPI_BURST) {
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_BURST;
+ spi_setup(st->adis.spi);
+ }
+
+ ret = spi_sync(adis->spi, &adis->msg);
+ if (ret)
+ dev_err(&adis->spi->dev, "Failed to read data: %d\n", ret);
+
+ if (!(st->variant->flags & ADIS16400_NO_BURST)) {
+ st->adis.spi->max_speed_hz = old_speed_hz;
+ spi_setup(st->adis.spi);
+ }
+
+	/* Guaranteed to be aligned to an 8 byte boundary */
+ if (indio_dev->scan_timestamp) {
+ void *b = adis->buffer + indio_dev->scan_bytes - sizeof(s64);
+ *(s64 *)b = pf->timestamp;
+ }
+
+ iio_push_to_buffers(indio_dev, adis->buffer);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
new file mode 100644
index 000000000000..b7f215eab5de
--- /dev/null
+++ b/drivers/iio/imu/adis16400_core.c
@@ -0,0 +1,965 @@
+/*
+ * adis16400.c supports Analog Devices ADIS16400/5
+ * 3d 2g Linear Accelerometers,
+ * 3d Gyroscopes,
+ * 3d Magnetometers via SPI
+ *
+ * Copyright (c) 2009 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+
+#include "adis16400.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t adis16400_show_serial_number(struct file *file,
+ char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct adis16400_state *st = file->private_data;
+ u16 lot1, lot2, serial_number;
+ char buf[16];
+ size_t len;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID1, &lot1);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID2, &lot2);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16334_SERIAL_NUMBER,
+ &serial_number);
+ if (ret < 0)
+ return ret;
+
+ len = snprintf(buf, sizeof(buf), "%.4x-%.4x-%.4x\n", lot1, lot2,
+ serial_number);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16400_serial_number_fops = {
+ .open = simple_open,
+ .read = adis16400_show_serial_number,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static int adis16400_show_product_id(void *arg, u64 *val)
+{
+ struct adis16400_state *st = arg;
+ uint16_t prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16400_PRODUCT_ID, &prod_id);
+ if (ret < 0)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16400_product_id_fops,
+ adis16400_show_product_id, NULL, "%lld\n");
+
+static int adis16400_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16400_state *st = arg;
+ uint16_t flash_count;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16400_FLASH_CNT, &flash_count);
+ if (ret < 0)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(adis16400_flash_count_fops,
+ adis16400_show_flash_count, NULL, "%lld\n");
+
+static int adis16400_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+
+ if (st->variant->flags & ADIS16400_HAS_SERIAL_NUMBER)
+ debugfs_create_file("serial_number", 0400,
+ indio_dev->debugfs_dentry, st,
+ &adis16400_serial_number_fops);
+ if (st->variant->flags & ADIS16400_HAS_PROD_ID)
+ debugfs_create_file("product_id", 0400,
+ indio_dev->debugfs_dentry, st,
+ &adis16400_product_id_fops);
+ debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
+ st, &adis16400_flash_count_fops);
+
+ return 0;
+}
+
+#else
+
+static int adis16400_debugfs_init(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+#endif
+
+enum adis16400_chip_variant {
+ ADIS16300,
+ ADIS16334,
+ ADIS16350,
+ ADIS16360,
+ ADIS16362,
+ ADIS16364,
+ ADIS16400,
+ ADIS16448,
+};
+
+static int adis16334_get_freq(struct adis16400_state *st)
+{
+ int ret;
+ uint16_t t;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
+ if (ret < 0)
+ return ret;
+
+ t >>= ADIS16334_RATE_DIV_SHIFT;
+
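+	/* Rate in mHz: the 819.2 Hz base rate divided by 2^t */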
+ return 819200 >> t;
+}
+
+static int adis16334_set_freq(struct adis16400_state *st, unsigned int freq)
+{
+ unsigned int t;
+
+ if (freq < 819200)
+ t = ilog2(819200 / freq);
+ else
+ t = 0;
+
+ if (t > 0x31)
+ t = 0x31;
+
+ t <<= ADIS16334_RATE_DIV_SHIFT;
+ t |= ADIS16334_RATE_INT_CLK;
+
+ return adis_write_reg_16(&st->adis, ADIS16400_SMPL_PRD, t);
+}
+
+static int adis16400_get_freq(struct adis16400_state *st)
+{
+ int sps, ret;
+ uint16_t t;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
+ if (ret < 0)
+ return ret;
+
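+	/* Rate in mHz: the selected time base divided by (divider + 1) */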
+ sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 52851 : 1638404;
+ sps /= (t & ADIS16400_SMPL_PRD_DIV_MASK) + 1;
+
+ return sps;
+}
+
+static int adis16400_set_freq(struct adis16400_state *st, unsigned int freq)
+{
+ unsigned int t;
+ uint8_t val = 0;
+
+ t = 1638404 / freq;
+ if (t >= 128) {
+ val |= ADIS16400_SMPL_PRD_TIME_BASE;
+ t = 52851 / freq;
+ if (t >= 128)
+ t = 127;
+ } else if (t != 0) {
+ t--;
+ }
+
+ val |= t;
+
+ if (t >= 0x0A || (val & ADIS16400_SMPL_PRD_TIME_BASE))
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_SLOW;
+ else
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
+
+ return adis_write_reg_8(&st->adis, ADIS16400_SMPL_PRD, val);
+}
+
+static ssize_t adis16400_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = st->variant->get_freq(st);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d.%.3d\n", ret / 1000, ret % 1000);
+}
+
+static const unsigned adis16400_3db_divisors[] = {
+ [0] = 2, /* Special case */
+ [1] = 6,
+ [2] = 12,
+ [3] = 25,
+ [4] = 50,
+ [5] = 100,
+ [6] = 200,
+ [7] = 200, /* Not a valid setting */
+};
+
+static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ uint16_t val16;
+ int i, ret;
+
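+	/* Pick the largest divisor whose 3dB frequency still meets the requested value */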
+ for (i = ARRAY_SIZE(adis16400_3db_divisors) - 1; i >= 1; i--) {
+ if (sps / adis16400_3db_divisors[i] >= val)
+ break;
+ }
+
+ ret = adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &val16);
+ if (ret < 0)
+ return ret;
+
+ ret = adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
+ (val16 & ~0x07) | i);
+ return ret;
+}
+
+static ssize_t adis16400_write_frequency(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int i, f, val;
+ int ret;
+
+ ret = iio_str_to_fixpoint(buf, 100, &i, &f);
+ if (ret)
+ return ret;
+
+ val = i * 1000 + f;
+
+ if (val <= 0)
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ st->variant->set_freq(st, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+/* Power down the device */
+static int adis16400_stop_device(struct iio_dev *indio_dev)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = adis_write_reg_16(&st->adis, ADIS16400_SLP_CNT,
+ ADIS16400_SLP_CNT_POWER_OFF);
+ if (ret)
+ dev_err(&indio_dev->dev,
+ "problem with turning device off: SLP_CNT");
+
+ return ret;
+}
+
+static int adis16400_initial_setup(struct iio_dev *indio_dev)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ uint16_t prod_id, smp_prd;
+ unsigned int device_id;
+ int ret;
+
+ /* use low spi speed for init if the device has a slow mode */
+ if (st->variant->flags & ADIS16400_HAS_SLOW_MODE)
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_SLOW;
+ else
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
+ st->adis.spi->mode = SPI_MODE_3;
+ spi_setup(st->adis.spi);
+
+ ret = adis_initial_startup(&st->adis);
+ if (ret)
+ return ret;
+
+ if (st->variant->flags & ADIS16400_HAS_PROD_ID) {
+ ret = adis_read_reg_16(&st->adis,
+ ADIS16400_PRODUCT_ID, &prod_id);
+ if (ret)
+ goto err_ret;
+
+ sscanf(indio_dev->name, "adis%u\n", &device_id);
+
+ if (prod_id != device_id)
+ dev_warn(&indio_dev->dev, "Device ID(%u) and product ID(%u) do not match.",
+ device_id, prod_id);
+
+ dev_info(&indio_dev->dev, "%s: prod_id 0x%04x at CS%d (irq %d)\n",
+ indio_dev->name, prod_id,
+ st->adis.spi->chip_select, st->adis.spi->irq);
+ }
+ /* use high spi speed if possible */
+ if (st->variant->flags & ADIS16400_HAS_SLOW_MODE) {
+ ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &smp_prd);
+ if (ret)
+ goto err_ret;
+
+ if ((smp_prd & ADIS16400_SMPL_PRD_DIV_MASK) < 0x0A) {
+ st->adis.spi->max_speed_hz = ADIS16400_SPI_FAST;
+ spi_setup(st->adis.spi);
+ }
+ }
+
+err_ret:
+ return ret;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ adis16400_read_frequency,
+ adis16400_write_frequency);
+
+static const uint8_t adis16400_addresses[] = {
+ [ADIS16400_SCAN_GYRO_X] = ADIS16400_XGYRO_OFF,
+ [ADIS16400_SCAN_GYRO_Y] = ADIS16400_YGYRO_OFF,
+ [ADIS16400_SCAN_GYRO_Z] = ADIS16400_ZGYRO_OFF,
+ [ADIS16400_SCAN_ACC_X] = ADIS16400_XACCL_OFF,
+ [ADIS16400_SCAN_ACC_Y] = ADIS16400_YACCL_OFF,
+ [ADIS16400_SCAN_ACC_Z] = ADIS16400_ZACCL_OFF,
+};
+
+static int adis16400_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int ret, sps;
+
+ switch (info) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ mutex_lock(&indio_dev->mlock);
+ ret = adis_write_reg_16(&st->adis,
+ adis16400_addresses[chan->scan_index], val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ /*
+ * Need to cache values so we can update if the frequency
+ * changes.
+ */
+ mutex_lock(&indio_dev->mlock);
+ st->filt_int = val;
+ /* Work out update to current value */
+ sps = st->variant->get_freq(st);
+ if (sps < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return sps;
+ }
+
+ ret = adis16400_set_filter(indio_dev, sps,
+ val * 1000 + val2 / 1000);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adis16400_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int16_t val16;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan, 0, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = 0;
+ *val2 = st->variant->gyro_scale_micro;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_VOLTAGE:
+ *val = 0;
+ if (chan->channel == 0) {
+ *val = 2;
+ *val2 = 418000; /* 2.418 mV */
+ } else {
+ *val = 0;
+ *val2 = 805800; /* 805.8 uV */
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ACCEL:
+ *val = 0;
+ *val2 = st->variant->accel_scale_micro;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_MAGN:
+ *val = 0;
+ *val2 = 500; /* 0.5 mgauss */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = st->variant->temp_scale_nano / 1000000;
+ *val2 = (st->variant->temp_scale_nano % 1000000);
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ mutex_lock(&indio_dev->mlock);
+ ret = adis_read_reg_16(&st->adis,
+ adis16400_addresses[chan->scan_index], &val16);
+ mutex_unlock(&indio_dev->mlock);
+ if (ret)
+ return ret;
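+		/* Sign extend the 12-bit two's complement offset value */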
+ val16 = ((val16 & 0xFFF) << 4) >> 4;
+ *val = val16;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ /* currently only temperature */
+ *val = st->variant->temp_offset;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ mutex_lock(&indio_dev->mlock);
+ /* Need both the number of taps and the sampling frequency */
+ ret = adis_read_reg_16(&st->adis,
+ ADIS16400_SENS_AVG,
+ &val16);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ ret = st->variant->get_freq(st);
+ if (ret >= 0) {
+ ret /= adis16400_3db_divisors[val16 & 0x07];
+ *val = ret / 1000;
+ *val2 = (ret % 1000) * 1000;
+ }
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = 0, \
+ .extend_name = name, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
+ .address = (addr), \
+ .scan_index = (si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_SUPPLY_CHAN(addr, bits) \
+ ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
+
+#define ADIS16400_AUX_ADC_CHAN(addr, bits) \
+ ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
+
+#define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
+ .type = IIO_ANGL_VEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## mod, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT, \
+ .address = addr, \
+ .scan_index = ADIS16400_SCAN_GYRO_ ## mod, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_ACCEL_CHAN(mod, addr, bits) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## mod, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT, \
+ .address = (addr), \
+ .scan_index = ADIS16400_SCAN_ACC_ ## mod, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_MAGN_CHAN(mod, addr, bits) { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## mod, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT, \
+ .address = (addr), \
+ .scan_index = ADIS16400_SCAN_MAGN_ ## mod, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_MOD_TEMP_NAME_X "x"
+#define ADIS16400_MOD_TEMP_NAME_Y "y"
+#define ADIS16400_MOD_TEMP_NAME_Z "z"
+
+#define ADIS16400_MOD_TEMP_CHAN(mod, addr, bits) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .extend_name = ADIS16400_MOD_TEMP_NAME_ ## mod, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT, \
+ .address = (addr), \
+ .scan_index = ADIS16350_SCAN_TEMP_ ## mod, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_TEMP_CHAN(addr, bits) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
+ .address = (addr), \
+ .scan_index = ADIS16350_SCAN_TEMP_X, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS16400_INCLI_CHAN(mod, addr, bits) { \
+ .type = IIO_INCLI, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## mod, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ .address = (addr), \
+ .scan_index = ADIS16300_SCAN_INCLI_ ## mod, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+static const struct iio_chan_spec adis16400_channels[] = {
+ ADIS16400_SUPPLY_CHAN(ADIS16400_SUPPLY_OUT, 14),
+ ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 14),
+ ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+ ADIS16400_MAGN_CHAN(X, ADIS16400_XMAGN_OUT, 14),
+ ADIS16400_MAGN_CHAN(Y, ADIS16400_YMAGN_OUT, 14),
+ ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
+ ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
+ ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(12)
+};
+
+static const struct iio_chan_spec adis16448_channels[] = {
+ ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 16),
+ ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 16),
+ ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 16),
+ ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 16),
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 16),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 16),
+ ADIS16400_MAGN_CHAN(X, ADIS16400_XMAGN_OUT, 16),
+ ADIS16400_MAGN_CHAN(Y, ADIS16400_YMAGN_OUT, 16),
+ ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 16),
+ {
+ .type = IIO_PRESSURE,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ .address = ADIS16448_BARO_OUT,
+ .scan_index = ADIS16400_SCAN_BARO,
+ .scan_type = IIO_ST('s', 16, 16, 0),
+ },
+ ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(11)
+};
+
+static const struct iio_chan_spec adis16350_channels[] = {
+ ADIS16400_SUPPLY_CHAN(ADIS16400_SUPPLY_OUT, 12),
+ ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 14),
+ ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+ ADIS16400_MAGN_CHAN(X, ADIS16400_XMAGN_OUT, 14),
+ ADIS16400_MAGN_CHAN(Y, ADIS16400_YMAGN_OUT, 14),
+ ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
+ ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
+ ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
+ ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
+ ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(11)
+};
+
+static const struct iio_chan_spec adis16300_channels[] = {
+ ADIS16400_SUPPLY_CHAN(ADIS16400_SUPPLY_OUT, 12),
+ ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+ ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+ ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
+ ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
+ ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
+ ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
+ IIO_CHAN_SOFT_TIMESTAMP(14)
+};
+
+static const struct iio_chan_spec adis16334_channels[] = {
+ ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 14),
+ ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 14),
+ ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
+ ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
+ ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(8)
+};
+
+static struct attribute *adis16400_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16400_attribute_group = {
+ .attrs = adis16400_attributes,
+};
+
+static struct adis16400_chip_info adis16400_chips[] = {
+ [ADIS16300] = {
+ .channels = adis16300_channels,
+ .num_channels = ARRAY_SIZE(adis16300_channels),
+ .flags = ADIS16400_HAS_SLOW_MODE,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = 5884,
+ .temp_scale_nano = 140000000, /* 0.14 C */
+ .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16334] = {
+ .channels = adis16334_channels,
+ .num_channels = ARRAY_SIZE(adis16334_channels),
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_NO_BURST |
+ ADIS16400_HAS_SERIAL_NUMBER,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
+ .temp_scale_nano = 67850000, /* 0.06785 C */
+ .temp_offset = 25000000 / 67850, /* 25 C = 0x00 */
+ .set_freq = adis16334_set_freq,
+ .get_freq = adis16334_get_freq,
+ },
+ [ADIS16350] = {
+ .channels = adis16350_channels,
+ .num_channels = ARRAY_SIZE(adis16350_channels),
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(73260), /* 0.07326 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(2522), /* 0.002522 g */
+ .temp_scale_nano = 145300000, /* 0.1453 C */
+ .temp_offset = 25000000 / 145300, /* 25 C = 0x00 */
+ .flags = ADIS16400_NO_BURST | ADIS16400_HAS_SLOW_MODE,
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16360] = {
+ .channels = adis16350_channels,
+ .num_channels = ARRAY_SIZE(adis16350_channels),
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+ ADIS16400_HAS_SERIAL_NUMBER,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
+ .temp_scale_nano = 136000000, /* 0.136 C */
+ .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16362] = {
+ .channels = adis16350_channels,
+ .num_channels = ARRAY_SIZE(adis16350_channels),
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+ ADIS16400_HAS_SERIAL_NUMBER,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(333), /* 0.333 mg */
+ .temp_scale_nano = 136000000, /* 0.136 C */
+ .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16364] = {
+ .channels = adis16350_channels,
+ .num_channels = ARRAY_SIZE(adis16350_channels),
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+ ADIS16400_HAS_SERIAL_NUMBER,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(1000), /* 1 mg */
+ .temp_scale_nano = 136000000, /* 0.136 C */
+ .temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16400] = {
+ .channels = adis16400_channels,
+ .num_channels = ARRAY_SIZE(adis16400_channels),
+ .flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
+ .temp_scale_nano = 140000000, /* 0.14 C */
+ .temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
+ .set_freq = adis16400_set_freq,
+ .get_freq = adis16400_get_freq,
+ },
+ [ADIS16448] = {
+ .channels = adis16448_channels,
+ .num_channels = ARRAY_SIZE(adis16448_channels),
+ .flags = ADIS16400_HAS_PROD_ID |
+ ADIS16400_HAS_SERIAL_NUMBER,
+ .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
+ .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
+ .temp_scale_nano = 73860000, /* 0.07386 C */
+ .temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
+ .set_freq = adis16334_set_freq,
+ .get_freq = adis16334_get_freq,
+ }
+};
+
+static const struct iio_info adis16400_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &adis16400_read_raw,
+ .write_raw = &adis16400_write_raw,
+ .attrs = &adis16400_attribute_group,
+ .update_scan_mode = adis16400_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
+};
+
+static const unsigned long adis16400_burst_scan_mask[] = {
+ ~0UL,
+ 0,
+};
+
+static const char * const adis16400_status_error_msgs[] = {
+ [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
+ [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
+ [ADIS16400_DIAG_STAT_XACCL_FAIL] = "X-axis accelerometer self-test failure",
+ [ADIS16400_DIAG_STAT_XGYRO_FAIL] = "X-axis gyroscope self-test failure",
+ [ADIS16400_DIAG_STAT_YGYRO_FAIL] = "Y-axis gyroscope self-test failure",
+ [ADIS16400_DIAG_STAT_ZGYRO_FAIL] = "Z-axis gyroscope self-test failure",
+ [ADIS16400_DIAG_STAT_ALARM2] = "Alarm 2 active",
+ [ADIS16400_DIAG_STAT_ALARM1] = "Alarm 1 active",
+ [ADIS16400_DIAG_STAT_FLASH_CHK] = "Flash checksum error",
+ [ADIS16400_DIAG_STAT_SELF_TEST] = "Self test error",
+ [ADIS16400_DIAG_STAT_OVERFLOW] = "Sensor overrange",
+ [ADIS16400_DIAG_STAT_SPI_FAIL] = "SPI failure",
+ [ADIS16400_DIAG_STAT_FLASH_UPT] = "Flash update failed",
+ [ADIS16400_DIAG_STAT_POWER_HIGH] = "Power supply above 5.25V",
+ [ADIS16400_DIAG_STAT_POWER_LOW] = "Power supply below 4.75V",
+};
+
+static const struct adis_data adis16400_data = {
+ .msc_ctrl_reg = ADIS16400_MSC_CTRL,
+ .glob_cmd_reg = ADIS16400_GLOB_CMD,
+ .diag_stat_reg = ADIS16400_DIAG_STAT,
+
+ .read_delay = 50,
+ .write_delay = 50,
+
+ .self_test_mask = ADIS16400_MSC_CTRL_MEM_TEST,
+ .startup_delay = ADIS16400_STARTUP_DELAY,
+
+ .status_error_msgs = adis16400_status_error_msgs,
+ .status_error_mask = BIT(ADIS16400_DIAG_STAT_ZACCL_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_YACCL_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_XACCL_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_XGYRO_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_YGYRO_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_ZGYRO_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_ALARM2) |
+ BIT(ADIS16400_DIAG_STAT_ALARM1) |
+ BIT(ADIS16400_DIAG_STAT_FLASH_CHK) |
+ BIT(ADIS16400_DIAG_STAT_SELF_TEST) |
+ BIT(ADIS16400_DIAG_STAT_OVERFLOW) |
+ BIT(ADIS16400_DIAG_STAT_SPI_FAIL) |
+ BIT(ADIS16400_DIAG_STAT_FLASH_UPT) |
+ BIT(ADIS16400_DIAG_STAT_POWER_HIGH) |
+ BIT(ADIS16400_DIAG_STAT_POWER_LOW),
+};
+
+static int adis16400_probe(struct spi_device *spi)
+{
+ struct adis16400_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = iio_device_alloc(sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
+
+ /* setup the industrialio driver allocated elements */
+ st->variant = &adis16400_chips[spi_get_device_id(spi)->driver_data];
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->channels = st->variant->channels;
+ indio_dev->num_channels = st->variant->num_channels;
+ indio_dev->info = &adis16400_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (!(st->variant->flags & ADIS16400_NO_BURST))
+ indio_dev->available_scan_masks = adis16400_burst_scan_mask;
+
+ ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
+ if (ret)
+ goto error_free_dev;
+
+ ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev,
+ adis16400_trigger_handler);
+ if (ret)
+ goto error_free_dev;
+
+ /* Get the device into a sane initial state */
+ ret = adis16400_initial_setup(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer;
+
+ adis16400_debugfs_init(indio_dev);
+ return 0;
+
+error_cleanup_buffer:
+ adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+error_free_dev:
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static int adis16400_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis16400_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ adis16400_stop_device(indio_dev);
+
+ adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id adis16400_id[] = {
+ {"adis16300", ADIS16300},
+ {"adis16334", ADIS16334},
+ {"adis16350", ADIS16350},
+ {"adis16354", ADIS16350},
+ {"adis16355", ADIS16350},
+ {"adis16360", ADIS16360},
+ {"adis16362", ADIS16362},
+ {"adis16364", ADIS16364},
+ {"adis16365", ADIS16360},
+ {"adis16400", ADIS16400},
+ {"adis16405", ADIS16400},
+ {"adis16448", ADIS16448},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, adis16400_id);
+
+static struct spi_driver adis16400_driver = {
+ .driver = {
+ .name = "adis16400",
+ .owner = THIS_MODULE,
+ },
+ .id_table = adis16400_id,
+ .probe = adis16400_probe,
+ .remove = adis16400_remove,
+};
+module_spi_driver(adis16400_driver);
+
+MODULE_AUTHOR("Manuel Stahl <manuel.stahl@iis.fraunhofer.de>");
+MODULE_DESCRIPTION("Analog Devices ADIS16400/5 IMU SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
new file mode 100644
index 000000000000..b5cfa3a354cf
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -0,0 +1,13 @@
+#
+# inv-mpu6050 drivers for Invensense MPU devices and combos
+#
+
+config INV_MPU6050_IIO
+ tristate "Invensense MPU6050 devices"
+ depends on I2C && SYSFS
+ select IIO_TRIGGERED_BUFFER
+ help
+ This driver supports the Invensense MPU6050 devices.
+ It is a gyroscope/accelerometer combo device.
+ This driver can be built as a module. The module will be called
+ inv-mpu6050.
diff --git a/drivers/iio/imu/inv_mpu6050/Makefile b/drivers/iio/imu/inv_mpu6050/Makefile
new file mode 100644
index 000000000000..3a677c778afb
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Invensense MPU6050 device.
+#
+
+obj-$(CONFIG_INV_MPU6050_IIO) += inv-mpu6050.o
+inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
new file mode 100644
index 000000000000..37ca05b47e4b
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -0,0 +1,795 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include "inv_mpu_iio.h"
+
+/*
+ * this is the gyro scale translated from the dynamic range of
+ * plus/minus {250, 500, 1000, 2000} deg/s to rad/s
+ */
+static const int gyro_scale_6050[] = {133090, 266181, 532362, 1064724};
+
+/*
+ * this is the accel scale translated from the dynamic range of
+ * plus/minus {2, 4, 8, 16} g to m/s^2
+ */
+static const int accel_scale[] = {598, 1196, 2392, 4785};
+
+static const struct inv_mpu6050_reg_map reg_set_6050 = {
+ .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
+ .lpf = INV_MPU6050_REG_CONFIG,
+ .user_ctrl = INV_MPU6050_REG_USER_CTRL,
+ .fifo_en = INV_MPU6050_REG_FIFO_EN,
+ .gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
+ .accl_config = INV_MPU6050_REG_ACCEL_CONFIG,
+ .fifo_count_h = INV_MPU6050_REG_FIFO_COUNT_H,
+ .fifo_r_w = INV_MPU6050_REG_FIFO_R_W,
+ .raw_gyro = INV_MPU6050_REG_RAW_GYRO,
+ .raw_accl = INV_MPU6050_REG_RAW_ACCEL,
+ .temperature = INV_MPU6050_REG_TEMPERATURE,
+ .int_enable = INV_MPU6050_REG_INT_ENABLE,
+ .pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1,
+ .pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2,
+};
+
+static const struct inv_mpu6050_chip_config chip_config_6050 = {
+ .fsr = INV_MPU6050_FSR_2000DPS,
+ .lpf = INV_MPU6050_FILTER_20HZ,
+ .fifo_rate = INV_MPU6050_INIT_FIFO_RATE,
+ .gyro_fifo_enable = false,
+ .accl_fifo_enable = false,
+ .accl_fs = INV_MPU6050_FS_02G,
+};
+
+static const struct inv_mpu6050_hw hw_info[INV_NUM_PARTS] = {
+ {
+ .num_reg = 117,
+ .name = "MPU6050",
+ .reg = &reg_set_6050,
+ .config = &chip_config_6050,
+ },
+};
+
+int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 d)
+{
+ return i2c_smbus_write_i2c_block_data(st->client, reg, 1, &d);
+}
+
+int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
+{
+ u8 d, mgmt_1;
+ int result;
+
+	/* Clock switching must be done carefully: the clock source can only
+	   be switched to the gyro while the gyro is on. Otherwise, it must
+	   be set to the internal clock */
+ if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
+ result = i2c_smbus_read_i2c_block_data(st->client,
+ st->reg->pwr_mgmt_1, 1, &mgmt_1);
+ if (result != 1)
+ return result;
+
+ mgmt_1 &= ~INV_MPU6050_BIT_CLK_MASK;
+ }
+
+ if ((INV_MPU6050_BIT_PWR_GYRO_STBY == mask) && (!en)) {
+		/* Turning the gyro off requires switching to the internal
+		   clock first, then turning off the gyro engine */
+ mgmt_1 |= INV_CLK_INTERNAL;
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, mgmt_1);
+ if (result)
+ return result;
+ }
+
+ result = i2c_smbus_read_i2c_block_data(st->client,
+ st->reg->pwr_mgmt_2, 1, &d);
+ if (result != 1)
+ return result;
+ if (en)
+ d &= ~mask;
+ else
+ d |= mask;
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_2, d);
+ if (result)
+ return result;
+
+ if (en) {
+		/* Wait for the output to stabilize */
+ msleep(INV_MPU6050_TEMP_UP_TIME);
+ if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
+ /* switch internal clock to PLL */
+ mgmt_1 |= INV_CLK_PLL;
+ result = inv_mpu6050_write_reg(st,
+ st->reg->pwr_mgmt_1, mgmt_1);
+ if (result)
+ return result;
+ }
+ }
+
+ return 0;
+}
+
+int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
+{
+ int result;
+
+ if (power_on)
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, 0);
+ else
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_SLEEP);
+ if (result)
+ return result;
+
+ if (power_on)
+ msleep(INV_MPU6050_REG_UP_TIME);
+
+ return 0;
+}
+
+/**
+ * inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
+ *
+ * Initial configuration:
+ * FSR: ± 2000DPS
+ * DLPF: 20Hz
+ * FIFO rate: 50Hz
+ * Clock source: Gyro PLL
+ */
+static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
+{
+ int result;
+ u8 d;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ return result;
+ d = (INV_MPU6050_FSR_2000DPS << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
+ result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
+ if (result)
+ return result;
+
+ d = INV_MPU6050_FILTER_20HZ;
+ result = inv_mpu6050_write_reg(st, st->reg->lpf, d);
+ if (result)
+ return result;
+
+ d = INV_MPU6050_ONE_K_HZ / INV_MPU6050_INIT_FIFO_RATE - 1;
+ result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+ if (result)
+ return result;
+
+ d = (INV_MPU6050_FS_02G << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+ result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
+ if (result)
+ return result;
+
+ memcpy(&st->chip_config, hw_info[st->chip_type].config,
+ sizeof(struct inv_mpu6050_chip_config));
+ result = inv_mpu6050_set_power_itg(st, false);
+
+ return result;
+}
+
+static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
+ int axis, int *val)
+{
+ int ind, result;
+ __be16 d;
+
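+	/* Each axis is a 16-bit big-endian value, offset 2 bytes per axis from the base register */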
+ ind = (axis - IIO_MOD_X) * 2;
+ result = i2c_smbus_read_i2c_block_data(st->client, reg + ind, 2,
+ (u8 *)&d);
+ if (result != 2)
+ return -EINVAL;
+ *val = (short)be16_to_cpup(&d);
+
+ return IIO_VAL_INT;
+}
+
+static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask) {
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ {
+ int ret, result;
+
+ ret = IIO_VAL_INT;
+ result = 0;
+ mutex_lock(&indio_dev->mlock);
+ if (!st->chip_config.enable) {
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ goto error_read_raw;
+ }
+ /* when enable is on, power is already on */
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ if (!st->chip_config.gyro_fifo_enable ||
+ !st->chip_config.enable) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ goto error_read_raw;
+ }
+ ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
+ chan->channel2, val);
+ if (!st->chip_config.gyro_fifo_enable ||
+ !st->chip_config.enable) {
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ goto error_read_raw;
+ }
+ break;
+ case IIO_ACCEL:
+ if (!st->chip_config.accl_fifo_enable ||
+ !st->chip_config.enable) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ goto error_read_raw;
+ }
+ ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl,
+ chan->channel2, val);
+ if (!st->chip_config.accl_fifo_enable ||
+ !st->chip_config.enable) {
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ goto error_read_raw;
+ }
+ break;
+ case IIO_TEMP:
+			/* wait for stabilization */
+ msleep(INV_MPU6050_SENSOR_UP_TIME);
+ inv_mpu6050_sensor_show(st, st->reg->temperature,
+ IIO_MOD_X, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+error_read_raw:
+ if (!st->chip_config.enable)
+ result |= inv_mpu6050_set_power_itg(st, false);
+ mutex_unlock(&indio_dev->mlock);
+ if (result)
+ return result;
+
+ return ret;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = 0;
+ *val2 = gyro_scale_6050[st->chip_config.fsr];
+
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_ACCEL:
+ *val = 0;
+ *val2 = accel_scale[st->chip_config.accl_fs];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 0;
+ *val2 = INV_MPU6050_TEMP_SCALE;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = INV_MPU6050_TEMP_OFFSET;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr)
+{
+ int result;
+ u8 d;
+
+ if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM)
+ return -EINVAL;
+ if (fsr == st->chip_config.fsr)
+ return 0;
+
+ d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
+ result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
+ if (result)
+ return result;
+ st->chip_config.fsr = fsr;
+
+ return 0;
+}
+
+static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs)
+{
+ int result;
+ u8 d;
+
+ if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM)
+ return -EINVAL;
+ if (fs == st->chip_config.accl_fs)
+ return 0;
+
+ d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
+ result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
+ if (result)
+ return result;
+ st->chip_config.accl_fs = fs;
+
+ return 0;
+}
+
+static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask) {
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int result;
+
+ mutex_lock(&indio_dev->mlock);
+ /* we should only update scale when the chip is disabled, i.e.,
+ not running */
+ if (st->chip_config.enable) {
+ result = -EBUSY;
+ goto error_write_raw;
+ }
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ goto error_write_raw;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ result = inv_mpu6050_write_fsr(st, val);
+ break;
+ case IIO_ACCEL:
+ result = inv_mpu6050_write_accel_fs(st, val);
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+
+error_write_raw:
+ result |= inv_mpu6050_set_power_itg(st, false);
+ mutex_unlock(&indio_dev->mlock);
+
+ return result;
+}
+
+/**
+ * inv_mpu6050_set_lpf() - set the low pass filter based on the FIFO rate.
+ *
+ * Based on the Nyquist principle, the sampling rate must
+ * exceed twice the bandwidth of the signal, or there
+ * will be aliasing. This function searches for the
+ * correct low pass filter setting based on the FIFO rate, i.e.
+ * the sampling frequency.
+ */
+static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
+{
+ const int hz[] = {188, 98, 42, 20, 10, 5};
+ const int d[] = {INV_MPU6050_FILTER_188HZ, INV_MPU6050_FILTER_98HZ,
+ INV_MPU6050_FILTER_42HZ, INV_MPU6050_FILTER_20HZ,
+ INV_MPU6050_FILTER_10HZ, INV_MPU6050_FILTER_5HZ};
+ int i, h, result;
+ u8 data;
+
+ h = (rate >> 1);
+ i = 0;
+ while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
+ i++;
+ data = d[i];
+ result = inv_mpu6050_write_reg(st, st->reg->lpf, data);
+ if (result)
+ return result;
+ st->chip_config.lpf = data;
+
+ return 0;
+}
+
+/**
+ * inv_mpu6050_fifo_rate_store() - Set fifo rate.
+ */
+static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ s32 fifo_rate;
+ u8 d;
+ int result;
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ if (kstrtoint(buf, 10, &fifo_rate))
+ return -EINVAL;
+ if (fifo_rate < INV_MPU6050_MIN_FIFO_RATE ||
+ fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
+ return -EINVAL;
+ if (fifo_rate == st->chip_config.fifo_rate)
+ return count;
+
+ mutex_lock(&indio_dev->mlock);
+ if (st->chip_config.enable) {
+ result = -EBUSY;
+ goto fifo_rate_fail;
+ }
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ goto fifo_rate_fail;
+
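+	/* Output rate = 1 kHz internal sample rate / (divider + 1) */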
+ d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
+ result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+ if (result)
+ goto fifo_rate_fail;
+ st->chip_config.fifo_rate = fifo_rate;
+
+ result = inv_mpu6050_set_lpf(st, fifo_rate);
+ if (result)
+ goto fifo_rate_fail;
+
+fifo_rate_fail:
+ result |= inv_mpu6050_set_power_itg(st, false);
+ mutex_unlock(&indio_dev->mlock);
+ if (result)
+ return result;
+
+ return count;
+}
+
+/**
+ * inv_fifo_rate_show() - Get the current sampling rate.
+ */
+static ssize_t inv_fifo_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
+
+ return sprintf(buf, "%d\n", st->chip_config.fifo_rate);
+}
+
+/**
+ * inv_attr_show() - show the current device parameters.
+ */
+static ssize_t inv_attr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ s8 *m;
+
+ switch (this_attr->address) {
+	/* In the MPU6050 the two matrices are the same because the gyro
+	   and accel are integrated in one chip */
+ case ATTR_GYRO_MATRIX:
+ case ATTR_ACCL_MATRIX:
+ m = st->plat_data.orientation;
+
+ return sprintf(buf, "%d, %d, %d; %d, %d, %d; %d, %d, %d\n",
+ m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * inv_mpu6050_validate_trigger() - validate_trigger callback for invensense
+ * MPU6050 device.
+ * @indio_dev: The IIO device
+ * @trig: The new trigger
+ *
+ * Returns: 0 if the 'trig' matches the trigger registered by the MPU6050
+ * device, -EINVAL otherwise.
+ */
+static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
+ struct iio_trigger *trig)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ if (st->trig != trig)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define INV_MPU6050_CHAN(_type, _channel2, _index) \
+ { \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = _channel2, \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT \
+ | IIO_CHAN_INFO_RAW_SEPARATE_BIT, \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .shift = 0 , \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec inv_mpu_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_MPU6050_SCAN_TIMESTAMP),
+ /*
+	 * Note that temperature is only available via polled reading,
+	 * not as part of the scan elements output.
+ */
+ {
+ .type = IIO_TEMP,
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT
+ | IIO_CHAN_INFO_OFFSET_SEPARATE_BIT
+ | IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ .scan_index = -1,
+ },
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_MPU6050_SCAN_ACCL_X),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_MPU6050_SCAN_ACCL_Y),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+};
+
+/* constant IIO attribute */
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 20 50 100 200 500");
+static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR, inv_fifo_rate_show,
+ inv_mpu6050_fifo_rate_store);
+static IIO_DEVICE_ATTR(in_gyro_matrix, S_IRUGO, inv_attr_show, NULL,
+ ATTR_GYRO_MATRIX);
+static IIO_DEVICE_ATTR(in_accel_matrix, S_IRUGO, inv_attr_show, NULL,
+ ATTR_ACCL_MATRIX);
+
+static struct attribute *inv_attributes[] = {
+ &iio_dev_attr_in_gyro_matrix.dev_attr.attr,
+ &iio_dev_attr_in_accel_matrix.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group inv_attribute_group = {
+ .attrs = inv_attributes
+};
+
+static const struct iio_info mpu_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &inv_mpu6050_read_raw,
+ .write_raw = &inv_mpu6050_write_raw,
+ .attrs = &inv_attribute_group,
+ .validate_trigger = inv_mpu6050_validate_trigger,
+};
+
+/**
+ * inv_check_and_setup_chip() - check and set up the chip.
+ */
+static int inv_check_and_setup_chip(struct inv_mpu6050_state *st,
+ const struct i2c_device_id *id)
+{
+ int result;
+
+ st->chip_type = INV_MPU6050;
+ st->hw = &hw_info[st->chip_type];
+ st->reg = hw_info[st->chip_type].reg;
+
+	/* reset to make sure previous state is cleared */
+ result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_H_RESET);
+ if (result)
+ return result;
+ msleep(INV_MPU6050_POWER_UP_TIME);
+	/*
+	 * Toggle the power state: after reset, the sleep bit could be on
+	 * or off depending on the OTP settings. Toggling power puts the
+	 * chip into a known state and aligns the hardware state with the
+	 * software state.
+	 */
+ result = inv_mpu6050_set_power_itg(st, false);
+ if (result)
+ return result;
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ return result;
+
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ return result;
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ return result;
+
+ return 0;
+}
+
+/**
+ * inv_mpu_probe() - probe function.
+ * @client: i2c client.
+ * @id: i2c device id.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int inv_mpu_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct inv_mpu6050_state *st;
+ struct iio_dev *indio_dev;
+ int result;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK |
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
+ result = -ENOSYS;
+ goto out_no_free;
+ }
+ indio_dev = iio_device_alloc(sizeof(*st));
+ if (indio_dev == NULL) {
+ result = -ENOMEM;
+ goto out_no_free;
+ }
+ st = iio_priv(indio_dev);
+ st->client = client;
+ st->plat_data = *(struct inv_mpu6050_platform_data
+ *)dev_get_platdata(&client->dev);
+	/* power is turned on inside inv_check_and_setup_chip() */
+ result = inv_check_and_setup_chip(st, id);
+ if (result)
+ goto out_free;
+
+ result = inv_mpu6050_init_config(indio_dev);
+ if (result) {
+ dev_err(&client->dev,
+ "Could not initialize device.\n");
+ goto out_free;
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+
+ indio_dev->info = &mpu_info;
+ indio_dev->modes = INDIO_BUFFER_TRIGGERED;
+
+ result = iio_triggered_buffer_setup(indio_dev,
+ inv_mpu6050_irq_handler,
+ inv_mpu6050_read_fifo,
+ NULL);
+ if (result) {
+ dev_err(&st->client->dev, "configure buffer fail %d\n",
+ result);
+ goto out_free;
+ }
+ result = inv_mpu6050_probe_trigger(indio_dev);
+ if (result) {
+ dev_err(&st->client->dev, "trigger probe fail %d\n", result);
+ goto out_unreg_ring;
+ }
+
+ INIT_KFIFO(st->timestamps);
+ spin_lock_init(&st->time_stamp_lock);
+ result = iio_device_register(indio_dev);
+ if (result) {
+ dev_err(&st->client->dev, "IIO register fail %d\n", result);
+ goto out_remove_trigger;
+ }
+
+ return 0;
+
+out_remove_trigger:
+ inv_mpu6050_remove_trigger(st);
+out_unreg_ring:
+ iio_triggered_buffer_cleanup(indio_dev);
+out_free:
+ iio_device_free(indio_dev);
+out_no_free:
+
+ return result;
+}
+
+static int inv_mpu_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ inv_mpu6050_remove_trigger(st);
+ iio_triggered_buffer_cleanup(indio_dev);
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+#ifdef CONFIG_PM_SLEEP
+
+static int inv_mpu_resume(struct device *dev)
+{
+ return inv_mpu6050_set_power_itg(
+ iio_priv(i2c_get_clientdata(to_i2c_client(dev))), true);
+}
+
+static int inv_mpu_suspend(struct device *dev)
+{
+ return inv_mpu6050_set_power_itg(
+ iio_priv(i2c_get_clientdata(to_i2c_client(dev))), false);
+}
+static SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume);
+
+#define INV_MPU6050_PMOPS (&inv_mpu_pmops)
+#else
+#define INV_MPU6050_PMOPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * The device id table identifies which devices are supported by
+ * this driver.
+ */
+static const struct i2c_device_id inv_mpu_id[] = {
+ {"mpu6050", INV_MPU6050},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
+
+static struct i2c_driver inv_mpu_driver = {
+ .probe = inv_mpu_probe,
+ .remove = inv_mpu_remove,
+ .id_table = inv_mpu_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "inv-mpu6050",
+ .pm = INV_MPU6050_PMOPS,
+ },
+};
+
+module_i2c_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense device MPU6050 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
new file mode 100644
index 000000000000..f38395529a44
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -0,0 +1,246 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#include <linux/i2c.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/platform_data/invensense_mpu6050.h>
+
+/**
+ * struct inv_mpu6050_reg_map - Notable registers.
+ * @sample_rate_div: Divider applied to gyro output rate.
+ * @lpf: Configures internal low pass filter.
+ * @user_ctrl: Enables/resets the FIFO.
+ * @fifo_en: Determines which data will appear in FIFO.
+ * @gyro_config: gyro config register.
+ * @accl_config: accel config register
+ * @fifo_count_h: Upper byte of FIFO count.
+ * @fifo_r_w: FIFO register.
+ * @raw_gyro: Address of first gyro register.
+ * @raw_accl: Address of first accel register.
+ * @temperature: temperature register
+ * @int_enable: Interrupt enable register.
+ * @pwr_mgmt_1: Controls chip's power state and clock source.
+ * @pwr_mgmt_2: Controls power state of individual sensors.
+ */
+struct inv_mpu6050_reg_map {
+ u8 sample_rate_div;
+ u8 lpf;
+ u8 user_ctrl;
+ u8 fifo_en;
+ u8 gyro_config;
+ u8 accl_config;
+ u8 fifo_count_h;
+ u8 fifo_r_w;
+ u8 raw_gyro;
+ u8 raw_accl;
+ u8 temperature;
+ u8 int_enable;
+ u8 pwr_mgmt_1;
+ u8 pwr_mgmt_2;
+};
+
+/* device enum */
+enum inv_devices {
+ INV_MPU6050,
+ INV_NUM_PARTS
+};
+
+/**
+ * struct inv_mpu6050_chip_config - Cached chip configuration data.
+ * @fsr: Full scale range.
+ * @lpf: Digital low pass filter frequency.
+ * @accl_fs: accel full scale range.
+ * @enable: master enable state.
+ * @accl_fifo_enable: enable accel data output
+ * @gyro_fifo_enable: enable gyro data output
+ * @fifo_rate: FIFO update rate.
+ */
+struct inv_mpu6050_chip_config {
+ unsigned int fsr:2;
+ unsigned int lpf:3;
+ unsigned int accl_fs:2;
+ unsigned int enable:1;
+ unsigned int accl_fifo_enable:1;
+ unsigned int gyro_fifo_enable:1;
+ u16 fifo_rate;
+};
+
+/**
+ * struct inv_mpu6050_hw - Other important hardware information.
+ * @num_reg: Number of registers on device.
+ * @name: name of the chip.
+ * @reg: register map of the chip.
+ * @config: configuration of the chip.
+ */
+struct inv_mpu6050_hw {
+ u8 num_reg;
+ u8 *name;
+ const struct inv_mpu6050_reg_map *reg;
+ const struct inv_mpu6050_chip_config *config;
+};
+
+/*
+ * struct inv_mpu6050_state - Driver state variables.
+ * @TIMESTAMP_FIFO_SIZE: fifo size for timestamp.
+ * @trig: IIO trigger.
+ * @chip_config: Cached attribute information.
+ * @reg: Map of important registers.
+ * @hw: Other hardware-specific information.
+ * @chip_type: chip type.
+ * @time_stamp_lock: spin lock protecting the timestamp FIFO.
+ * @client: i2c client handle.
+ * @plat_data: platform data.
+ * @timestamps: kfifo queue to store time stamp.
+ */
+struct inv_mpu6050_state {
+#define TIMESTAMP_FIFO_SIZE 16
+ struct iio_trigger *trig;
+ struct inv_mpu6050_chip_config chip_config;
+ const struct inv_mpu6050_reg_map *reg;
+ const struct inv_mpu6050_hw *hw;
+ enum inv_devices chip_type;
+ spinlock_t time_stamp_lock;
+ struct i2c_client *client;
+ struct inv_mpu6050_platform_data plat_data;
+ DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
+};
+
+/* register and associated bit definitions */
+#define INV_MPU6050_REG_SAMPLE_RATE_DIV 0x19
+#define INV_MPU6050_REG_CONFIG 0x1A
+#define INV_MPU6050_REG_GYRO_CONFIG 0x1B
+#define INV_MPU6050_REG_ACCEL_CONFIG 0x1C
+
+#define INV_MPU6050_REG_FIFO_EN 0x23
+#define INV_MPU6050_BIT_ACCEL_OUT 0x08
+#define INV_MPU6050_BITS_GYRO_OUT 0x70
+
+#define INV_MPU6050_REG_INT_ENABLE 0x38
+#define INV_MPU6050_BIT_DATA_RDY_EN 0x01
+#define INV_MPU6050_BIT_DMP_INT_EN 0x02
+
+#define INV_MPU6050_REG_RAW_ACCEL 0x3B
+#define INV_MPU6050_REG_TEMPERATURE 0x41
+#define INV_MPU6050_REG_RAW_GYRO 0x43
+
+#define INV_MPU6050_REG_USER_CTRL 0x6A
+#define INV_MPU6050_BIT_FIFO_RST 0x04
+#define INV_MPU6050_BIT_DMP_RST 0x08
+#define INV_MPU6050_BIT_I2C_MST_EN 0x20
+#define INV_MPU6050_BIT_FIFO_EN 0x40
+#define INV_MPU6050_BIT_DMP_EN 0x80
+
+#define INV_MPU6050_REG_PWR_MGMT_1 0x6B
+#define INV_MPU6050_BIT_H_RESET 0x80
+#define INV_MPU6050_BIT_SLEEP 0x40
+#define INV_MPU6050_BIT_CLK_MASK 0x7
+
+#define INV_MPU6050_REG_PWR_MGMT_2 0x6C
+#define INV_MPU6050_BIT_PWR_ACCL_STBY 0x38
+#define INV_MPU6050_BIT_PWR_GYRO_STBY 0x07
+
+#define INV_MPU6050_REG_FIFO_COUNT_H 0x72
+#define INV_MPU6050_REG_FIFO_R_W 0x74
+
+#define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6
+#define INV_MPU6050_FIFO_COUNT_BYTE 2
+#define INV_MPU6050_FIFO_THRESHOLD 500
+#define INV_MPU6050_POWER_UP_TIME 100
+#define INV_MPU6050_TEMP_UP_TIME 100
+#define INV_MPU6050_SENSOR_UP_TIME 30
+#define INV_MPU6050_REG_UP_TIME 5
+
+#define INV_MPU6050_TEMP_OFFSET 12421
+#define INV_MPU6050_TEMP_SCALE 2941
+#define INV_MPU6050_MAX_GYRO_FS_PARAM 3
+#define INV_MPU6050_MAX_ACCL_FS_PARAM 3
+#define INV_MPU6050_THREE_AXIS 3
+#define INV_MPU6050_GYRO_CONFIG_FSR_SHIFT 3
+#define INV_MPU6050_ACCL_CONFIG_FSR_SHIFT 3
+
+/* 6 + 6 bytes rounded up to 16, plus 8 bytes for the timestamp */
+#define INV_MPU6050_OUTPUT_DATA_SIZE 24
+
+/* init parameters */
+#define INV_MPU6050_INIT_FIFO_RATE 50
+#define INV_MPU6050_TIME_STAMP_TOR 5
+#define INV_MPU6050_MAX_FIFO_RATE 1000
+#define INV_MPU6050_MIN_FIFO_RATE 4
+#define INV_MPU6050_ONE_K_HZ 1000
+
+/* scan element definition */
+enum inv_mpu6050_scan {
+ INV_MPU6050_SCAN_ACCL_X,
+ INV_MPU6050_SCAN_ACCL_Y,
+ INV_MPU6050_SCAN_ACCL_Z,
+ INV_MPU6050_SCAN_GYRO_X,
+ INV_MPU6050_SCAN_GYRO_Y,
+ INV_MPU6050_SCAN_GYRO_Z,
+ INV_MPU6050_SCAN_TIMESTAMP,
+};
+
+enum inv_mpu6050_filter_e {
+ INV_MPU6050_FILTER_256HZ_NOLPF2 = 0,
+ INV_MPU6050_FILTER_188HZ,
+ INV_MPU6050_FILTER_98HZ,
+ INV_MPU6050_FILTER_42HZ,
+ INV_MPU6050_FILTER_20HZ,
+ INV_MPU6050_FILTER_10HZ,
+ INV_MPU6050_FILTER_5HZ,
+ INV_MPU6050_FILTER_2100HZ_NOLPF,
+ NUM_MPU6050_FILTER
+};
+
+/* IIO attribute address */
+enum INV_MPU6050_IIO_ATTR_ADDR {
+ ATTR_GYRO_MATRIX,
+ ATTR_ACCL_MATRIX,
+};
+
+enum inv_mpu6050_accl_fs_e {
+ INV_MPU6050_FS_02G = 0,
+ INV_MPU6050_FS_04G,
+ INV_MPU6050_FS_08G,
+ INV_MPU6050_FS_16G,
+ NUM_ACCL_FSR
+};
+
+enum inv_mpu6050_fsr_e {
+ INV_MPU6050_FSR_250DPS = 0,
+ INV_MPU6050_FSR_500DPS,
+ INV_MPU6050_FSR_1000DPS,
+ INV_MPU6050_FSR_2000DPS,
+ NUM_MPU6050_FSR
+};
+
+enum inv_mpu6050_clock_sel_e {
+ INV_CLK_INTERNAL = 0,
+ INV_CLK_PLL,
+ NUM_CLK
+};
+
+irqreturn_t inv_mpu6050_irq_handler(int irq, void *p);
+irqreturn_t inv_mpu6050_read_fifo(int irq, void *p);
+int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev);
+void inv_mpu6050_remove_trigger(struct inv_mpu6050_state *st);
+int inv_reset_fifo(struct iio_dev *indio_dev);
+int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask);
+int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val);
+int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on);
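
As a rough sanity check (not part of the patch), the temperature constants above correspond to the MPU6050 datasheet formula raw / 340 + 36.53 degC, assuming the usual IIO (raw + offset) * scale convention with the scale expressed in millionths:

#include <stdio.h>

int main(void)
{
	long long raw = 0;		/* example raw temperature sample */
	long long offset = 12421;	/* INV_MPU6050_TEMP_OFFSET */
	long long scale = 2941;		/* INV_MPU6050_TEMP_SCALE, 1e-6 units */
	long long micro_degc = (raw + offset) * scale;

	/* raw 0 -> 36.530161 degC, i.e. the datasheet's 36.53 degC offset */
	printf("%lld.%06lld degC\n", micro_degc / 1000000,
	       micro_degc % 1000000);
	return 0;
}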
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
new file mode 100644
index 000000000000..331781ffbb15
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -0,0 +1,196 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include "inv_mpu_iio.h"
+
+int inv_reset_fifo(struct iio_dev *indio_dev)
+{
+ int result;
+ u8 d;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ /* disable interrupt */
+ result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+ if (result) {
+ dev_err(&st->client->dev, "int_enable failed %d\n", result);
+ return result;
+ }
+ /* disable the sensor output to FIFO */
+ result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+ if (result)
+ goto reset_fifo_fail;
+ /* disable fifo reading */
+ result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+ if (result)
+ goto reset_fifo_fail;
+
+	/* reset FIFO */
+ result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
+ INV_MPU6050_BIT_FIFO_RST);
+ if (result)
+ goto reset_fifo_fail;
+ /* enable interrupt */
+ if (st->chip_config.accl_fifo_enable ||
+ st->chip_config.gyro_fifo_enable) {
+ result = inv_mpu6050_write_reg(st, st->reg->int_enable,
+ INV_MPU6050_BIT_DATA_RDY_EN);
+ if (result)
+ return result;
+ }
+	/* enable FIFO reading */
+ result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
+ INV_MPU6050_BIT_FIFO_EN);
+ if (result)
+ goto reset_fifo_fail;
+ /* enable sensor output to FIFO */
+ d = 0;
+ if (st->chip_config.gyro_fifo_enable)
+ d |= INV_MPU6050_BITS_GYRO_OUT;
+ if (st->chip_config.accl_fifo_enable)
+ d |= INV_MPU6050_BIT_ACCEL_OUT;
+ result = inv_mpu6050_write_reg(st, st->reg->fifo_en, d);
+ if (result)
+ goto reset_fifo_fail;
+
+ return 0;
+
+reset_fifo_fail:
+ dev_err(&st->client->dev, "reset fifo failed %d\n", result);
+ result = inv_mpu6050_write_reg(st, st->reg->int_enable,
+ INV_MPU6050_BIT_DATA_RDY_EN);
+
+ return result;
+}
+
+static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+{
+ unsigned long flags;
+
+	/* take the spin lock to avoid the interrupt kicking in */
+ spin_lock_irqsave(&st->time_stamp_lock, flags);
+ kfifo_reset(&st->timestamps);
+ spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+}
+
+/**
+ * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
+ */
+irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ s64 timestamp;
+
+ timestamp = iio_get_time_ns();
+ spin_lock(&st->time_stamp_lock);
+ kfifo_in(&st->timestamps, &timestamp, 1);
+ spin_unlock(&st->time_stamp_lock);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * inv_mpu6050_read_fifo() - Transfer data from hardware FIFO to KFIFO.
+ */
+irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ size_t bytes_per_datum;
+ int result;
+ u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
+ u16 fifo_count;
+ s64 timestamp;
+ u64 *tmp;
+
+ mutex_lock(&indio_dev->mlock);
+ if (!(st->chip_config.accl_fifo_enable |
+ st->chip_config.gyro_fifo_enable))
+ goto end_session;
+ bytes_per_datum = 0;
+ if (st->chip_config.accl_fifo_enable)
+ bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
+
+ if (st->chip_config.gyro_fifo_enable)
+ bytes_per_datum += INV_MPU6050_BYTES_PER_3AXIS_SENSOR;
+
+ /*
+	 * read the fifo_count register to learn how many bytes are in the
+	 * FIFO right now
+ */
+ result = i2c_smbus_read_i2c_block_data(st->client,
+ st->reg->fifo_count_h,
+ INV_MPU6050_FIFO_COUNT_BYTE, data);
+ if (result != INV_MPU6050_FIFO_COUNT_BYTE)
+ goto end_session;
+ fifo_count = be16_to_cpup((__be16 *)(&data[0]));
+ if (fifo_count < bytes_per_datum)
+ goto end_session;
+	/* FIFO count can't be an odd number; if it is, reset the FIFO */
+ if (fifo_count & 1)
+ goto flush_fifo;
+ if (fifo_count > INV_MPU6050_FIFO_THRESHOLD)
+ goto flush_fifo;
+ /* Timestamp mismatch. */
+ if (kfifo_len(&st->timestamps) >
+ fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR)
+ goto flush_fifo;
+ while (fifo_count >= bytes_per_datum) {
+ result = i2c_smbus_read_i2c_block_data(st->client,
+ st->reg->fifo_r_w,
+ bytes_per_datum, data);
+ if (result != bytes_per_datum)
+ goto flush_fifo;
+
+ result = kfifo_out(&st->timestamps, &timestamp, 1);
+		/* if there is no timestamp available, use 0 */
+ if (0 == result)
+ timestamp = 0;
+
+ tmp = (u64 *)data;
+ tmp[DIV_ROUND_UP(bytes_per_datum, 8)] = timestamp;
+ result = iio_push_to_buffers(indio_dev, data);
+ if (result)
+ goto flush_fifo;
+ fifo_count -= bytes_per_datum;
+ }
+
+end_session:
+ mutex_unlock(&indio_dev->mlock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+
+flush_fifo:
+ /* Flush HW and SW FIFOs. */
+ inv_reset_fifo(indio_dev);
+ inv_clear_kfifo(st);
+ mutex_unlock(&indio_dev->mlock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
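
To make the buffer layout concrete, here is a standalone sketch (not part of the patch) of where inv_mpu6050_read_fifo() places the timestamp when both gyro and accel are streaming; the result matches INV_MPU6050_OUTPUT_DATA_SIZE:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bytes_per_datum = 6 + 6;	/* gyro + accel, 3 axes each */
	unsigned int slot = DIV_ROUND_UP(bytes_per_datum, 8);	/* u64 index */

	/* tmp[slot] = timestamp -> byte offset 16, total scan size 24 */
	printf("timestamp at byte %u, scan size %u bytes\n",
	       slot * 8, slot * 8 + 8);
	return 0;
}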
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
new file mode 100644
index 000000000000..e1d0869e0ad1
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -0,0 +1,155 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include "inv_mpu_iio.h"
+
+static void inv_scan_query(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ st->chip_config.gyro_fifo_enable =
+ test_bit(INV_MPU6050_SCAN_GYRO_X,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_GYRO_Y,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_GYRO_Z,
+ indio_dev->active_scan_mask);
+
+ st->chip_config.accl_fifo_enable =
+ test_bit(INV_MPU6050_SCAN_ACCL_X,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_ACCL_Y,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_ACCL_Z,
+ indio_dev->active_scan_mask);
+}
+
+/**
+ * inv_mpu6050_set_enable() - enable chip functions.
+ * @indio_dev: Device driver instance.
+ * @enable: enable/disable
+ */
+static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int result;
+
+ if (enable) {
+ result = inv_mpu6050_set_power_itg(st, true);
+ if (result)
+ return result;
+ inv_scan_query(indio_dev);
+ if (st->chip_config.gyro_fifo_enable) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ return result;
+ }
+ if (st->chip_config.accl_fifo_enable) {
+ result = inv_mpu6050_switch_engine(st, true,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ return result;
+ }
+ result = inv_reset_fifo(indio_dev);
+ if (result)
+ return result;
+ } else {
+ result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+ if (result)
+ return result;
+
+ result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+ if (result)
+ return result;
+
+ result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+ if (result)
+ return result;
+
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
+ if (result)
+ return result;
+
+ result = inv_mpu6050_switch_engine(st, false,
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
+ if (result)
+ return result;
+ result = inv_mpu6050_set_power_itg(st, false);
+ if (result)
+ return result;
+ }
+ st->chip_config.enable = enable;
+
+ return 0;
+}
+
+/**
+ * inv_mpu_data_rdy_trigger_set_state() - set data ready interrupt state
+ * @trig: Trigger instance
+ * @state: Desired trigger state
+ */
+static int inv_mpu_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ return inv_mpu6050_set_enable(trig->private_data, state);
+}
+
+static const struct iio_trigger_ops inv_mpu_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &inv_mpu_data_rdy_trigger_set_state,
+};
+
+int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ st->trig = iio_trigger_alloc("%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (st->trig == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ ret = request_irq(st->client->irq, &iio_trigger_generic_data_rdy_poll,
+ IRQF_TRIGGER_RISING,
+ "inv_mpu",
+ st->trig);
+ if (ret)
+ goto error_free_trig;
+ st->trig->dev.parent = &st->client->dev;
+ st->trig->private_data = indio_dev;
+ st->trig->ops = &inv_mpu_trigger_ops;
+ ret = iio_trigger_register(st->trig);
+ if (ret)
+ goto error_free_irq;
+ indio_dev->trig = st->trig;
+
+ return 0;
+
+error_free_irq:
+ free_irq(st->client->irq, st->trig);
+error_free_trig:
+ iio_trigger_free(st->trig);
+error_ret:
+ return ret;
+}
+
+void inv_mpu6050_remove_trigger(struct inv_mpu6050_state *st)
+{
+ iio_trigger_unregister(st->trig);
+ free_irq(st->client->irq, st->trig);
+ iio_trigger_free(st->trig);
+}
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 4fe0ead84213..4d6c7d84e155 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -160,7 +160,7 @@ void iio_trigger_notify_done(struct iio_trigger *trig)
trig->use_count--;
if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
if (trig->ops->try_reenable(trig))
- /* Missed and interrupt so launch new poll now */
+ /* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig, 0);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
@@ -193,7 +193,7 @@ static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
* This is not currently handled. Alternative of not enabling trigger unless
* the relevant function is in there may be the best option.
*/
-/* Worth protecting against double additions?*/
+/* Worth protecting against double additions? */
static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
struct iio_poll_func *pf)
{
@@ -201,7 +201,7 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
bool notinuse
= bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
- /* Prevent the module being removed whilst attached to a trigger */
+ /* Prevent the module from being removed whilst attached to a trigger */
__module_get(pf->indio_dev->info->driver_module);
pf->irq = iio_trigger_get_irq(trig);
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
@@ -288,7 +288,7 @@ void iio_dealloc_pollfunc(struct iio_poll_func *pf)
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
/**
- * iio_trigger_read_current() - trigger consumer sysfs query which trigger
+ * iio_trigger_read_current() - trigger consumer sysfs query current trigger
*
* For trigger consumers the current_trigger interface allows the trigger
* used by the device to be queried.
@@ -305,7 +305,7 @@ static ssize_t iio_trigger_read_current(struct device *dev,
}
/**
- * iio_trigger_write_current() trigger consumer sysfs set current trigger
+ * iio_trigger_write_current() - trigger consumer sysfs set current trigger
*
* For trigger consumers the current_trigger interface allows the trigger
* used for this device to be specified at run time based on the triggers
@@ -476,7 +476,7 @@ void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
- /* Clean up and associated but not attached triggers references */
+ /* Clean up an associated but not attached trigger reference */
if (indio_dev->trig)
iio_trigger_put(indio_dev->trig);
}
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index d55e98fb300e..b289915b8469 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -54,39 +54,25 @@ error_ret:
EXPORT_SYMBOL_GPL(iio_map_array_register);
-/* Assumes the exact same array (e.g. memory locations)
- * used at unregistration as used at registration rather than
- * more complex checking of contents.
+/*
+ * Remove all map entries associated with the given iio device
*/
-int iio_map_array_unregister(struct iio_dev *indio_dev,
- struct iio_map *maps)
+int iio_map_array_unregister(struct iio_dev *indio_dev)
{
- int i = 0, ret = 0;
- bool found_it;
+ int ret = -ENODEV;
struct iio_map_internal *mapi;
-
- if (maps == NULL)
- return 0;
+ struct list_head *pos, *tmp;
mutex_lock(&iio_map_list_lock);
- while (maps[i].consumer_dev_name != NULL) {
- found_it = false;
- list_for_each_entry(mapi, &iio_map_list, l)
- if (&maps[i] == mapi->map) {
- list_del(&mapi->l);
- kfree(mapi);
- found_it = true;
- break;
- }
- if (!found_it) {
- ret = -ENODEV;
- goto error_ret;
+ list_for_each_safe(pos, tmp, &iio_map_list) {
+ mapi = list_entry(pos, struct iio_map_internal, l);
+ if (indio_dev == mapi->indio_dev) {
+ list_del(&mapi->l);
+ kfree(mapi);
+ ret = 0;
}
- i++;
}
-error_ret:
mutex_unlock(&iio_map_list_lock);
-
return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
@@ -107,7 +93,8 @@ static const struct iio_chan_spec
}
-struct iio_channel *iio_channel_get(const char *name, const char *channel_name)
+static struct iio_channel *iio_channel_get_sys(const char *name,
+ const char *channel_name)
{
struct iio_map_internal *c_i = NULL, *c = NULL;
struct iio_channel *channel;
@@ -158,6 +145,14 @@ error_no_mem:
iio_device_put(c->indio_dev);
return ERR_PTR(err);
}
+
+struct iio_channel *iio_channel_get(struct device *dev,
+ const char *channel_name)
+{
+ const char *name = dev ? dev_name(dev) : NULL;
+
+ return iio_channel_get_sys(name, channel_name);
+}
EXPORT_SYMBOL_GPL(iio_channel_get);
void iio_channel_release(struct iio_channel *channel)
@@ -167,16 +162,18 @@ void iio_channel_release(struct iio_channel *channel)
}
EXPORT_SYMBOL_GPL(iio_channel_release);
-struct iio_channel *iio_channel_get_all(const char *name)
+struct iio_channel *iio_channel_get_all(struct device *dev)
{
+ const char *name;
struct iio_channel *chans;
struct iio_map_internal *c = NULL;
int nummaps = 0;
int mapind = 0;
int i, ret;
- if (name == NULL)
+ if (dev == NULL)
return ERR_PTR(-EINVAL);
+ name = dev_name(dev);
mutex_lock(&iio_map_list_lock);
/* first count the matching maps */
diff --git a/drivers/iio/kfifo_buf.c b/drivers/iio/kfifo_buf.c
index 5bc5c860e9ca..a923c78d5cb4 100644
--- a/drivers/iio/kfifo_buf.c
+++ b/drivers/iio/kfifo_buf.c
@@ -22,7 +22,6 @@ static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
if ((length == 0) || (bytes_per_datum == 0))
return -EINVAL;
- __iio_update_buffer(&buf->buffer, bytes_per_datum, length);
return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
bytes_per_datum, GFP_KERNEL);
}
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 1763c9bcb98a..5ef1a396e0c9 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -32,6 +32,16 @@ config SENSORS_LM3533
changes. The ALS-control output values can be set per zone for the
three current output channels.
+config SENSORS_TSL2563
+ tristate "TAOS TSL2560, TSL2561, TSL2562 and TSL2563 ambient light sensors"
+ depends on I2C
+ help
+ If you say yes here you get support for the Taos TSL2560,
+ TSL2561, TSL2562 and TSL2563 ambient light sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called tsl2563.
+
config VCNL4000
tristate "VCNL4000 combined ALS and proximity sensor"
depends on I2C
@@ -47,6 +57,7 @@ config HID_SENSOR_ALS
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID ALS"
help
Say yes here to build support for the HID SENSOR
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 21a8f0df1407..040d9c75f8e6 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -4,5 +4,6 @@
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
+obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index e2d042f2a544..3d7e8c9b4beb 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -28,7 +28,6 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include "../common/hid-sensors/hid-sensor-attributes.h"
#include "../common/hid-sensors/hid-sensor-trigger.h"
/*Format: HID-SENSOR-usage_id_in_hex*/
@@ -39,7 +38,7 @@
struct als_state {
struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_iio_common common_attributes;
+ struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info als_illum;
u32 illum;
};
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 1a9adc020f64..fd8be69b7d05 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -1,5 +1,5 @@
/*
- * drivers/i2c/chips/tsl2563.c
+ * drivers/iio/light/tsl2563.c
*
* Copyright (C) 2008 Nokia Corporation
*
@@ -38,52 +38,52 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
-#include "tsl2563.h"
+#include <linux/platform_data/tsl2563.h>
/* Use this many bits for fraction part. */
-#define ADC_FRAC_BITS (14)
+#define ADC_FRAC_BITS 14
/* Given number of 1/10000's in ADC_FRAC_BITS precision. */
#define FRAC10K(f) (((f) * (1L << (ADC_FRAC_BITS))) / (10000))
/* Bits used for fraction in calibration coefficients.*/
-#define CALIB_FRAC_BITS (10)
+#define CALIB_FRAC_BITS 10
/* 0.5 in CALIB_FRAC_BITS precision */
#define CALIB_FRAC_HALF (1 << (CALIB_FRAC_BITS - 1))
/* Make a fraction from a number n that was multiplied with b. */
#define CALIB_FRAC(n, b) (((n) << CALIB_FRAC_BITS) / (b))
/* Decimal 10^(digits in sysfs presentation) */
-#define CALIB_BASE_SYSFS (1000)
-
-#define TSL2563_CMD (0x80)
-#define TSL2563_CLEARINT (0x40)
-
-#define TSL2563_REG_CTRL (0x00)
-#define TSL2563_REG_TIMING (0x01)
-#define TSL2563_REG_LOWLOW (0x02) /* data0 low threshold, 2 bytes */
-#define TSL2563_REG_LOWHIGH (0x03)
-#define TSL2563_REG_HIGHLOW (0x04) /* data0 high threshold, 2 bytes */
-#define TSL2563_REG_HIGHHIGH (0x05)
-#define TSL2563_REG_INT (0x06)
-#define TSL2563_REG_ID (0x0a)
-#define TSL2563_REG_DATA0LOW (0x0c) /* broadband sensor value, 2 bytes */
-#define TSL2563_REG_DATA0HIGH (0x0d)
-#define TSL2563_REG_DATA1LOW (0x0e) /* infrared sensor value, 2 bytes */
-#define TSL2563_REG_DATA1HIGH (0x0f)
-
-#define TSL2563_CMD_POWER_ON (0x03)
-#define TSL2563_CMD_POWER_OFF (0x00)
-#define TSL2563_CTRL_POWER_MASK (0x03)
-
-#define TSL2563_TIMING_13MS (0x00)
-#define TSL2563_TIMING_100MS (0x01)
-#define TSL2563_TIMING_400MS (0x02)
-#define TSL2563_TIMING_MASK (0x03)
-#define TSL2563_TIMING_GAIN16 (0x10)
-#define TSL2563_TIMING_GAIN1 (0x00)
-
-#define TSL2563_INT_DISBLED (0x00)
-#define TSL2563_INT_LEVEL (0x10)
+#define CALIB_BASE_SYSFS 1000
+
+#define TSL2563_CMD 0x80
+#define TSL2563_CLEARINT 0x40
+
+#define TSL2563_REG_CTRL 0x00
+#define TSL2563_REG_TIMING 0x01
+#define TSL2563_REG_LOWLOW 0x02 /* data0 low threshold, 2 bytes */
+#define TSL2563_REG_LOWHIGH 0x03
+#define TSL2563_REG_HIGHLOW 0x04 /* data0 high threshold, 2 bytes */
+#define TSL2563_REG_HIGHHIGH 0x05
+#define TSL2563_REG_INT 0x06
+#define TSL2563_REG_ID 0x0a
+#define TSL2563_REG_DATA0LOW 0x0c /* broadband sensor value, 2 bytes */
+#define TSL2563_REG_DATA0HIGH 0x0d
+#define TSL2563_REG_DATA1LOW 0x0e /* infrared sensor value, 2 bytes */
+#define TSL2563_REG_DATA1HIGH 0x0f
+
+#define TSL2563_CMD_POWER_ON 0x03
+#define TSL2563_CMD_POWER_OFF 0x00
+#define TSL2563_CTRL_POWER_MASK 0x03
+
+#define TSL2563_TIMING_13MS 0x00
+#define TSL2563_TIMING_100MS 0x01
+#define TSL2563_TIMING_400MS 0x02
+#define TSL2563_TIMING_MASK 0x03
+#define TSL2563_TIMING_GAIN16 0x10
+#define TSL2563_TIMING_GAIN1 0x00
+
+#define TSL2563_INT_DISBLED 0x00
+#define TSL2563_INT_LEVEL 0x10
#define TSL2563_INT_PERSIST(n) ((n) & 0x0F)
struct tsl2563_gainlevel_coeff {
@@ -190,8 +190,10 @@ static int tsl2563_configure(struct tsl2563_chip *chip)
ret = i2c_smbus_write_byte_data(chip->client,
TSL2563_CMD | TSL2563_REG_LOWHIGH,
(chip->low_thres >> 8) & 0xFF);
-/* Interrupt register is automatically written anyway if it is relevant
- so is not here */
+/*
+ * Interrupt register is automatically written anyway if it is relevant
+ * so is not here.
+ */
error_ret:
return ret;
}
@@ -423,9 +425,7 @@ static const struct tsl2563_lux_coeff lux_table[] = {
},
};
-/*
- * Convert normalized, scaled ADC values to lux.
- */
+/* Convert normalized, scaled ADC values to lux. */
static unsigned int adc_to_lux(u32 adc0, u32 adc1)
{
const struct tsl2563_lux_coeff *lp = lux_table;
@@ -441,11 +441,6 @@ static unsigned int adc_to_lux(u32 adc0, u32 adc1)
return (unsigned int) (lux >> ADC_FRAC_BITS);
}
-/*--------------------------------------------------------------*/
-/* Sysfs interface */
-/*--------------------------------------------------------------*/
-
-
/* Apply calibration coefficient to ADC count. */
static u32 calib_adc(u32 adc, u32 calib)
{
@@ -677,18 +672,11 @@ static int tsl2563_read_interrupt_config(struct iio_dev *indio_dev,
TSL2563_CMD | TSL2563_REG_INT);
mutex_unlock(&chip->lock);
if (ret < 0)
- goto error_ret;
- ret = !!(ret & 0x30);
-error_ret:
+ return ret;
- return ret;
+ return !!(ret & 0x30);
}
-/*--------------------------------------------------------------*/
-/* Probe, Attach, Remove */
-/*--------------------------------------------------------------*/
-static struct i2c_driver tsl2563_i2c_driver;
-
static const struct iio_info tsl2563_info_no_irq = {
.driver_module = THIS_MODULE,
.read_raw = &tsl2563_read_raw,
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index c1f0cdd57037..cd29be54f643 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -8,9 +8,40 @@ config HID_SENSOR_MAGNETOMETER_3D
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
tristate "HID Magenetometer 3D"
help
Say yes here to build support for the HID SENSOR
Magnetometer 3D.
+config IIO_ST_MAGN_3AXIS
+ tristate "STMicroelectronics magnetometers 3-Axis Driver"
+ depends on (I2C || SPI_MASTER) && SYSFS
+ select IIO_ST_SENSORS_CORE
+ select IIO_ST_MAGN_I2C_3AXIS if (I2C)
+ select IIO_ST_MAGN_SPI_3AXIS if (SPI_MASTER)
+ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
+ select IIO_ST_MAGN_BUFFER if (IIO_TRIGGERED_BUFFER)
+ help
+ Say yes here to build support for STMicroelectronics magnetometers:
+ LSM303DLHC, LSM303DLM, LIS3MDL.
+
+	  This driver can also be built as a module. If so, these modules
+	  will be created:
+ - st_magn (core functions for the driver [it is mandatory]);
+ - st_magn_i2c (necessary for the I2C devices [optional*]);
+ - st_magn_spi (necessary for the SPI devices [optional*]);
+
+ (*) one of these is necessary to do something.
+
+config IIO_ST_MAGN_I2C_3AXIS
+ tristate
+ depends on IIO_ST_MAGN_3AXIS
+ depends on IIO_ST_SENSORS_I2C
+
+config IIO_ST_MAGN_SPI_3AXIS
+ tristate
+ depends on IIO_ST_MAGN_3AXIS
+ depends on IIO_ST_SENSORS_SPI
+
endmenu
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index 60dc4f2b1963..e78672876dc2 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -3,3 +3,10 @@
#
obj-$(CONFIG_HID_SENSOR_MAGNETOMETER_3D) += hid-sensor-magn-3d.o
+
+obj-$(CONFIG_IIO_ST_MAGN_3AXIS) += st_magn.o
+st_magn-y := st_magn_core.o
+st_magn-$(CONFIG_IIO_BUFFER) += st_magn_buffer.o
+
+obj-$(CONFIG_IIO_ST_MAGN_I2C_3AXIS) += st_magn_i2c.o
+obj-$(CONFIG_IIO_ST_MAGN_SPI_3AXIS) += st_magn_spi.o
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 7ac2c7483ba8..d8d01265220b 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -28,7 +28,6 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include "../common/hid-sensors/hid-sensor-attributes.h"
#include "../common/hid-sensors/hid-sensor-trigger.h"
/*Format: HID-SENSOR-usage_id_in_hex*/
@@ -44,7 +43,7 @@ enum magn_3d_channel {
struct magn_3d_state {
struct hid_sensor_hub_callbacks callbacks;
- struct hid_sensor_iio_common common_attributes;
+ struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info magn[MAGN_3D_CHANNEL_MAX];
u32 magn_val[MAGN_3D_CHANNEL_MAX];
};
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
new file mode 100644
index 000000000000..7e81d00ef0c3
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -0,0 +1,45 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ * v. 1.0.0
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_MAGN_H
+#define ST_MAGN_H
+
+#include <linux/types.h>
+#include <linux/iio/common/st_sensors.h>
+
+#define LSM303DLHC_MAGN_DEV_NAME "lsm303dlhc_magn"
+#define LSM303DLM_MAGN_DEV_NAME "lsm303dlm_magn"
+#define LIS3MDL_MAGN_DEV_NAME "lis3mdl"
+
+int st_magn_common_probe(struct iio_dev *indio_dev);
+void st_magn_common_remove(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_IIO_BUFFER
+int st_magn_allocate_ring(struct iio_dev *indio_dev);
+void st_magn_deallocate_ring(struct iio_dev *indio_dev);
+#else /* CONFIG_IIO_BUFFER */
+static inline int st_magn_probe_trigger(struct iio_dev *indio_dev, int irq)
+{
+ return 0;
+}
+static inline void st_magn_remove_trigger(struct iio_dev *indio_dev, int irq)
+{
+ return;
+}
+static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
+{
+}
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* ST_MAGN_H */
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
new file mode 100644
index 000000000000..708857bdb47d
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -0,0 +1,98 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_magn.h"
+
+static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
+{
+ int err;
+
+ err = st_sensors_set_enable(indio_dev, true);
+ if (err < 0)
+ goto st_magn_set_enable_error;
+
+ err = iio_sw_buffer_preenable(indio_dev);
+
+st_magn_set_enable_error:
+ return err;
+}
+
+static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ mdata->buffer_data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (mdata->buffer_data == NULL) {
+ err = -ENOMEM;
+ goto allocate_memory_error;
+ }
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err < 0)
+ goto st_magn_buffer_postenable_error;
+
+ return err;
+
+st_magn_buffer_postenable_error:
+ kfree(mdata->buffer_data);
+allocate_memory_error:
+ return err;
+}
+
+static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ err = iio_triggered_buffer_predisable(indio_dev);
+ if (err < 0)
+ goto st_magn_buffer_predisable_error;
+
+ err = st_sensors_set_enable(indio_dev, false);
+
+st_magn_buffer_predisable_error:
+ kfree(mdata->buffer_data);
+ return err;
+}
+
+static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
+ .preenable = &st_magn_buffer_preenable,
+ .postenable = &st_magn_buffer_postenable,
+ .predisable = &st_magn_buffer_predisable,
+};
+
+int st_magn_allocate_ring(struct iio_dev *indio_dev)
+{
+ return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &st_sensors_trigger_handler, &st_magn_buffer_setup_ops);
+}
+
+void st_magn_deallocate_ring(struct iio_dev *indio_dev)
+{
+ iio_triggered_buffer_cleanup(indio_dev);
+}
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers buffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
new file mode 100644
index 000000000000..16f0d6df239f
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -0,0 +1,400 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include "st_magn.h"
+
+/* DEFAULT VALUE FOR SENSORS */
+#define ST_MAGN_DEFAULT_OUT_X_L_ADDR		0x04
+#define ST_MAGN_DEFAULT_OUT_Y_L_ADDR		0x08
+#define ST_MAGN_DEFAULT_OUT_Z_L_ADDR		0x06
+
+/* FULLSCALE */
+#define ST_MAGN_FS_AVL_1300MG 1300
+#define ST_MAGN_FS_AVL_1900MG 1900
+#define ST_MAGN_FS_AVL_2500MG 2500
+#define ST_MAGN_FS_AVL_4000MG 4000
+#define ST_MAGN_FS_AVL_4700MG 4700
+#define ST_MAGN_FS_AVL_5600MG 5600
+#define ST_MAGN_FS_AVL_8000MG 8000
+#define ST_MAGN_FS_AVL_8100MG 8100
+#define ST_MAGN_FS_AVL_10000MG 10000
+
+/* CUSTOM VALUES FOR SENSOR 1 */
+#define ST_MAGN_1_WAI_EXP 0x3c
+#define ST_MAGN_1_ODR_ADDR 0x00
+#define ST_MAGN_1_ODR_MASK 0x1c
+#define ST_MAGN_1_ODR_AVL_1HZ_VAL 0x00
+#define ST_MAGN_1_ODR_AVL_2HZ_VAL 0x01
+#define ST_MAGN_1_ODR_AVL_3HZ_VAL 0x02
+#define ST_MAGN_1_ODR_AVL_8HZ_VAL 0x03
+#define ST_MAGN_1_ODR_AVL_15HZ_VAL 0x04
+#define ST_MAGN_1_ODR_AVL_30HZ_VAL 0x05
+#define ST_MAGN_1_ODR_AVL_75HZ_VAL 0x06
+#define ST_MAGN_1_ODR_AVL_220HZ_VAL 0x07
+#define ST_MAGN_1_PW_ADDR 0x02
+#define ST_MAGN_1_PW_MASK 0x03
+#define ST_MAGN_1_PW_ON 0x00
+#define ST_MAGN_1_PW_OFF 0x03
+#define ST_MAGN_1_FS_ADDR 0x01
+#define ST_MAGN_1_FS_MASK 0xe0
+#define ST_MAGN_1_FS_AVL_1300_VAL 0x01
+#define ST_MAGN_1_FS_AVL_1900_VAL 0x02
+#define ST_MAGN_1_FS_AVL_2500_VAL 0x03
+#define ST_MAGN_1_FS_AVL_4000_VAL 0x04
+#define ST_MAGN_1_FS_AVL_4700_VAL 0x05
+#define ST_MAGN_1_FS_AVL_5600_VAL 0x06
+#define ST_MAGN_1_FS_AVL_8100_VAL 0x07
+#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 1100
+#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 855
+#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 670
+#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 450
+#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 400
+#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 330
+#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 230
+#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 980
+#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 760
+#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 600
+#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 400
+#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 355
+#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 295
+#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 205
+#define ST_MAGN_1_MULTIREAD_BIT false
+
+/* CUSTOM VALUES FOR SENSOR 2 */
+#define ST_MAGN_2_WAI_EXP 0x3d
+#define ST_MAGN_2_ODR_ADDR 0x20
+#define ST_MAGN_2_ODR_MASK 0x1c
+#define ST_MAGN_2_ODR_AVL_1HZ_VAL 0x00
+#define ST_MAGN_2_ODR_AVL_2HZ_VAL 0x01
+#define ST_MAGN_2_ODR_AVL_3HZ_VAL 0x02
+#define ST_MAGN_2_ODR_AVL_5HZ_VAL 0x03
+#define ST_MAGN_2_ODR_AVL_10HZ_VAL 0x04
+#define ST_MAGN_2_ODR_AVL_20HZ_VAL 0x05
+#define ST_MAGN_2_ODR_AVL_40HZ_VAL 0x06
+#define ST_MAGN_2_ODR_AVL_80HZ_VAL 0x07
+#define ST_MAGN_2_PW_ADDR 0x22
+#define ST_MAGN_2_PW_MASK 0x03
+#define ST_MAGN_2_PW_ON 0x00
+#define ST_MAGN_2_PW_OFF 0x03
+#define ST_MAGN_2_FS_ADDR 0x21
+#define ST_MAGN_2_FS_MASK 0x60
+#define ST_MAGN_2_FS_AVL_4000_VAL 0x00
+#define ST_MAGN_2_FS_AVL_8000_VAL 0x01
+#define ST_MAGN_2_FS_AVL_10000_VAL 0x02
+#define ST_MAGN_2_FS_AVL_4000_GAIN 430
+#define ST_MAGN_2_FS_AVL_8000_GAIN 230
+#define ST_MAGN_2_FS_AVL_10000_GAIN 230
+#define ST_MAGN_2_MULTIREAD_BIT false
+#define ST_MAGN_2_OUT_X_L_ADDR 0x28
+#define ST_MAGN_2_OUT_Y_L_ADDR 0x2a
+#define ST_MAGN_2_OUT_Z_L_ADDR 0x2c
+
+static const struct iio_chan_spec st_magn_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_X, IIO_MOD_X, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_DEFAULT_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_Y, IIO_MOD_Y, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_DEFAULT_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_Z, IIO_MOD_Z, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_DEFAULT_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct iio_chan_spec st_magn_2_16bit_channels[] = {
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_X, IIO_MOD_X, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_2_OUT_X_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_Y, IIO_MOD_Y, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_2_OUT_Y_L_ADDR),
+ ST_SENSORS_LSM_CHANNELS(IIO_MAGN, ST_SENSORS_SCAN_Z, IIO_MOD_Z, IIO_LE,
+ ST_SENSORS_DEFAULT_16_REALBITS, ST_MAGN_2_OUT_Z_L_ADDR),
+ IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
+static const struct st_sensors st_magn_sensors[] = {
+ {
+ .wai = ST_MAGN_1_WAI_EXP,
+ .sensors_supported = {
+ [0] = LSM303DLHC_MAGN_DEV_NAME,
+ [1] = LSM303DLM_MAGN_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_magn_16bit_channels,
+ .odr = {
+ .addr = ST_MAGN_1_ODR_ADDR,
+ .mask = ST_MAGN_1_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_MAGN_1_ODR_AVL_1HZ_VAL, },
+ { 2, ST_MAGN_1_ODR_AVL_2HZ_VAL, },
+ { 3, ST_MAGN_1_ODR_AVL_3HZ_VAL, },
+ { 8, ST_MAGN_1_ODR_AVL_8HZ_VAL, },
+ { 15, ST_MAGN_1_ODR_AVL_15HZ_VAL, },
+ { 30, ST_MAGN_1_ODR_AVL_30HZ_VAL, },
+ { 75, ST_MAGN_1_ODR_AVL_75HZ_VAL, },
+ { 220, ST_MAGN_1_ODR_AVL_220HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_MAGN_1_PW_ADDR,
+ .mask = ST_MAGN_1_PW_MASK,
+ .value_on = ST_MAGN_1_PW_ON,
+ .value_off = ST_MAGN_1_PW_OFF,
+ },
+ .fs = {
+ .addr = ST_MAGN_1_FS_ADDR,
+ .mask = ST_MAGN_1_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_MAGN_FS_AVL_1300MG,
+ .value = ST_MAGN_1_FS_AVL_1300_VAL,
+ .gain = ST_MAGN_1_FS_AVL_1300_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_1300_GAIN_Z,
+ },
+ [1] = {
+ .num = ST_MAGN_FS_AVL_1900MG,
+ .value = ST_MAGN_1_FS_AVL_1900_VAL,
+ .gain = ST_MAGN_1_FS_AVL_1900_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_1900_GAIN_Z,
+ },
+ [2] = {
+ .num = ST_MAGN_FS_AVL_2500MG,
+ .value = ST_MAGN_1_FS_AVL_2500_VAL,
+ .gain = ST_MAGN_1_FS_AVL_2500_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_2500_GAIN_Z,
+ },
+ [3] = {
+ .num = ST_MAGN_FS_AVL_4000MG,
+ .value = ST_MAGN_1_FS_AVL_4000_VAL,
+ .gain = ST_MAGN_1_FS_AVL_4000_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_4000_GAIN_Z,
+ },
+ [4] = {
+ .num = ST_MAGN_FS_AVL_4700MG,
+ .value = ST_MAGN_1_FS_AVL_4700_VAL,
+ .gain = ST_MAGN_1_FS_AVL_4700_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_4700_GAIN_Z,
+ },
+ [5] = {
+ .num = ST_MAGN_FS_AVL_5600MG,
+ .value = ST_MAGN_1_FS_AVL_5600_VAL,
+ .gain = ST_MAGN_1_FS_AVL_5600_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_5600_GAIN_Z,
+ },
+ [6] = {
+ .num = ST_MAGN_FS_AVL_8100MG,
+ .value = ST_MAGN_1_FS_AVL_8100_VAL,
+ .gain = ST_MAGN_1_FS_AVL_8100_GAIN_XY,
+ .gain2 = ST_MAGN_1_FS_AVL_8100_GAIN_Z,
+ },
+ },
+ },
+ .multi_read_bit = ST_MAGN_1_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+ {
+ .wai = ST_MAGN_2_WAI_EXP,
+ .sensors_supported = {
+ [0] = LIS3MDL_MAGN_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_magn_2_16bit_channels,
+ .odr = {
+ .addr = ST_MAGN_2_ODR_ADDR,
+ .mask = ST_MAGN_2_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_MAGN_2_ODR_AVL_1HZ_VAL, },
+ { 2, ST_MAGN_2_ODR_AVL_2HZ_VAL, },
+ { 3, ST_MAGN_2_ODR_AVL_3HZ_VAL, },
+ { 5, ST_MAGN_2_ODR_AVL_5HZ_VAL, },
+ { 10, ST_MAGN_2_ODR_AVL_10HZ_VAL, },
+ { 20, ST_MAGN_2_ODR_AVL_20HZ_VAL, },
+ { 40, ST_MAGN_2_ODR_AVL_40HZ_VAL, },
+ { 80, ST_MAGN_2_ODR_AVL_80HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_MAGN_2_PW_ADDR,
+ .mask = ST_MAGN_2_PW_MASK,
+ .value_on = ST_MAGN_2_PW_ON,
+ .value_off = ST_MAGN_2_PW_OFF,
+ },
+ .fs = {
+ .addr = ST_MAGN_2_FS_ADDR,
+ .mask = ST_MAGN_2_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_MAGN_FS_AVL_4000MG,
+ .value = ST_MAGN_2_FS_AVL_4000_VAL,
+ .gain = ST_MAGN_2_FS_AVL_4000_GAIN,
+ },
+ [1] = {
+ .num = ST_MAGN_FS_AVL_8000MG,
+ .value = ST_MAGN_2_FS_AVL_8000_VAL,
+ .gain = ST_MAGN_2_FS_AVL_8000_GAIN,
+ },
+ [2] = {
+ .num = ST_MAGN_FS_AVL_10000MG,
+ .value = ST_MAGN_2_FS_AVL_10000_VAL,
+ .gain = ST_MAGN_2_FS_AVL_10000_GAIN,
+ },
+ },
+ },
+ .multi_read_bit = ST_MAGN_2_MULTIREAD_BIT,
+ .bootime = 2,
+ },
+};
+
+static int st_magn_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val,
+ int *val2, long mask)
+{
+ int err;
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = st_sensors_read_info_raw(indio_dev, ch, val);
+ if (err < 0)
+ goto read_error;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ if ((ch->scan_index == ST_SENSORS_SCAN_Z) &&
+ (mdata->current_fullscale->gain2 != 0))
+ *val2 = mdata->current_fullscale->gain2;
+ else
+ *val2 = mdata->current_fullscale->gain;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+
+read_error:
+ return err;
+}
+
+static int st_magn_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static ST_SENSOR_DEV_ATTR_SAMP_FREQ();
+static ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL();
+static ST_SENSORS_DEV_ATTR_SCALE_AVAIL(in_magn_scale_available);
+
+static struct attribute *st_magn_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_magn_scale_available.dev_attr.attr,
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group st_magn_attribute_group = {
+ .attrs = st_magn_attributes,
+};
+
+static const struct iio_info magn_info = {
+ .driver_module = THIS_MODULE,
+ .attrs = &st_magn_attribute_group,
+ .read_raw = &st_magn_read_raw,
+ .write_raw = &st_magn_write_raw,
+};
+
+int st_magn_common_probe(struct iio_dev *indio_dev)
+{
+ int err;
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &magn_info;
+
+ err = st_sensors_check_device_support(indio_dev,
+ ARRAY_SIZE(st_magn_sensors), st_magn_sensors);
+ if (err < 0)
+ goto st_magn_common_probe_error;
+
+ mdata->multiread_bit = mdata->sensor->multi_read_bit;
+ indio_dev->channels = mdata->sensor->ch;
+ indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
+
+ mdata->current_fullscale = (struct st_sensor_fullscale_avl *)
+ &mdata->sensor->fs.fs_avl[0];
+ mdata->odr = mdata->sensor->odr.odr_avl[0].hz;
+
+ err = st_sensors_init_sensor(indio_dev);
+ if (err < 0)
+ goto st_magn_common_probe_error;
+
+ if (mdata->get_irq_data_ready(indio_dev) > 0) {
+ err = st_magn_allocate_ring(indio_dev);
+ if (err < 0)
+ goto st_magn_common_probe_error;
+ err = st_sensors_allocate_trigger(indio_dev, NULL);
+ if (err < 0)
+ goto st_magn_probe_trigger_error;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err)
+ goto st_magn_device_register_error;
+
+ return err;
+
+st_magn_device_register_error:
+ if (mdata->get_irq_data_ready(indio_dev) > 0)
+ st_sensors_deallocate_trigger(indio_dev);
+st_magn_probe_trigger_error:
+ if (mdata->get_irq_data_ready(indio_dev) > 0)
+ st_magn_deallocate_ring(indio_dev);
+st_magn_common_probe_error:
+ return err;
+}
+EXPORT_SYMBOL(st_magn_common_probe);
+
+void st_magn_common_remove(struct iio_dev *indio_dev)
+{
+ struct st_sensor_data *mdata = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ if (mdata->get_irq_data_ready(indio_dev) > 0) {
+ st_sensors_deallocate_trigger(indio_dev);
+ st_magn_deallocate_ring(indio_dev);
+ }
+ iio_device_free(indio_dev);
+}
+EXPORT_SYMBOL(st_magn_common_remove);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
new file mode 100644
index 000000000000..e6adc4a86425
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -0,0 +1,80 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_i2c.h>
+#include "st_magn.h"
+
+static int st_magn_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *mdata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*mdata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ mdata = iio_priv(indio_dev);
+ mdata->dev = &client->dev;
+
+ st_sensors_i2c_configure(indio_dev, client, mdata);
+
+ err = st_magn_common_probe(indio_dev);
+ if (err < 0)
+ goto st_magn_common_probe_error;
+
+ return 0;
+
+st_magn_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_magn_i2c_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ st_magn_common_remove(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id st_magn_id_table[] = {
+ { LSM303DLHC_MAGN_DEV_NAME },
+ { LSM303DLM_MAGN_DEV_NAME },
+ { LIS3MDL_MAGN_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, st_magn_id_table);
+
+static struct i2c_driver st_magn_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-magn-i2c",
+ },
+ .probe = st_magn_i2c_probe,
+ .remove = st_magn_i2c_remove,
+ .id_table = st_magn_id_table,
+};
+module_i2c_driver(st_magn_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
new file mode 100644
index 000000000000..51adb797cb7d
--- /dev/null
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -0,0 +1,79 @@
+/*
+ * STMicroelectronics magnetometers driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+
+#include <linux/iio/common/st_sensors.h>
+#include <linux/iio/common/st_sensors_spi.h>
+#include "st_magn.h"
+
+static int st_magn_spi_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct st_sensor_data *mdata;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*mdata));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto iio_device_alloc_error;
+ }
+
+ mdata = iio_priv(indio_dev);
+ mdata->dev = &spi->dev;
+
+ st_sensors_spi_configure(indio_dev, spi, mdata);
+
+ err = st_magn_common_probe(indio_dev);
+ if (err < 0)
+ goto st_magn_common_probe_error;
+
+ return 0;
+
+st_magn_common_probe_error:
+ iio_device_free(indio_dev);
+iio_device_alloc_error:
+ return err;
+}
+
+static int st_magn_spi_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ st_magn_common_remove(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id st_magn_id_table[] = {
+ { LSM303DLHC_MAGN_DEV_NAME },
+ { LSM303DLM_MAGN_DEV_NAME },
+ { LIS3MDL_MAGN_DEV_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, st_magn_id_table);
+
+static struct spi_driver st_magn_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "st-magn-spi",
+ },
+ .probe = st_magn_spi_probe,
+ .remove = st_magn_spi_remove,
+ .id_table = st_magn_id_table,
+};
+module_spi_driver(st_magn_driver);
+
+MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics magnetometers spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 0bdf09aa6f42..145d82a64d0a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -783,8 +783,8 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
mmid = (mw->rkey) >> 8;
cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
remove_handle(rhp, &rhp->mmidr, mmid);
- kfree(mhp);
PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+ kfree(mhp);
return 0;
}
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 5b152a366dff..429141078eec 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -135,6 +135,7 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
struct net_device *event_netdev = ifa->ifa_dev->dev;
struct nes_device *nesdev;
struct net_device *netdev;
+ struct net_device *upper_dev;
struct nes_vnic *nesvnic;
unsigned int is_bonded;
@@ -145,8 +146,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
nesdev, nesdev->netdev[0]->name);
netdev = nesdev->netdev[0];
nesvnic = netdev_priv(netdev);
+ upper_dev = netdev_master_upper_dev_get(netdev);
is_bonded = netif_is_bond_slave(netdev) &&
- (netdev->master == event_netdev);
+ (upper_dev == event_netdev);
if ((netdev == event_netdev) || is_bonded) {
if (nesvnic->rdma_enabled == 0) {
nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
@@ -179,9 +181,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
/* fall through */
case NETDEV_CHANGEADDR:
/* Add the address to the IP table */
- if (netdev->master)
+ if (upper_dev)
nesvnic->local_ipaddr =
- ((struct in_device *)netdev->master->ip_ptr)->ifa_list->ifa_address;
+ ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
else
nesvnic->local_ipaddr = ifa->ifa_address;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 22ea67eea5dc..24b9f1a0107b 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1340,7 +1340,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
}
if (netif_is_bond_slave(nesvnic->netdev))
- netdev = nesvnic->netdev->master;
+ netdev = netdev_master_upper_dev_get(nesvnic->netdev);
else
netdev = nesvnic->netdev;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 9542e1644a5c..85cf4d1ac442 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1317,11 +1317,13 @@ static void nes_netdev_get_drvinfo(struct net_device *netdev,
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev));
- sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16,
- nesadapter->firmware_version & 0x000000ff);
- strcpy(drvinfo->version, DRV_VERSION);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", nesadapter->firmware_version >> 16,
+ nesadapter->firmware_version & 0x000000ff);
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
drvinfo->testinfo_len = 0;
drvinfo->eedump_len = 0;
drvinfo->regdump_len = 0;
@@ -1703,7 +1705,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
netdev->dev_addr[3] = (u8)(u64temp>>16);
netdev->dev_addr[4] = (u8)(u64temp>>8);
netdev->dev_addr[5] = (u8)u64temp;
- memcpy(netdev->perm_addr, netdev->dev_addr, 6);
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 4850d03870c2..35275099cafd 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
struct qib_qp __rcu **qpp;
qpp = &dev->qp_table[n];
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
- for (; q; qpp = &q->next) {
+ for (; (q = rcu_dereference_protected(*qpp,
+ lockdep_is_held(&dev->qpt_lock))) != NULL;
+ qpp = &q->next)
if (q == qp) {
atomic_dec(&qp->refcount);
*qpp = qp->next;
rcu_assign_pointer(qp->next, NULL);
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
break;
}
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
- }
}
spin_unlock_irqrestore(&dev->qpt_lock, flags);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 03103d2bd641..67b0c1d23678 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
tx_req->mapping = addr;
+ skb_orphan(skb);
+ skb_dst_drop(skb);
+
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
addr, skb->len);
if (unlikely(rc)) {
@@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
dev->trans_start = jiffies;
++tx->tx_head;
- skb_orphan(skb);
- skb_dst_drop(skb);
-
if (++priv->tx_outstanding == ipoib_sendq_size) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 29bc7b5724ac..ca131335417b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -39,7 +39,7 @@
static void ipoib_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
+ strlcpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver));
}
static int ipoib_get_coalesce(struct net_device *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index a1bca70e20aa..2cfa76f5d99e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
netif_stop_queue(dev);
}
+ skb_orphan(skb);
+ skb_dst_drop(skb);
+
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address->ah, qpn, tx_req, phead, hlen);
if (unlikely(rc)) {
@@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
address->last_send = priv->tx_head;
++priv->tx_head;
-
- skb_orphan(skb);
- skb_dst_drop(skb);
}
if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 55f7e57d4e42..38b523a1ece0 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -3,7 +3,7 @@
#
menu "Input device support"
- depends on !S390 && !UML
+ depends on !UML
config INPUT
tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 47a6009dbf43..71db1930573f 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -18,6 +18,7 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
{
if (dev->absinfo && test_bit(src, dev->absbit)) {
dev->absinfo[dst] = dev->absinfo[src];
+ dev->absinfo[dst].fuzz = 0;
dev->absbit[BIT_WORD(dst)] |= BIT_MASK(dst);
}
}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ce01332f7b3a..c04469928925 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1785,12 +1785,13 @@ static void devm_input_device_release(struct device *dev, void *res)
* its driver (or binding fails). Once managed input device is allocated,
* it is ready to be set up and registered in the same fashion as regular
* input device. There are no special devm_input_device_[un]register()
- * variants, regular ones work with both managed and unmanaged devices.
+ * variants, regular ones work with both managed and unmanaged devices,
+ * should you need them. In most cases, however, a managed input device
+ * need not be explicitly unregistered or freed.
*
* NOTE: the owner device is set up as parent of input device and users
* should not override it.
*/
-
struct input_dev *devm_input_allocate_device(struct device *dev)
{
struct input_dev *input;
@@ -2004,6 +2005,17 @@ static void devm_input_device_unregister(struct device *dev, void *res)
* Once device has been successfully registered it can be unregistered
* with input_unregister_device(); input_free_device() should not be
* called in this case.
+ *
+ * Note that this function is also used to register managed input devices
+ * (ones allocated with devm_input_allocate_device()). Such managed input
+ * devices need not be explicitly unregistered or freed; their tear-down
+ * is controlled by the devres infrastructure. It is also worth noting
+ * that tear-down of managed input devices is internally a 2-step process:
+ * the registered managed input device is first unregistered, but stays in
+ * memory and can still handle input_event() calls (although events will
+ * not be delivered anywhere). The freeing of the managed input device
+ * happens later, when the devres stack is unwound to the point where
+ * the device allocation was made.
*/
int input_register_device(struct input_dev *dev)
{
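
The kernel-doc above describes the devres-managed input API being added here. As an illustrative sketch only (example_keys_probe, the "example-keys" name and the KEY_POWER capability are assumptions for illustration, not part of this patch), a platform driver probe built on devm_input_allocate_device() could look like this:

#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_keys_probe(struct platform_device *pdev)
{
	struct input_dev *input;
	int error;

	/* Lifetime is tied to &pdev->dev; no input_free_device() is needed. */
	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;

	input->name = "example-keys";	/* hypothetical device name */
	input->id.bustype = BUS_HOST;
	input_set_capability(input, EV_KEY, KEY_POWER);

	/* The regular registration call works for managed devices as well. */
	error = input_register_device(input);
	if (error)
		return error;

	/*
	 * No remove() cleanup is required: when the owner device unbinds,
	 * devres first unregisters the input device and frees it later as
	 * the devres stack is unwound.
	 */
	return 0;
}
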
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 358cd7ee905b..7cd74e29cbc8 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -162,7 +162,7 @@ static unsigned int get_time_pit(void)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "PCC"
-#elif defined(CONFIG_MN10300)
+#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x, y) ((x) - (y))
#define TIME_NAME "TSC"
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index f8f892b076e8..b76ac580703c 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -12,7 +12,7 @@
* the Free Software Foundation.
*/
-/* #define WK0701_DEBUG */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define RESERVE 20000
#define SYNC_PULSE 1306000
@@ -67,6 +67,7 @@ static inline void walkera0701_parse_frame(struct walkera_dev *w)
{
int i;
int val1, val2, val3, val4, val5, val6, val7, val8;
+ int magic, magic_bit;
int crc1, crc2;
for (crc1 = crc2 = i = 0; i < 10; i++) {
@@ -102,17 +103,12 @@ static inline void walkera0701_parse_frame(struct walkera_dev *w)
val8 = (w->buf[18] & 1) << 8 | (w->buf[19] << 4) | w->buf[20];
val8 *= (w->buf[18] & 2) - 1; /*sign */
-#ifdef WK0701_DEBUG
- {
- int magic, magic_bit;
- magic = (w->buf[21] << 4) | w->buf[22];
- magic_bit = (w->buf[24] & 8) >> 3;
- printk(KERN_DEBUG
- "walkera0701: %4d %4d %4d %4d %4d %4d %4d %4d (magic %2x %d)\n",
- val1, val2, val3, val4, val5, val6, val7, val8, magic,
- magic_bit);
- }
-#endif
+ magic = (w->buf[21] << 4) | w->buf[22];
+ magic_bit = (w->buf[24] & 8) >> 3;
+ pr_debug("%4d %4d %4d %4d %4d %4d %4d %4d (magic %2x %d)\n",
+ val1, val2, val3, val4, val5, val6, val7, val8,
+ magic, magic_bit);
+
input_report_abs(w->input_dev, ABS_X, val2);
input_report_abs(w->input_dev, ABS_Y, val1);
input_report_abs(w->input_dev, ABS_Z, val6);
@@ -187,6 +183,9 @@ static int walkera0701_open(struct input_dev *dev)
{
struct walkera_dev *w = input_get_drvdata(dev);
+ if (parport_claim(w->pardevice))
+ return -EBUSY;
+
parport_enable_irq(w->parport);
return 0;
}
@@ -197,40 +196,51 @@ static void walkera0701_close(struct input_dev *dev)
parport_disable_irq(w->parport);
hrtimer_cancel(&w->timer);
+
+ parport_release(w->pardevice);
}
static int walkera0701_connect(struct walkera_dev *w, int parport)
{
- int err = -ENODEV;
+ int error;
w->parport = parport_find_number(parport);
- if (w->parport == NULL)
+ if (!w->parport) {
+ pr_err("parport %d does not exist\n", parport);
return -ENODEV;
+ }
if (w->parport->irq == -1) {
- printk(KERN_ERR "walkera0701: parport without interrupt\n");
- goto init_err;
+ pr_err("parport %d does not have interrupt assigned\n",
+ parport);
+ error = -EINVAL;
+ goto err_put_parport;
}
- err = -EBUSY;
w->pardevice = parport_register_device(w->parport, "walkera0701",
NULL, NULL, walkera0701_irq_handler,
PARPORT_DEV_EXCL, w);
- if (!w->pardevice)
- goto init_err;
-
- if (parport_negotiate(w->pardevice->port, IEEE1284_MODE_COMPAT))
- goto init_err1;
+ if (!w->pardevice) {
+ pr_err("failed to register parport device\n");
+ error = -EIO;
+ goto err_put_parport;
+ }
- if (parport_claim(w->pardevice))
- goto init_err1;
+ if (parport_negotiate(w->pardevice->port, IEEE1284_MODE_COMPAT)) {
+ pr_err("failed to negotiate parport mode\n");
+ error = -EIO;
+ goto err_unregister_device;
+ }
hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
w->timer.function = timer_handler;
w->input_dev = input_allocate_device();
- if (!w->input_dev)
- goto init_err2;
+ if (!w->input_dev) {
+ pr_err("failed to allocate input device\n");
+ error = -ENOMEM;
+ goto err_unregister_device;
+ }
input_set_drvdata(w->input_dev, w);
w->input_dev->name = "Walkera WK-0701 TX";
@@ -241,6 +251,7 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
w->input_dev->id.vendor = 0x0001;
w->input_dev->id.product = 0x0001;
w->input_dev->id.version = 0x0100;
+ w->input_dev->dev.parent = w->parport->dev;
w->input_dev->open = walkera0701_open;
w->input_dev->close = walkera0701_close;
@@ -254,27 +265,26 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
input_set_abs_params(w->input_dev, ABS_RUDDER, -512, 512, 0, 0);
input_set_abs_params(w->input_dev, ABS_MISC, -512, 512, 0, 0);
- err = input_register_device(w->input_dev);
- if (err)
- goto init_err3;
+ error = input_register_device(w->input_dev);
+ if (error) {
+ pr_err("failed to register input device\n");
+ goto err_free_input_dev;
+ }
return 0;
- init_err3:
+err_free_input_dev:
input_free_device(w->input_dev);
- init_err2:
- parport_release(w->pardevice);
- init_err1:
+err_unregister_device:
parport_unregister_device(w->pardevice);
- init_err:
+err_put_parport:
parport_put_port(w->parport);
- return err;
+ return error;
}
static void walkera0701_disconnect(struct walkera_dev *w)
{
input_unregister_device(w->input_dev);
- parport_release(w->pardevice);
parport_unregister_device(w->pardevice);
parport_put_port(w->parport);
}
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 5a240c60342d..ac0500667000 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -224,7 +224,7 @@ config KEYBOARD_TCA6416
config KEYBOARD_TCA8418
tristate "TCA8418 Keypad Support"
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
select INPUT_MATRIXKMAP
help
This driver implements basic keypad functionality
@@ -303,7 +303,7 @@ config KEYBOARD_HP7XX
config KEYBOARD_LM8323
tristate "LM8323 keypad chip"
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
depends on LEDS_CLASS
help
If you say yes here you get support for the National Semiconductor
@@ -420,7 +420,7 @@ config KEYBOARD_NOMADIK
config KEYBOARD_TEGRA
tristate "NVIDIA Tegra internal matrix keyboard controller support"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA && OF
select INPUT_MATRIXKMAP
help
Say Y here if you want to use a matrix keyboard connected directly
@@ -479,6 +479,16 @@ config KEYBOARD_SAMSUNG
To compile this driver as a module, choose M here: the
module will be called samsung-keypad.
+config KEYBOARD_GOLDFISH_EVENTS
+ depends on GOLDFISH
+ tristate "Generic Input Event device for Goldfish"
+ help
+ Say Y here to get an input event device for the Goldfish virtual
+ device emulator.
+
+ To compile this driver as a module, choose M here: the
+ module will be called goldfish-events.
+
config KEYBOARD_STOWAWAY
tristate "Stowaway keyboard"
select SERIO
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 44e76002f54b..49b16453d00e 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
+obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index add5ffd9fe26..2626773ff29b 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -676,6 +676,39 @@ static inline void atkbd_disable(struct atkbd *atkbd)
serio_continue_rx(atkbd->ps2dev.serio);
}
+static int atkbd_activate(struct atkbd *atkbd)
+{
+ struct ps2dev *ps2dev = &atkbd->ps2dev;
+
+/*
+ * Enable the keyboard to receive keystrokes.
+ */
+
+ if (ps2_command(ps2dev, NULL, ATKBD_CMD_ENABLE)) {
+ dev_err(&ps2dev->serio->dev,
+ "Failed to enable keyboard on %s\n",
+ ps2dev->serio->phys);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * atkbd_deactivate() resets and disables the keyboard from sending
+ * keystrokes.
+ */
+
+static void atkbd_deactivate(struct atkbd *atkbd)
+{
+ struct ps2dev *ps2dev = &atkbd->ps2dev;
+
+ if (ps2_command(ps2dev, NULL, ATKBD_CMD_RESET_DIS))
+ dev_err(&ps2dev->serio->dev,
+ "Failed to deactivate keyboard on %s\n",
+ ps2dev->serio->phys);
+}
+
/*
* atkbd_probe() probes for an AT keyboard on a serio port.
*/
@@ -726,11 +759,17 @@ static int atkbd_probe(struct atkbd *atkbd)
if (atkbd->id == 0xaca1 && atkbd->translated) {
dev_err(&ps2dev->serio->dev,
- "NCD terminal keyboards are only supported on non-translating controlelrs. "
+ "NCD terminal keyboards are only supported on non-translating controllers. "
"Use i8042.direct=1 to disable translation.\n");
return -1;
}
+/*
+ * Make sure nothing is coming from the keyboard and disturbs our
+ * internal state.
+ */
+ atkbd_deactivate(atkbd);
+
return 0;
}
@@ -825,24 +864,6 @@ static int atkbd_reset_state(struct atkbd *atkbd)
return 0;
}
-static int atkbd_activate(struct atkbd *atkbd)
-{
- struct ps2dev *ps2dev = &atkbd->ps2dev;
-
-/*
- * Enable the keyboard to receive keystrokes.
- */
-
- if (ps2_command(ps2dev, NULL, ATKBD_CMD_ENABLE)) {
- dev_err(&ps2dev->serio->dev,
- "Failed to enable keyboard on %s\n",
- ps2dev->serio->phys);
- return -1;
- }
-
- return 0;
-}
-
/*
* atkbd_cleanup() restores the keyboard state so that BIOS is happy after a
* reboot.
@@ -1150,7 +1171,6 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra);
atkbd_reset_state(atkbd);
- atkbd_activate(atkbd);
} else {
atkbd->set = 2;
@@ -1165,6 +1185,8 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
goto fail3;
atkbd_enable(atkbd);
+ if (serio->write)
+ atkbd_activate(atkbd);
err = input_register_device(atkbd->dev);
if (err)
@@ -1208,8 +1230,6 @@ static int atkbd_reconnect(struct serio *serio)
if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra))
goto out;
- atkbd_activate(atkbd);
-
/*
* Restore LED state and repeat rate. While input core
* will do this for us at resume time reconnect may happen
@@ -1223,7 +1243,17 @@ static int atkbd_reconnect(struct serio *serio)
}
+ /*
+ * Reset our state machine in case reconnect happened in the middle
+ * of a multi-byte scancode.
+ */
+ atkbd->xl_bit = 0;
+ atkbd->emul = 0;
+
atkbd_enable(atkbd);
+ if (atkbd->write)
+ atkbd_activate(atkbd);
+
retval = 0;
out:
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
new file mode 100644
index 000000000000..9f60a2ec88db
--- /dev/null
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+enum {
+ REG_READ = 0x00,
+ REG_SET_PAGE = 0x00,
+ REG_LEN = 0x04,
+ REG_DATA = 0x08,
+
+ PAGE_NAME = 0x00000,
+ PAGE_EVBITS = 0x10000,
+ PAGE_ABSDATA = 0x20000 | EV_ABS,
+};
+
+struct event_dev {
+ struct input_dev *input;
+ int irq;
+ void __iomem *addr;
+ char name[0];
+};
+
+static irqreturn_t events_interrupt(int irq, void *dev_id)
+{
+ struct event_dev *edev = dev_id;
+ unsigned type, code, value;
+
+ type = __raw_readl(edev->addr + REG_READ);
+ code = __raw_readl(edev->addr + REG_READ);
+ value = __raw_readl(edev->addr + REG_READ);
+
+ input_event(edev->input, type, code, value);
+ input_sync(edev->input);
+ return IRQ_HANDLED;
+}
+
+static void events_import_bits(struct event_dev *edev,
+ unsigned long bits[], unsigned type, size_t count)
+{
+ void __iomem *addr = edev->addr;
+ int i, j;
+ size_t size;
+ uint8_t val;
+
+ __raw_writel(PAGE_EVBITS | type, addr + REG_SET_PAGE);
+
+ size = __raw_readl(addr + REG_LEN) * 8;
+ if (size < count)
+ count = size;
+
+ addr += REG_DATA;
+ for (i = 0; i < count; i += 8) {
+ val = __raw_readb(addr++);
+ for (j = 0; j < 8; j++)
+ if (val & 1 << j)
+ set_bit(i + j, bits);
+ }
+}
+
+static void events_import_abs_params(struct event_dev *edev)
+{
+ struct input_dev *input_dev = edev->input;
+ void __iomem *addr = edev->addr;
+ u32 val[4];
+ int count;
+ int i, j;
+
+ __raw_writel(PAGE_ABSDATA, addr + REG_SET_PAGE);
+
+ count = __raw_readl(addr + REG_LEN) / sizeof(val);
+ if (count > ABS_MAX)
+ count = ABS_MAX;
+
+ for (i = 0; i < count; i++) {
+ if (!test_bit(i, input_dev->absbit))
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(val); j++) {
+ int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+ val[j] = __raw_readl(edev->addr + REG_DATA + offset);
+ }
+
+ input_set_abs_params(input_dev, i,
+ val[0], val[1], val[2], val[3]);
+ }
+}
+
+static int events_probe(struct platform_device *pdev)
+{
+ struct input_dev *input_dev;
+ struct event_dev *edev;
+ struct resource *res;
+ unsigned keymapnamelen;
+ void __iomem *addr;
+ int irq;
+ int i;
+ int error;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ addr = devm_ioremap(&pdev->dev, res->start, 4096);
+ if (!addr)
+ return -ENOMEM;
+
+ __raw_writel(PAGE_NAME, addr + REG_SET_PAGE);
+ keymapnamelen = __raw_readl(addr + REG_LEN);
+
+ edev = devm_kzalloc(&pdev->dev,
+ sizeof(struct event_dev) + keymapnamelen + 1,
+ GFP_KERNEL);
+ if (!edev)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
+
+ edev->input = input_dev;
+ edev->addr = addr;
+ edev->irq = irq;
+
+ for (i = 0; i < keymapnamelen; i++)
+ edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
+
+ pr_debug("events_probe() keymap=%s\n", edev->name);
+
+ input_dev->name = edev->name;
+ input_dev->id.bustype = BUS_HOST;
+
+ events_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX);
+ events_import_bits(edev, input_dev->keybit, EV_KEY, KEY_MAX);
+ events_import_bits(edev, input_dev->relbit, EV_REL, REL_MAX);
+ events_import_bits(edev, input_dev->absbit, EV_ABS, ABS_MAX);
+ events_import_bits(edev, input_dev->mscbit, EV_MSC, MSC_MAX);
+ events_import_bits(edev, input_dev->ledbit, EV_LED, LED_MAX);
+ events_import_bits(edev, input_dev->sndbit, EV_SND, SND_MAX);
+ events_import_bits(edev, input_dev->ffbit, EV_FF, FF_MAX);
+ events_import_bits(edev, input_dev->swbit, EV_SW, SW_MAX);
+
+ events_import_abs_params(edev);
+
+ error = devm_request_irq(&pdev->dev, edev->irq, events_interrupt, 0,
+ "goldfish-events-keypad", edev);
+ if (error)
+ return error;
+
+ error = input_register_device(input_dev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static struct platform_driver events_driver = {
+ .probe = events_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "goldfish_events",
+ },
+};
+
+module_platform_driver(events_driver);
+
+MODULE_AUTHOR("Brian Swetland");
+MODULE_DESCRIPTION("Goldfish Event Device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 6d150e3e1f55..98f9113251d2 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -20,6 +20,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/timer.h>
@@ -414,15 +415,23 @@ open_err:
return -EIO;
}
+#ifdef CONFIG_OF
+static struct of_device_id imx_keypad_of_match[] = {
+ { .compatible = "fsl,imx21-kpp", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_keypad_of_match);
+#endif
+
static int imx_keypad_probe(struct platform_device *pdev)
{
const struct matrix_keymap_data *keymap_data = pdev->dev.platform_data;
struct imx_keypad *keypad;
struct input_dev *input_dev;
struct resource *res;
- int irq, error, i;
+ int irq, error, i, row, col;
- if (keymap_data == NULL) {
+ if (!keymap_data && !pdev->dev.of_node) {
dev_err(&pdev->dev, "no keymap defined\n");
return -EINVAL;
}
@@ -480,22 +489,6 @@ static int imx_keypad_probe(struct platform_device *pdev)
goto failed_unmap;
}
- /* Search for rows and cols enabled */
- for (i = 0; i < keymap_data->keymap_size; i++) {
- keypad->rows_en_mask |= 1 << KEY_ROW(keymap_data->keymap[i]);
- keypad->cols_en_mask |= 1 << KEY_COL(keymap_data->keymap[i]);
- }
-
- if (keypad->rows_en_mask > ((1 << MAX_MATRIX_KEY_ROWS) - 1) ||
- keypad->cols_en_mask > ((1 << MAX_MATRIX_KEY_COLS) - 1)) {
- dev_err(&pdev->dev,
- "invalid key data (too many rows or colums)\n");
- error = -EINVAL;
- goto failed_clock_put;
- }
- dev_dbg(&pdev->dev, "enabled rows mask: %x\n", keypad->rows_en_mask);
- dev_dbg(&pdev->dev, "enabled cols mask: %x\n", keypad->cols_en_mask);
-
/* Init the Input device */
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
@@ -512,6 +505,19 @@ static int imx_keypad_probe(struct platform_device *pdev)
goto failed_clock_put;
}
+ /* Search for rows and cols enabled */
+ for (row = 0; row < MAX_MATRIX_KEY_ROWS; row++) {
+ for (col = 0; col < MAX_MATRIX_KEY_COLS; col++) {
+ i = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT);
+ if (keypad->keycodes[i] != KEY_RESERVED) {
+ keypad->rows_en_mask |= 1 << row;
+ keypad->cols_en_mask |= 1 << col;
+ }
+ }
+ }
+ dev_dbg(&pdev->dev, "enabled rows mask: %x\n", keypad->rows_en_mask);
+ dev_dbg(&pdev->dev, "enabled cols mask: %x\n", keypad->cols_en_mask);
+
__set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, keypad);
@@ -631,6 +637,7 @@ static struct platform_driver imx_keypad_driver = {
.name = "imx-keypad",
.owner = THIS_MODULE,
.pm = &imx_kbd_pm_ops,
+ .of_match_table = of_match_ptr(imx_keypad_of_match),
},
.probe = imx_keypad_probe,
.remove = imx_keypad_remove,
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 93c812662134..0de23f41b2d3 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -398,7 +398,7 @@ static irqreturn_t lm8323_irq(int irq, void *_lm)
lm8323_configure(lm);
}
for (i = 0; i < LM8323_NUM_PWMS; i++) {
- if (ints & (1 << (INT_PWM1 + i))) {
+ if (ints & (INT_PWM1 << i)) {
dev_vdbg(&lm->client->dev,
"pwm%d engine completed\n", i);
pwm_done(&lm->pwm[i]);
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index f4ff0dda7597..71d77192ac1e 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -403,7 +403,7 @@ matrix_keypad_parse_dt(struct device *dev)
struct matrix_keypad_platform_data *pdata;
struct device_node *np = dev->of_node;
unsigned int *gpios;
- int i;
+ int i, nrow, ncol;
if (!np) {
dev_err(dev, "device lacks DT data\n");
@@ -416,9 +416,9 @@ matrix_keypad_parse_dt(struct device *dev)
return ERR_PTR(-ENOMEM);
}
- pdata->num_row_gpios = of_gpio_named_count(np, "row-gpios");
- pdata->num_col_gpios = of_gpio_named_count(np, "col-gpios");
- if (!pdata->num_row_gpios || !pdata->num_col_gpios) {
+ pdata->num_row_gpios = nrow = of_gpio_named_count(np, "row-gpios");
+ pdata->num_col_gpios = ncol = of_gpio_named_count(np, "col-gpios");
+ if (nrow <= 0 || ncol <= 0) {
dev_err(dev, "number of keypad rows/columns not specified\n");
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 3dc2b0f27b0c..1c0ddad0a1cc 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/leds.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
@@ -39,6 +40,11 @@
#define QT2160_CMD_GPIOS 6
#define QT2160_CMD_SUBVER 7
#define QT2160_CMD_CALIBRATE 10
+#define QT2160_CMD_DRIVE_X 70
+#define QT2160_CMD_PWMEN_X 74
+#define QT2160_CMD_PWM_DUTY 76
+
+#define QT2160_NUM_LEDS_X 8
#define QT2160_CYCLE_INTERVAL (2*HZ)
@@ -49,6 +55,17 @@ static unsigned char qt2160_key2code[] = {
KEY_C, KEY_D, KEY_E, KEY_F,
};
+#ifdef CONFIG_LEDS_CLASS
+struct qt2160_led {
+ struct qt2160_data *qt2160;
+ struct led_classdev cdev;
+ struct work_struct work;
+ char name[32];
+ int id;
+ enum led_brightness new_brightness;
+};
+#endif
+
struct qt2160_data {
struct i2c_client *client;
struct input_dev *input;
@@ -56,8 +73,61 @@ struct qt2160_data {
spinlock_t lock; /* Protects canceling/rescheduling of dwork */
unsigned short keycodes[ARRAY_SIZE(qt2160_key2code)];
u16 key_matrix;
+#ifdef CONFIG_LEDS_CLASS
+ struct qt2160_led leds[QT2160_NUM_LEDS_X];
+ struct mutex led_lock;
+#endif
};
+static int qt2160_read(struct i2c_client *client, u8 reg);
+static int qt2160_write(struct i2c_client *client, u8 reg, u8 data);
+
+#ifdef CONFIG_LEDS_CLASS
+
+static void qt2160_led_work(struct work_struct *work)
+{
+ struct qt2160_led *led = container_of(work, struct qt2160_led, work);
+ struct qt2160_data *qt2160 = led->qt2160;
+ struct i2c_client *client = qt2160->client;
+ int value = led->new_brightness;
+ u32 drive, pwmen;
+
+ mutex_lock(&qt2160->led_lock);
+
+ drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
+ pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
+ if (value != LED_OFF) {
+ drive |= (1 << led->id);
+ pwmen |= (1 << led->id);
+
+ } else {
+ drive &= ~(1 << led->id);
+ pwmen &= ~(1 << led->id);
+ }
+ qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
+ qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
+
+ /*
+ * Changing this register will change the brightness
+ * of every LED in the qt2160. It's a HW limitation.
+ */
+ if (value != LED_OFF)
+ qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
+
+ mutex_unlock(&qt2160->led_lock);
+}
+
+static void qt2160_led_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
+
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+#endif /* CONFIG_LEDS_CLASS */
+
static int qt2160_read_block(struct i2c_client *client,
u8 inireg, u8 *buffer, unsigned int count)
{
@@ -216,6 +286,63 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data)
return ret;
}
+#ifdef CONFIG_LEDS_CLASS
+
+static int qt2160_register_leds(struct qt2160_data *qt2160)
+{
+ struct i2c_client *client = qt2160->client;
+ int ret;
+ int i;
+
+ mutex_init(&qt2160->led_lock);
+
+ for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
+ struct qt2160_led *led = &qt2160->leds[i];
+
+ snprintf(led->name, sizeof(led->name), "qt2160:x%d", i);
+ led->cdev.name = led->name;
+ led->cdev.brightness_set = qt2160_led_set;
+ led->cdev.brightness = LED_OFF;
+ led->id = i;
+ led->qt2160 = qt2160;
+
+ INIT_WORK(&led->work, qt2160_led_work);
+
+ ret = led_classdev_register(&client->dev, &led->cdev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Turn off LEDs */
+ qt2160_write(client, QT2160_CMD_DRIVE_X, 0);
+ qt2160_write(client, QT2160_CMD_PWMEN_X, 0);
+ qt2160_write(client, QT2160_CMD_PWM_DUTY, 0);
+
+ return 0;
+}
+
+static void qt2160_unregister_leds(struct qt2160_data *qt2160)
+{
+ int i;
+
+ for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
+ led_classdev_unregister(&qt2160->leds[i].cdev);
+ cancel_work_sync(&qt2160->leds[i].work);
+ }
+}
+
+#else
+
+static inline int qt2160_register_leds(struct qt2160_data *qt2160)
+{
+ return 0;
+}
+
+static inline void qt2160_unregister_leds(struct qt2160_data *qt2160)
+{
+}
+
+#endif
static bool qt2160_identify(struct i2c_client *client)
{
@@ -249,7 +376,7 @@ static bool qt2160_identify(struct i2c_client *client)
}
static int qt2160_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct qt2160_data *qt2160;
struct input_dev *input;
@@ -314,11 +441,17 @@ static int qt2160_probe(struct i2c_client *client,
}
}
+ error = qt2160_register_leds(qt2160);
+ if (error) {
+ dev_err(&client->dev, "Failed to register leds\n");
+ goto err_free_irq;
+ }
+
error = input_register_device(qt2160->input);
if (error) {
dev_err(&client->dev,
"Failed to register input device\n");
- goto err_free_irq;
+ goto err_unregister_leds;
}
i2c_set_clientdata(client, qt2160);
@@ -326,6 +459,8 @@ static int qt2160_probe(struct i2c_client *client,
return 0;
+err_unregister_leds:
+ qt2160_unregister_leds(qt2160);
err_free_irq:
if (client->irq)
free_irq(client->irq, qt2160);
@@ -339,6 +474,8 @@ static int qt2160_remove(struct i2c_client *client)
{
struct qt2160_data *qt2160 = i2c_get_clientdata(client);
+ qt2160_unregister_leds(qt2160);
+
/* Release IRQ so no queue will be scheduled */
if (client->irq)
free_irq(client->irq, qt2160);
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 695d237417d6..cb1e8f614631 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -228,11 +228,9 @@ static int spear_kbd_probe(struct platform_device *pdev)
kbd->suspended_rate = pdata->suspended_rate;
}
- kbd->io_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!kbd->io_base) {
- dev_err(&pdev->dev, "request-ioremap failed for kbd_region\n");
- return -ENOMEM;
- }
+ kbd->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(kbd->io_base))
+ return PTR_ERR(kbd->io_base);
kbd->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(kbd->clk))
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index c76f96872d31..0e138ebcc768 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -29,8 +29,15 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/slab.h>
-#include <linux/input/tegra_kbc.h>
-#include <mach/clk.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/clk/tegra.h>
+
+#define KBC_MAX_GPIO 24
+#define KBC_MAX_KPENT 8
+
+#define KBC_MAX_ROW 16
+#define KBC_MAX_COL 8
+#define KBC_MAX_KEY (KBC_MAX_ROW * KBC_MAX_COL)
#define KBC_MAX_DEBOUNCE_CNT 0x3ffu
@@ -67,10 +74,27 @@
#define KBC_ROW_SHIFT 3
+enum tegra_pin_type {
+ PIN_CFG_IGNORE,
+ PIN_CFG_COL,
+ PIN_CFG_ROW,
+};
+
+struct tegra_kbc_pin_cfg {
+ enum tegra_pin_type type;
+ unsigned char num;
+};
+
struct tegra_kbc {
+ struct device *dev;
+ unsigned int debounce_cnt;
+ unsigned int repeat_cnt;
+ struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
+ const struct matrix_keymap_data *keymap_data;
+ bool wakeup;
void __iomem *mmio;
struct input_dev *idev;
- unsigned int irq;
+ int irq;
spinlock_t lock;
unsigned int repoll_dly;
unsigned long cp_dly_jiffies;
@@ -78,7 +102,6 @@ struct tegra_kbc {
bool use_fn_map;
bool use_ghost_filter;
bool keypress_caused_wake;
- const struct tegra_kbc_platform_data *pdata;
unsigned short keycode[KBC_MAX_KEY * 2];
unsigned short current_keys[KBC_MAX_KPENT];
unsigned int num_pressed_keys;
@@ -87,147 +110,6 @@ struct tegra_kbc {
struct clk *clk;
};
-static const u32 tegra_kbc_default_keymap[] = {
- KEY(0, 2, KEY_W),
- KEY(0, 3, KEY_S),
- KEY(0, 4, KEY_A),
- KEY(0, 5, KEY_Z),
- KEY(0, 7, KEY_FN),
-
- KEY(1, 7, KEY_LEFTMETA),
-
- KEY(2, 6, KEY_RIGHTALT),
- KEY(2, 7, KEY_LEFTALT),
-
- KEY(3, 0, KEY_5),
- KEY(3, 1, KEY_4),
- KEY(3, 2, KEY_R),
- KEY(3, 3, KEY_E),
- KEY(3, 4, KEY_F),
- KEY(3, 5, KEY_D),
- KEY(3, 6, KEY_X),
-
- KEY(4, 0, KEY_7),
- KEY(4, 1, KEY_6),
- KEY(4, 2, KEY_T),
- KEY(4, 3, KEY_H),
- KEY(4, 4, KEY_G),
- KEY(4, 5, KEY_V),
- KEY(4, 6, KEY_C),
- KEY(4, 7, KEY_SPACE),
-
- KEY(5, 0, KEY_9),
- KEY(5, 1, KEY_8),
- KEY(5, 2, KEY_U),
- KEY(5, 3, KEY_Y),
- KEY(5, 4, KEY_J),
- KEY(5, 5, KEY_N),
- KEY(5, 6, KEY_B),
- KEY(5, 7, KEY_BACKSLASH),
-
- KEY(6, 0, KEY_MINUS),
- KEY(6, 1, KEY_0),
- KEY(6, 2, KEY_O),
- KEY(6, 3, KEY_I),
- KEY(6, 4, KEY_L),
- KEY(6, 5, KEY_K),
- KEY(6, 6, KEY_COMMA),
- KEY(6, 7, KEY_M),
-
- KEY(7, 1, KEY_EQUAL),
- KEY(7, 2, KEY_RIGHTBRACE),
- KEY(7, 3, KEY_ENTER),
- KEY(7, 7, KEY_MENU),
-
- KEY(8, 4, KEY_RIGHTSHIFT),
- KEY(8, 5, KEY_LEFTSHIFT),
-
- KEY(9, 5, KEY_RIGHTCTRL),
- KEY(9, 7, KEY_LEFTCTRL),
-
- KEY(11, 0, KEY_LEFTBRACE),
- KEY(11, 1, KEY_P),
- KEY(11, 2, KEY_APOSTROPHE),
- KEY(11, 3, KEY_SEMICOLON),
- KEY(11, 4, KEY_SLASH),
- KEY(11, 5, KEY_DOT),
-
- KEY(12, 0, KEY_F10),
- KEY(12, 1, KEY_F9),
- KEY(12, 2, KEY_BACKSPACE),
- KEY(12, 3, KEY_3),
- KEY(12, 4, KEY_2),
- KEY(12, 5, KEY_UP),
- KEY(12, 6, KEY_PRINT),
- KEY(12, 7, KEY_PAUSE),
-
- KEY(13, 0, KEY_INSERT),
- KEY(13, 1, KEY_DELETE),
- KEY(13, 3, KEY_PAGEUP),
- KEY(13, 4, KEY_PAGEDOWN),
- KEY(13, 5, KEY_RIGHT),
- KEY(13, 6, KEY_DOWN),
- KEY(13, 7, KEY_LEFT),
-
- KEY(14, 0, KEY_F11),
- KEY(14, 1, KEY_F12),
- KEY(14, 2, KEY_F8),
- KEY(14, 3, KEY_Q),
- KEY(14, 4, KEY_F4),
- KEY(14, 5, KEY_F3),
- KEY(14, 6, KEY_1),
- KEY(14, 7, KEY_F7),
-
- KEY(15, 0, KEY_ESC),
- KEY(15, 1, KEY_GRAVE),
- KEY(15, 2, KEY_F5),
- KEY(15, 3, KEY_TAB),
- KEY(15, 4, KEY_F1),
- KEY(15, 5, KEY_F2),
- KEY(15, 6, KEY_CAPSLOCK),
- KEY(15, 7, KEY_F6),
-
- /* Software Handled Function Keys */
- KEY(20, 0, KEY_KP7),
-
- KEY(21, 0, KEY_KP9),
- KEY(21, 1, KEY_KP8),
- KEY(21, 2, KEY_KP4),
- KEY(21, 4, KEY_KP1),
-
- KEY(22, 1, KEY_KPSLASH),
- KEY(22, 2, KEY_KP6),
- KEY(22, 3, KEY_KP5),
- KEY(22, 4, KEY_KP3),
- KEY(22, 5, KEY_KP2),
- KEY(22, 7, KEY_KP0),
-
- KEY(27, 1, KEY_KPASTERISK),
- KEY(27, 3, KEY_KPMINUS),
- KEY(27, 4, KEY_KPPLUS),
- KEY(27, 5, KEY_KPDOT),
-
- KEY(28, 5, KEY_VOLUMEUP),
-
- KEY(29, 3, KEY_HOME),
- KEY(29, 4, KEY_END),
- KEY(29, 5, KEY_BRIGHTNESSDOWN),
- KEY(29, 6, KEY_VOLUMEDOWN),
- KEY(29, 7, KEY_BRIGHTNESSUP),
-
- KEY(30, 0, KEY_NUMLOCK),
- KEY(30, 1, KEY_SCROLLLOCK),
- KEY(30, 2, KEY_MUTE),
-
- KEY(31, 4, KEY_HELP),
-};
-
-static const
-struct matrix_keymap_data tegra_kbc_default_keymap_data = {
- .keymap = tegra_kbc_default_keymap,
- .keymap_size = ARRAY_SIZE(tegra_kbc_default_keymap),
-};
-
static void tegra_kbc_report_released_keys(struct input_dev *input,
unsigned short old_keycodes[],
unsigned int old_num_keys,
@@ -357,18 +239,6 @@ static void tegra_kbc_set_fifo_interrupt(struct tegra_kbc *kbc, bool enable)
writel(val, kbc->mmio + KBC_CONTROL_0);
}
-static void tegra_kbc_set_keypress_interrupt(struct tegra_kbc *kbc, bool enable)
-{
- u32 val;
-
- val = readl(kbc->mmio + KBC_CONTROL_0);
- if (enable)
- val |= KBC_CONTROL_KEYPRESS_INT_EN;
- else
- val &= ~KBC_CONTROL_KEYPRESS_INT_EN;
- writel(val, kbc->mmio + KBC_CONTROL_0);
-}
-
static void tegra_kbc_keypress_timer(unsigned long data)
{
struct tegra_kbc *kbc = (struct tegra_kbc *)data;
@@ -439,12 +309,11 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args)
static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
{
- const struct tegra_kbc_platform_data *pdata = kbc->pdata;
int i;
unsigned int rst_val;
/* Either mask all keys or none. */
- rst_val = (filter && !pdata->wakeup) ? ~0 : 0;
+ rst_val = (filter && !kbc->wakeup) ? ~0 : 0;
for (i = 0; i < KBC_MAX_ROW; i++)
writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4);
@@ -452,7 +321,6 @@ static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
{
- const struct tegra_kbc_platform_data *pdata = kbc->pdata;
int i;
for (i = 0; i < KBC_MAX_GPIO; i++) {
@@ -468,13 +336,13 @@ static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
row_cfg &= ~r_mask;
col_cfg &= ~c_mask;
- switch (pdata->pin_cfg[i].type) {
+ switch (kbc->pin_cfg[i].type) {
case PIN_CFG_ROW:
- row_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << r_shft;
+ row_cfg |= ((kbc->pin_cfg[i].num << 1) | 1) << r_shft;
break;
case PIN_CFG_COL:
- col_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << c_shft;
+ col_cfg |= ((kbc->pin_cfg[i].num << 1) | 1) << c_shft;
break;
case PIN_CFG_IGNORE:
@@ -488,7 +356,6 @@ static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
static int tegra_kbc_start(struct tegra_kbc *kbc)
{
- const struct tegra_kbc_platform_data *pdata = kbc->pdata;
unsigned int debounce_cnt;
u32 val = 0;
@@ -503,10 +370,10 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
tegra_kbc_config_pins(kbc);
tegra_kbc_setup_wakekeys(kbc, false);
- writel(pdata->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
+ writel(kbc->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
/* Keyboard debounce count is maximum of 12 bits. */
- debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+ debounce_cnt = min(kbc->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
val = KBC_DEBOUNCE_CNT_SHIFT(debounce_cnt);
val |= KBC_FIFO_TH_CNT_SHIFT(1); /* set fifo interrupt threshold to 1 */
val |= KBC_CONTROL_FIFO_CNT_INT_EN; /* interrupt on FIFO threshold */
@@ -573,21 +440,20 @@ static void tegra_kbc_close(struct input_dev *dev)
return tegra_kbc_stop(kbc);
}
-static bool
-tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
- struct device *dev, unsigned int *num_rows)
+static bool tegra_kbc_check_pin_cfg(const struct tegra_kbc *kbc,
+ unsigned int *num_rows)
{
int i;
*num_rows = 0;
for (i = 0; i < KBC_MAX_GPIO; i++) {
- const struct tegra_kbc_pin_cfg *pin_cfg = &pdata->pin_cfg[i];
+ const struct tegra_kbc_pin_cfg *pin_cfg = &kbc->pin_cfg[i];
switch (pin_cfg->type) {
case PIN_CFG_ROW:
if (pin_cfg->num >= KBC_MAX_ROW) {
- dev_err(dev,
+ dev_err(kbc->dev,
"pin_cfg[%d]: invalid row number %d\n",
i, pin_cfg->num);
return false;
@@ -597,7 +463,7 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
case PIN_CFG_COL:
if (pin_cfg->num >= KBC_MAX_COL) {
- dev_err(dev,
+ dev_err(kbc->dev,
"pin_cfg[%d]: invalid column number %d\n",
i, pin_cfg->num);
return false;
@@ -608,7 +474,7 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
break;
default:
- dev_err(dev,
+ dev_err(kbc->dev,
"pin_cfg[%d]: invalid entry type %d\n",
pin_cfg->type, pin_cfg->num);
return false;
@@ -618,154 +484,140 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
return true;
}
-#ifdef CONFIG_OF
-static struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
- struct platform_device *pdev)
+static int tegra_kbc_parse_dt(struct tegra_kbc *kbc)
{
- struct tegra_kbc_platform_data *pdata;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = kbc->dev->of_node;
u32 prop;
int i;
-
- if (!np)
- return NULL;
-
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
+ u32 num_rows = 0;
+ u32 num_cols = 0;
+ u32 cols_cfg[KBC_MAX_GPIO];
+ u32 rows_cfg[KBC_MAX_GPIO];
+ int proplen;
+ int ret;
if (!of_property_read_u32(np, "nvidia,debounce-delay-ms", &prop))
- pdata->debounce_cnt = prop;
+ kbc->debounce_cnt = prop;
if (!of_property_read_u32(np, "nvidia,repeat-delay-ms", &prop))
- pdata->repeat_cnt = prop;
+ kbc->repeat_cnt = prop;
if (of_find_property(np, "nvidia,needs-ghost-filter", NULL))
- pdata->use_ghost_filter = true;
+ kbc->use_ghost_filter = true;
if (of_find_property(np, "nvidia,wakeup-source", NULL))
- pdata->wakeup = true;
+ kbc->wakeup = true;
- /*
- * All currently known keymaps with device tree support use the same
- * pin_cfg, so set it up here.
- */
- for (i = 0; i < KBC_MAX_ROW; i++) {
- pdata->pin_cfg[i].num = i;
- pdata->pin_cfg[i].type = PIN_CFG_ROW;
+ if (!of_get_property(np, "nvidia,kbc-row-pins", &proplen)) {
+ dev_err(kbc->dev, "property nvidia,kbc-row-pins not found\n");
+ return -ENOENT;
}
+ num_rows = proplen / sizeof(u32);
- for (i = 0; i < KBC_MAX_COL; i++) {
- pdata->pin_cfg[KBC_MAX_ROW + i].num = i;
- pdata->pin_cfg[KBC_MAX_ROW + i].type = PIN_CFG_COL;
+ if (!of_get_property(np, "nvidia,kbc-col-pins", &proplen)) {
+ dev_err(kbc->dev, "property nvidia,kbc-col-pins not found\n");
+ return -ENOENT;
}
+ num_cols = proplen / sizeof(u32);
- return pdata;
-}
-#else
-static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
- struct platform_device *pdev)
-{
- return NULL;
-}
-#endif
+ if (!of_get_property(np, "linux,keymap", &proplen)) {
+ dev_err(kbc->dev, "property linux,keymap not found\n");
+ return -ENOENT;
+ }
-static int tegra_kbd_setup_keymap(struct tegra_kbc *kbc)
-{
- const struct tegra_kbc_platform_data *pdata = kbc->pdata;
- const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
- unsigned int keymap_rows = KBC_MAX_KEY;
- int retval;
+ if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) {
+ dev_err(kbc->dev,
+ "keypad rows/columns not porperly specified\n");
+ return -EINVAL;
+ }
- if (keymap_data && pdata->use_fn_map)
- keymap_rows *= 2;
+ /* Set all pins as non-configured */
+ for (i = 0; i < KBC_MAX_GPIO; i++)
+ kbc->pin_cfg[i].type = PIN_CFG_IGNORE;
- retval = matrix_keypad_build_keymap(keymap_data, NULL,
- keymap_rows, KBC_MAX_COL,
- kbc->keycode, kbc->idev);
- if (retval == -ENOSYS || retval == -ENOENT) {
- /*
- * If there is no OF support in kernel or keymap
- * property is missing, use default keymap.
- */
- retval = matrix_keypad_build_keymap(
- &tegra_kbc_default_keymap_data, NULL,
- keymap_rows, KBC_MAX_COL,
- kbc->keycode, kbc->idev);
+ ret = of_property_read_u32_array(np, "nvidia,kbc-row-pins",
+ rows_cfg, num_rows);
+ if (ret < 0) {
+ dev_err(kbc->dev, "Rows configurations are not proper\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(np, "nvidia,kbc-col-pins",
+ cols_cfg, num_cols);
+ if (ret < 0) {
+ dev_err(kbc->dev, "Cols configurations are not proper\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_rows; i++) {
+ kbc->pin_cfg[rows_cfg[i]].type = PIN_CFG_ROW;
+ kbc->pin_cfg[rows_cfg[i]].num = i;
}
- return retval;
+ for (i = 0; i < num_cols; i++) {
+ kbc->pin_cfg[cols_cfg[i]].type = PIN_CFG_COL;
+ kbc->pin_cfg[cols_cfg[i]].num = i;
+ }
+
+ return 0;
}
static int tegra_kbc_probe(struct platform_device *pdev)
{
- const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
struct tegra_kbc *kbc;
- struct input_dev *input_dev;
struct resource *res;
- int irq;
int err;
int num_rows = 0;
unsigned int debounce_cnt;
unsigned int scan_time_rows;
+ unsigned int keymap_rows = KBC_MAX_KEY;
- if (!pdata)
- pdata = tegra_kbc_dt_parse_pdata(pdev);
+ kbc = devm_kzalloc(&pdev->dev, sizeof(*kbc), GFP_KERNEL);
+ if (!kbc) {
+ dev_err(&pdev->dev, "failed to alloc memory for kbc\n");
+ return -ENOMEM;
+ }
- if (!pdata)
- return -EINVAL;
+ kbc->dev = &pdev->dev;
+ spin_lock_init(&kbc->lock);
- if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows)) {
- err = -EINVAL;
- goto err_free_pdata;
- }
+ err = tegra_kbc_parse_dt(kbc);
+ if (err)
+ return err;
+
+ if (!tegra_kbc_check_pin_cfg(kbc, &num_rows))
+ return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get I/O memory\n");
- err = -ENXIO;
- goto err_free_pdata;
+ return -ENXIO;
}
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
+ kbc->irq = platform_get_irq(pdev, 0);
+ if (kbc->irq < 0) {
dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
- err = -ENXIO;
- goto err_free_pdata;
+ return -ENXIO;
}
- kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!kbc || !input_dev) {
- err = -ENOMEM;
- goto err_free_mem;
+ kbc->idev = devm_input_allocate_device(&pdev->dev);
+ if (!kbc->idev) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ return -ENOMEM;
}
- kbc->pdata = pdata;
- kbc->idev = input_dev;
- kbc->irq = irq;
- spin_lock_init(&kbc->lock);
setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- err = -EBUSY;
- goto err_free_mem;
- }
-
- kbc->mmio = ioremap(res->start, resource_size(res));
+ kbc->mmio = devm_request_and_ioremap(&pdev->dev, res);
if (!kbc->mmio) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- err = -ENXIO;
- goto err_free_mem_region;
+ dev_err(&pdev->dev, "Cannot request memregion/iomap address\n");
+ return -EBUSY;
}
- kbc->clk = clk_get(&pdev->dev, NULL);
+ kbc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(kbc->clk)) {
dev_err(&pdev->dev, "failed to get keyboard clock\n");
- err = PTR_ERR(kbc->clk);
- goto err_iounmap;
+ return PTR_ERR(kbc->clk);
}
/*
@@ -774,37 +626,38 @@ static int tegra_kbc_probe(struct platform_device *pdev)
* the rows. There is an additional delay before the row scanning
* starts. The repoll delay is computed in milliseconds.
*/
- debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+ debounce_cnt = min(kbc->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
- kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
+ kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + kbc->repeat_cnt;
kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS);
- kbc->wakeup_key = pdata->wakeup_key;
- kbc->use_fn_map = pdata->use_fn_map;
- kbc->use_ghost_filter = pdata->use_ghost_filter;
+ kbc->idev->name = pdev->name;
+ kbc->idev->id.bustype = BUS_HOST;
+ kbc->idev->dev.parent = &pdev->dev;
+ kbc->idev->open = tegra_kbc_open;
+ kbc->idev->close = tegra_kbc_close;
- input_dev->name = pdev->name;
- input_dev->id.bustype = BUS_HOST;
- input_dev->dev.parent = &pdev->dev;
- input_dev->open = tegra_kbc_open;
- input_dev->close = tegra_kbc_close;
+ if (kbc->keymap_data && kbc->use_fn_map)
+ keymap_rows *= 2;
- err = tegra_kbd_setup_keymap(kbc);
+ err = matrix_keypad_build_keymap(kbc->keymap_data, NULL,
+ keymap_rows, KBC_MAX_COL,
+ kbc->keycode, kbc->idev);
if (err) {
dev_err(&pdev->dev, "failed to setup keymap\n");
- goto err_put_clk;
+ return err;
}
- __set_bit(EV_REP, input_dev->evbit);
- input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ __set_bit(EV_REP, kbc->idev->evbit);
+ input_set_capability(kbc->idev, EV_MSC, MSC_SCAN);
- input_set_drvdata(input_dev, kbc);
+ input_set_drvdata(kbc->idev, kbc);
- err = request_irq(kbc->irq, tegra_kbc_isr,
+ err = devm_request_irq(&pdev->dev, kbc->irq, tegra_kbc_isr,
IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH, pdev->name, kbc);
if (err) {
dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
- goto err_put_clk;
+ return err;
}
disable_irq(kbc->irq);
@@ -812,60 +665,28 @@ static int tegra_kbc_probe(struct platform_device *pdev)
err = input_register_device(kbc->idev);
if (err) {
dev_err(&pdev->dev, "failed to register input device\n");
- goto err_free_irq;
+ return err;
}
platform_set_drvdata(pdev, kbc);
- device_init_wakeup(&pdev->dev, pdata->wakeup);
+ device_init_wakeup(&pdev->dev, kbc->wakeup);
return 0;
-
-err_free_irq:
- free_irq(kbc->irq, pdev);
-err_put_clk:
- clk_put(kbc->clk);
-err_iounmap:
- iounmap(kbc->mmio);
-err_free_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
- input_free_device(input_dev);
- kfree(kbc);
-err_free_pdata:
- if (!pdev->dev.platform_data)
- kfree(pdata);
-
- return err;
}
-static int tegra_kbc_remove(struct platform_device *pdev)
+#ifdef CONFIG_PM_SLEEP
+static void tegra_kbc_set_keypress_interrupt(struct tegra_kbc *kbc, bool enable)
{
- struct tegra_kbc *kbc = platform_get_drvdata(pdev);
- struct resource *res;
-
- platform_set_drvdata(pdev, NULL);
-
- free_irq(kbc->irq, pdev);
- clk_put(kbc->clk);
-
- input_unregister_device(kbc->idev);
- iounmap(kbc->mmio);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- /*
- * If we do not have platform data attached to the device we
- * allocated it ourselves and thus need to free it.
- */
- if (!pdev->dev.platform_data)
- kfree(kbc->pdata);
-
- kfree(kbc);
+ u32 val;
- return 0;
+ val = readl(kbc->mmio + KBC_CONTROL_0);
+ if (enable)
+ val |= KBC_CONTROL_KEYPRESS_INT_EN;
+ else
+ val &= ~KBC_CONTROL_KEYPRESS_INT_EN;
+ writel(val, kbc->mmio + KBC_CONTROL_0);
}
-#ifdef CONFIG_PM_SLEEP
static int tegra_kbc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -954,7 +775,6 @@ MODULE_DEVICE_TABLE(of, tegra_kbc_of_match);
static struct platform_driver tegra_kbc_driver = {
.probe = tegra_kbc_probe,
- .remove = tegra_kbc_remove,
.driver = {
.name = "tegra-kbc",
.owner = THIS_MODULE,
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 1cf72fe513e6..0735de3a6468 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -232,7 +232,7 @@ static const struct adxl34x_platform_data adxl34x_default_init = {
.ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY {x,y,z} */
.power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
- .fifo_mode = FIFO_STREAM,
+ .fifo_mode = ADXL_FIFO_STREAM,
.watermark = 0,
};
@@ -732,7 +732,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
mutex_init(&ac->mutex);
input_dev->name = "ADXL34x accelerometer";
- revid = ac->bops->read(dev, DEVID);
+ revid = AC_READ(ac, DEVID);
switch (revid) {
case ID_ADXL345:
@@ -809,7 +809,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS)
ac->fifo_delay = false;
- ac->bops->write(dev, POWER_CTL, 0);
+ AC_WRITE(ac, POWER_CTL, 0);
err = request_threaded_irq(ac->irq, NULL, adxl34x_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
@@ -827,7 +827,6 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
if (err)
goto err_remove_attr;
- AC_WRITE(ac, THRESH_TAP, pdata->tap_threshold);
AC_WRITE(ac, OFSX, pdata->x_axis_offset);
ac->hwcal.x = pdata->x_axis_offset;
AC_WRITE(ac, OFSY, pdata->y_axis_offset);
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 26f13131639a..5d4402365a52 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -121,7 +121,7 @@ static int atlas_acpi_button_add(struct acpi_device *device)
return err;
}
-static int atlas_acpi_button_remove(struct acpi_device *device, int type)
+static int atlas_acpi_button_remove(struct acpi_device *device)
{
acpi_status status;
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 08ffcabd7220..865c2f9d25b9 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -46,18 +46,6 @@
#define BMA150_POLL_MAX 200
#define BMA150_POLL_MIN 0
-#define BMA150_BW_25HZ 0
-#define BMA150_BW_50HZ 1
-#define BMA150_BW_100HZ 2
-#define BMA150_BW_190HZ 3
-#define BMA150_BW_375HZ 4
-#define BMA150_BW_750HZ 5
-#define BMA150_BW_1500HZ 6
-
-#define BMA150_RANGE_2G 0
-#define BMA150_RANGE_4G 1
-#define BMA150_RANGE_8G 2
-
#define BMA150_MODE_NORMAL 0
#define BMA150_MODE_SLEEP 2
#define BMA150_MODE_WAKE_UP 3
@@ -372,7 +360,7 @@ static int bma150_open(struct bma150_data *bma150)
int error;
error = pm_runtime_get_sync(&bma150->client->dev);
- if (error && error != -ENOSYS)
+ if (error < 0 && error != -ENOSYS)
return error;
/*
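
The one-character bma150 change above is worth calling out: pm_runtime_get_sync() does not just return 0 or a negative errno, it can also return 1 when the device was already active, so treating any non-zero return as a failure makes open() fail spuriously. A hedged sketch of the corrected check (foo_open() is a placeholder, not the driver's code):

#include <linux/errno.h>
#include <linux/pm_runtime.h>

static int foo_open(struct device *dev)
{
        int error;

        /*
         * Negative values are real errors; 0 and 1 both mean the device
         * is now (or already was) resumed.  The driver also tolerates
         * -ENOSYS so the open path keeps working where runtime PM is
         * unavailable, mirroring the check in the hunk above.
         */
        error = pm_runtime_get_sync(dev);
        if (error < 0 && error != -ENOSYS)
                return error;

        return 0;       /* continue with normal device setup */
}
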
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 78eb6b30580a..68a5f33152a8 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -43,7 +43,6 @@ struct vibra_info {
struct device *dev;
struct input_dev *input_dev;
- struct workqueue_struct *workqueue;
struct work_struct play_work;
bool enabled;
@@ -143,19 +142,7 @@ static int vibra_play(struct input_dev *input, void *data,
if (!info->speed)
info->speed = effect->u.rumble.weak_magnitude >> 9;
info->direction = effect->direction < EFFECT_DIR_180_DEG ? 0 : 1;
- queue_work(info->workqueue, &info->play_work);
- return 0;
-}
-
-static int twl4030_vibra_open(struct input_dev *input)
-{
- struct vibra_info *info = input_get_drvdata(input);
-
- info->workqueue = create_singlethread_workqueue("vibra");
- if (info->workqueue == NULL) {
- dev_err(&input->dev, "couldn't create workqueue\n");
- return -ENOMEM;
- }
+ schedule_work(&info->play_work);
return 0;
}
@@ -164,9 +151,6 @@ static void twl4030_vibra_close(struct input_dev *input)
struct vibra_info *info = input_get_drvdata(input);
cancel_work_sync(&info->play_work);
- INIT_WORK(&info->play_work, vibra_play_work); /* cleanup */
- destroy_workqueue(info->workqueue);
- info->workqueue = NULL;
if (info->enabled)
vibra_disable(info);
@@ -219,7 +203,7 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
return -EINVAL;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -227,11 +211,10 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
info->coexist = twl4030_vibra_check_coexist(pdata, twl4030_core_node);
INIT_WORK(&info->play_work, vibra_play_work);
- info->input_dev = input_allocate_device();
+ info->input_dev = devm_input_allocate_device(&pdev->dev);
if (info->input_dev == NULL) {
dev_err(&pdev->dev, "couldn't allocate input device\n");
- ret = -ENOMEM;
- goto err_kzalloc;
+ return -ENOMEM;
}
input_set_drvdata(info->input_dev, info);
@@ -239,14 +222,13 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
info->input_dev->name = "twl4030:vibrator";
info->input_dev->id.version = 1;
info->input_dev->dev.parent = pdev->dev.parent;
- info->input_dev->open = twl4030_vibra_open;
info->input_dev->close = twl4030_vibra_close;
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't register vibrator to FF\n");
- goto err_ialloc;
+ return ret;
}
ret = input_register_device(info->input_dev);
@@ -262,28 +244,11 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
err_iff:
input_ff_destroy(info->input_dev);
-err_ialloc:
- input_free_device(info->input_dev);
-err_kzalloc:
- kfree(info);
return ret;
}
-static int twl4030_vibra_remove(struct platform_device *pdev)
-{
- struct vibra_info *info = platform_get_drvdata(pdev);
-
- /* this also free ff-memless and calls close if needed */
- input_unregister_device(info->input_dev);
- kfree(info);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
static struct platform_driver twl4030_vibra_driver = {
.probe = twl4030_vibra_probe,
- .remove = twl4030_vibra_remove,
.driver = {
.name = "twl4030-vibra",
.owner = THIS_MODULE,
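
Besides the devm_* conversions, the twl4030-vibra diff drops the driver's private single-threaded workqueue. A driver that only queues one short-lived work item gains nothing from owning a kernel thread; schedule_work() on the shared system workqueue does the same job with less code and removes the need to create and destroy a queue in open()/close(). A rough sketch of that pattern with placeholder names (foo_dev, foo_play_work), not taken from the patch:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_dev {
        struct work_struct play_work;
        int speed;
};

static void foo_play_work(struct work_struct *work)
{
        struct foo_dev *foo = container_of(work, struct foo_dev, play_work);

        /* stand-in for programming the hardware with the requested speed */
        pr_info("vibra speed %d\n", foo->speed);
}

static void foo_start(struct foo_dev *foo, int speed)
{
        foo->speed = speed;
        schedule_work(&foo->play_work);         /* shared system workqueue */
}

static void foo_stop(struct foo_dev *foo)
{
        /* wait for any pending/running item; nothing to destroy */
        cancel_work_sync(&foo->play_work);
}

static void foo_setup(struct foo_dev *foo)
{
        INIT_WORK(&foo->play_work, foo_play_work);
}
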
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 71a28ee699f3..0c2dfc8e9691 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -275,7 +275,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
return -EINVAL;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "couldn't allocate memory\n");
return -ENOMEM;
@@ -309,53 +309,23 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
if ((!info->vibldrv_res && !info->viblmotor_res) ||
(!info->vibrdrv_res && !info->vibrmotor_res)) {
dev_err(info->dev, "invalid vibra driver/motor resistance\n");
- ret = -EINVAL;
- goto err_kzalloc;
+ return -EINVAL;
}
info->irq = platform_get_irq(pdev, 0);
if (info->irq < 0) {
dev_err(info->dev, "invalid irq\n");
- ret = -EINVAL;
- goto err_kzalloc;
+ return -EINVAL;
}
mutex_init(&info->mutex);
- info->input_dev = input_allocate_device();
- if (info->input_dev == NULL) {
- dev_err(info->dev, "couldn't allocate input device\n");
- ret = -ENOMEM;
- goto err_kzalloc;
- }
-
- input_set_drvdata(info->input_dev, info);
-
- info->input_dev->name = "twl6040:vibrator";
- info->input_dev->id.version = 1;
- info->input_dev->dev.parent = pdev->dev.parent;
- info->input_dev->close = twl6040_vibra_close;
- __set_bit(FF_RUMBLE, info->input_dev->ffbit);
-
- ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
- if (ret < 0) {
- dev_err(info->dev, "couldn't register vibrator to FF\n");
- goto err_ialloc;
- }
-
- ret = input_register_device(info->input_dev);
- if (ret < 0) {
- dev_err(info->dev, "couldn't register input device\n");
- goto err_iff;
- }
-
- platform_set_drvdata(pdev, info);
-
- ret = request_threaded_irq(info->irq, NULL, twl6040_vib_irq_handler, 0,
- "twl6040_irq_vib", info);
+ ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
+ twl6040_vib_irq_handler, 0,
+ "twl6040_irq_vib", info);
if (ret) {
dev_err(info->dev, "VIB IRQ request failed: %d\n", ret);
- goto err_irq;
+ return ret;
}
info->supplies[0].supply = "vddvibl";
@@ -368,7 +338,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
ARRAY_SIZE(info->supplies), info->supplies);
if (ret) {
dev_err(info->dev, "couldn't get regulators %d\n", ret);
- goto err_regulator;
+ return ret;
}
if (vddvibl_uV) {
@@ -377,7 +347,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
if (ret) {
dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
ret);
- goto err_voltage;
+ goto err_regulator;
}
}
@@ -387,34 +357,49 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
if (ret) {
dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
ret);
- goto err_voltage;
+ goto err_regulator;
}
}
- info->workqueue = alloc_workqueue("twl6040-vibra", 0, 0);
- if (info->workqueue == NULL) {
- dev_err(info->dev, "couldn't create workqueue\n");
+ INIT_WORK(&info->play_work, vibra_play_work);
+
+ info->input_dev = input_allocate_device();
+ if (info->input_dev == NULL) {
+ dev_err(info->dev, "couldn't allocate input device\n");
ret = -ENOMEM;
- goto err_voltage;
+ goto err_regulator;
}
- INIT_WORK(&info->play_work, vibra_play_work);
+
+ input_set_drvdata(info->input_dev, info);
+
+ info->input_dev->name = "twl6040:vibrator";
+ info->input_dev->id.version = 1;
+ info->input_dev->dev.parent = pdev->dev.parent;
+ info->input_dev->close = twl6040_vibra_close;
+ __set_bit(FF_RUMBLE, info->input_dev->ffbit);
+
+ ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
+ if (ret < 0) {
+ dev_err(info->dev, "couldn't register vibrator to FF\n");
+ goto err_ialloc;
+ }
+
+ ret = input_register_device(info->input_dev);
+ if (ret < 0) {
+ dev_err(info->dev, "couldn't register input device\n");
+ goto err_iff;
+ }
+
+ platform_set_drvdata(pdev, info);
return 0;
-err_voltage:
- regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
-err_regulator:
- free_irq(info->irq, info);
-err_irq:
- input_unregister_device(info->input_dev);
- info->input_dev = NULL;
err_iff:
- if (info->input_dev)
- input_ff_destroy(info->input_dev);
+ input_ff_destroy(info->input_dev);
err_ialloc:
input_free_device(info->input_dev);
-err_kzalloc:
- kfree(info);
+err_regulator:
+ regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
return ret;
}
@@ -423,10 +408,7 @@ static int twl6040_vibra_remove(struct platform_device *pdev)
struct vibra_info *info = platform_get_drvdata(pdev);
input_unregister_device(info->input_dev);
- free_irq(info->irq, info);
regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
- destroy_workqueue(info->workqueue);
- kfree(info);
return 0;
}
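
The twl6040-vibra reordering above follows the usual probe discipline: acquire resources first, register the input device last so no events can arrive before the driver is ready, and unwind with gotos in exactly the reverse order on failure. A schematic, self-contained sketch of that shape (the foo_* steps are stand-ins, not this driver's actual resources):

/* Stub steps standing in for the real resource acquisitions. */
static int foo_get_regulators(void)    { return 0; }
static void foo_put_regulators(void)   { }
static int foo_allocate_input(void)    { return 0; }
static void foo_free_input(void)       { }
static int foo_create_ff(void)         { return 0; }
static void foo_destroy_ff(void)       { }
static int foo_register_input(void)    { return 0; }

static int foo_probe(void)
{
        int ret;

        ret = foo_get_regulators();             /* step 1 */
        if (ret)
                return ret;

        ret = foo_allocate_input();             /* step 2 */
        if (ret)
                goto err_regulator;

        ret = foo_create_ff();                  /* step 3 */
        if (ret)
                goto err_input;

        ret = foo_register_input();             /* last: device goes live */
        if (ret)
                goto err_ff;

        return 0;

err_ff:
        foo_destroy_ff();                       /* undo step 3 */
err_input:
        foo_free_input();                       /* undo step 2 */
err_regulator:
        foo_put_regulators();                   /* undo step 1 */
        return ret;
}
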
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index 558767d8ebf4..caa2c4068f09 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -86,7 +86,7 @@ static int wm831x_on_probe(struct platform_device *pdev)
wm831x_on->wm831x = wm831x;
INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on);
- wm831x_on->dev = input_allocate_device();
+ wm831x_on->dev = devm_input_allocate_device(&pdev->dev);
if (!wm831x_on->dev) {
dev_err(&pdev->dev, "Can't allocate input dev\n");
ret = -ENOMEM;
@@ -119,7 +119,6 @@ static int wm831x_on_probe(struct platform_device *pdev)
err_irq:
free_irq(irq, wm831x_on);
err_input_dev:
- input_free_device(wm831x_on->dev);
err:
return ret;
}
@@ -131,7 +130,6 @@ static int wm831x_on_remove(struct platform_device *pdev)
free_irq(irq, wm831x_on);
cancel_delayed_work_sync(&wm831x_on->work);
- input_unregister_device(wm831x_on->dev);
return 0;
}
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index cd6268cf7cd5..802bd6a72d73 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -68,6 +68,16 @@ config MOUSE_PS2_SYNAPTICS
If unsure, say Y.
+config MOUSE_PS2_CYPRESS
+ bool "Cypress PS/2 mouse protocol extension" if EXPERT
+ default y
+ depends on MOUSE_PS2
+ help
+ Say Y here if you have a Cypress PS/2 Trackpad connected to
+ your system.
+
+ If unsure, say Y.
+
config MOUSE_PS2_LIFEBOOK
bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
default y
@@ -193,6 +203,18 @@ config MOUSE_BCM5974
To compile this driver as a module, choose M here: the
module will be called bcm5974.
+config MOUSE_CYAPA
+ tristate "Cypress APA I2C Trackpad support"
+ depends on I2C
+ help
+ This driver adds support for Cypress All Points Addressable (APA)
+ I2C Trackpads, including the ones used in 2012 Samsung Chromebooks.
+
+ Say Y here if you have a Cypress APA I2C Trackpad.
+
+ To compile this driver as a module, choose M here: the module will be
+ called cyapa.
+
config MOUSE_INPORT
tristate "InPort/MS/ATIXL busmouse"
depends on ISA
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 46ba7556fd4f..c25efdb3f288 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o
obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
+obj-$(CONFIG_MOUSE_CYAPA) += cyapa.o
obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
@@ -32,3 +33,4 @@ psmouse-$(CONFIG_MOUSE_PS2_LIFEBOOK) += lifebook.o
psmouse-$(CONFIG_MOUSE_PS2_SENTELIC) += sentelic.o
psmouse-$(CONFIG_MOUSE_PS2_TRACKPOINT) += trackpoint.o
psmouse-$(CONFIG_MOUSE_PS2_TOUCHKIT) += touchkit_ps2.o
+psmouse-$(CONFIG_MOUSE_PS2_CYPRESS) += cypress_ps2.o
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index e229fa3cad96..7b99fc7c9438 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -27,14 +27,11 @@
/*
* Definitions for ALPS version 3 and 4 command mode protocol
*/
-#define ALPS_V3_X_MAX 2000
-#define ALPS_V3_Y_MAX 1400
-
-#define ALPS_BITMAP_X_BITS 15
-#define ALPS_BITMAP_Y_BITS 11
-
#define ALPS_CMD_NIBBLE_10 0x01f2
+#define ALPS_REG_BASE_RUSHMORE 0xc2c0
+#define ALPS_REG_BASE_PINNACLE 0x0000
+
static const struct alps_nibble_commands alps_v3_nibble_commands[] = {
{ PSMOUSE_CMD_SETPOLL, 0x00 }, /* 0 */
{ PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */
@@ -109,11 +106,14 @@ static const struct alps_model_info alps_model_data[] = {
{ { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
{ { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
- { { 0x73, 0x02, 0x64 }, 0x9b, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
- { { 0x73, 0x02, 0x64 }, 0x9d, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
{ { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
};
+static void alps_set_abs_params_st(struct alps_data *priv,
+ struct input_dev *dev1);
+static void alps_set_abs_params_mt(struct alps_data *priv,
+ struct input_dev *dev1);
+
/*
* XXX - this entry is suspicious. First byte has zero lower nibble,
* which is what a normal mouse would report. Also, the value 0x0e
@@ -122,10 +122,10 @@ static const struct alps_model_info alps_model_data[] = {
/* Packet formats are described in Documentation/input/alps.txt */
-static bool alps_is_valid_first_byte(const struct alps_model_info *model,
+static bool alps_is_valid_first_byte(struct alps_data *priv,
unsigned char data)
{
- return (data & model->mask0) == model->byte0;
+ return (data & priv->mask0) == priv->byte0;
}
static void alps_report_buttons(struct psmouse *psmouse,
@@ -158,14 +158,13 @@ static void alps_report_buttons(struct psmouse *psmouse,
static void alps_process_packet_v1_v2(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
unsigned char *packet = psmouse->packet;
struct input_dev *dev = psmouse->dev;
struct input_dev *dev2 = priv->dev2;
int x, y, z, ges, fin, left, right, middle;
int back = 0, forward = 0;
- if (model->proto_version == ALPS_PROTO_V1) {
+ if (priv->proto_version == ALPS_PROTO_V1) {
left = packet[2] & 0x10;
right = packet[2] & 0x08;
middle = 0;
@@ -181,12 +180,12 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
z = packet[5];
}
- if (model->flags & ALPS_FW_BK_1) {
+ if (priv->flags & ALPS_FW_BK_1) {
back = packet[0] & 0x10;
forward = packet[2] & 4;
}
- if (model->flags & ALPS_FW_BK_2) {
+ if (priv->flags & ALPS_FW_BK_2) {
back = packet[3] & 4;
forward = packet[2] & 4;
if ((middle = forward && back))
@@ -196,7 +195,7 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
ges = packet[2] & 1;
fin = packet[2] & 2;
- if ((model->flags & ALPS_DUALPOINT) && z == 127) {
+ if ((priv->flags & ALPS_DUALPOINT) && z == 127) {
input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x));
input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
@@ -239,15 +238,15 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
input_report_abs(dev, ABS_PRESSURE, z);
input_report_key(dev, BTN_TOOL_FINGER, z > 0);
- if (model->flags & ALPS_WHEEL)
+ if (priv->flags & ALPS_WHEEL)
input_report_rel(dev, REL_WHEEL, ((packet[2] << 1) & 0x08) - ((packet[0] >> 4) & 0x07));
- if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
+ if (priv->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
input_report_key(dev, BTN_FORWARD, forward);
input_report_key(dev, BTN_BACK, back);
}
- if (model->flags & ALPS_FOUR_BUTTONS) {
+ if (priv->flags & ALPS_FOUR_BUTTONS) {
input_report_key(dev, BTN_0, packet[2] & 4);
input_report_key(dev, BTN_1, packet[0] & 0x10);
input_report_key(dev, BTN_2, packet[3] & 4);
@@ -267,7 +266,8 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
* These points are returned in x1, y1, x2, and y2 when the return value
* is greater than 0.
*/
-static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
+static int alps_process_bitmap(struct alps_data *priv,
+ unsigned int x_map, unsigned int y_map,
int *x1, int *y1, int *x2, int *y2)
{
struct alps_bitmap_point {
@@ -309,7 +309,7 @@ static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
* y bitmap is reversed for what we need (lower positions are in
* higher bits), so we process from the top end.
*/
- y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - ALPS_BITMAP_Y_BITS);
+ y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - priv->y_bits);
prev_bit = 0;
point = &y_low;
for (i = 0; y_map != 0; i++, y_map <<= 1) {
@@ -355,16 +355,18 @@ static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
}
}
- *x1 = (ALPS_V3_X_MAX * (2 * x_low.start_bit + x_low.num_bits - 1)) /
- (2 * (ALPS_BITMAP_X_BITS - 1));
- *y1 = (ALPS_V3_Y_MAX * (2 * y_low.start_bit + y_low.num_bits - 1)) /
- (2 * (ALPS_BITMAP_Y_BITS - 1));
+ *x1 = (priv->x_max * (2 * x_low.start_bit + x_low.num_bits - 1)) /
+ (2 * (priv->x_bits - 1));
+ *y1 = (priv->y_max * (2 * y_low.start_bit + y_low.num_bits - 1)) /
+ (2 * (priv->y_bits - 1));
if (fingers > 1) {
- *x2 = (ALPS_V3_X_MAX * (2 * x_high.start_bit + x_high.num_bits - 1)) /
- (2 * (ALPS_BITMAP_X_BITS - 1));
- *y2 = (ALPS_V3_Y_MAX * (2 * y_high.start_bit + y_high.num_bits - 1)) /
- (2 * (ALPS_BITMAP_Y_BITS - 1));
+ *x2 = (priv->x_max *
+ (2 * x_high.start_bit + x_high.num_bits - 1)) /
+ (2 * (priv->x_bits - 1));
+ *y2 = (priv->y_max *
+ (2 * y_high.start_bit + y_high.num_bits - 1)) /
+ (2 * (priv->y_bits - 1));
}
return fingers;
@@ -448,17 +450,57 @@ static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
return;
}
+static void alps_decode_buttons_v3(struct alps_fields *f, unsigned char *p)
+{
+ f->left = !!(p[3] & 0x01);
+ f->right = !!(p[3] & 0x02);
+ f->middle = !!(p[3] & 0x04);
+
+ f->ts_left = !!(p[3] & 0x10);
+ f->ts_right = !!(p[3] & 0x20);
+ f->ts_middle = !!(p[3] & 0x40);
+}
+
+static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p)
+{
+ f->first_mp = !!(p[4] & 0x40);
+ f->is_mp = !!(p[0] & 0x40);
+
+ f->fingers = (p[5] & 0x3) + 1;
+ f->x_map = ((p[4] & 0x7e) << 8) |
+ ((p[1] & 0x7f) << 2) |
+ ((p[0] & 0x30) >> 4);
+ f->y_map = ((p[3] & 0x70) << 4) |
+ ((p[2] & 0x7f) << 1) |
+ (p[4] & 0x01);
+
+ f->x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) |
+ ((p[0] & 0x30) >> 4);
+ f->y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f);
+ f->z = p[5] & 0x7f;
+
+ alps_decode_buttons_v3(f, p);
+}
+
+static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p)
+{
+ alps_decode_pinnacle(f, p);
+
+ f->x_map |= (p[5] & 0x10) << 11;
+ f->y_map |= (p[5] & 0x20) << 6;
+}
+
static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
unsigned char *packet = psmouse->packet;
struct input_dev *dev = psmouse->dev;
struct input_dev *dev2 = priv->dev2;
- int x, y, z;
- int left, right, middle;
int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
int fingers = 0, bmap_fingers;
- unsigned int x_bitmap, y_bitmap;
+ struct alps_fields f;
+
+ priv->decode_fields(&f, packet);
/*
* There's no single feature of touchpad position and bitmap packets
@@ -473,16 +515,10 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
* packet. Check for this, and when it happens process the
* position packet as usual.
*/
- if (packet[0] & 0x40) {
- fingers = (packet[5] & 0x3) + 1;
- x_bitmap = ((packet[4] & 0x7e) << 8) |
- ((packet[1] & 0x7f) << 2) |
- ((packet[0] & 0x30) >> 4);
- y_bitmap = ((packet[3] & 0x70) << 4) |
- ((packet[2] & 0x7f) << 1) |
- (packet[4] & 0x01);
-
- bmap_fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+ if (f.is_mp) {
+ fingers = f.fingers;
+ bmap_fingers = alps_process_bitmap(priv,
+ f.x_map, f.y_map,
&x1, &y1, &x2, &y2);
/*
@@ -493,7 +529,7 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
fingers = bmap_fingers;
/* Now process position packet */
- packet = priv->multi_data;
+ priv->decode_fields(&f, priv->multi_data);
} else {
priv->multi_packet = 0;
}
@@ -507,10 +543,10 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
* out misidentified bitmap packets, we reject anything with this
* bit set.
*/
- if (packet[0] & 0x40)
+ if (f.is_mp)
return;
- if (!priv->multi_packet && (packet[4] & 0x40)) {
+ if (!priv->multi_packet && f.first_mp) {
priv->multi_packet = 1;
memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
return;
@@ -518,22 +554,13 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
priv->multi_packet = 0;
- left = packet[3] & 0x01;
- right = packet[3] & 0x02;
- middle = packet[3] & 0x04;
-
- x = ((packet[1] & 0x7f) << 4) | ((packet[4] & 0x30) >> 2) |
- ((packet[0] & 0x30) >> 4);
- y = ((packet[2] & 0x7f) << 4) | (packet[4] & 0x0f);
- z = packet[5] & 0x7f;
-
/*
* Sometimes the hardware sends a single packet with z = 0
* in the middle of a stream. Real releases generate packets
* with x, y, and z all zero, so these seem to be flukes.
* Ignore them.
*/
- if (x && y && !z)
+ if (f.x && f.y && !f.z)
return;
/*
@@ -541,12 +568,12 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
* to rely on ST data.
*/
if (!fingers) {
- x1 = x;
- y1 = y;
- fingers = z > 0 ? 1 : 0;
+ x1 = f.x;
+ y1 = f.y;
+ fingers = f.z > 0 ? 1 : 0;
}
- if (z >= 64)
+ if (f.z >= 64)
input_report_key(dev, BTN_TOUCH, 1);
else
input_report_key(dev, BTN_TOUCH, 0);
@@ -555,26 +582,22 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
input_mt_report_finger_count(dev, fingers);
- input_report_key(dev, BTN_LEFT, left);
- input_report_key(dev, BTN_RIGHT, right);
- input_report_key(dev, BTN_MIDDLE, middle);
+ input_report_key(dev, BTN_LEFT, f.left);
+ input_report_key(dev, BTN_RIGHT, f.right);
+ input_report_key(dev, BTN_MIDDLE, f.middle);
- if (z > 0) {
- input_report_abs(dev, ABS_X, x);
- input_report_abs(dev, ABS_Y, y);
+ if (f.z > 0) {
+ input_report_abs(dev, ABS_X, f.x);
+ input_report_abs(dev, ABS_Y, f.y);
}
- input_report_abs(dev, ABS_PRESSURE, z);
+ input_report_abs(dev, ABS_PRESSURE, f.z);
input_sync(dev);
if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
- left = packet[3] & 0x10;
- right = packet[3] & 0x20;
- middle = packet[3] & 0x40;
-
- input_report_key(dev2, BTN_LEFT, left);
- input_report_key(dev2, BTN_RIGHT, right);
- input_report_key(dev2, BTN_MIDDLE, middle);
+ input_report_key(dev2, BTN_LEFT, f.ts_left);
+ input_report_key(dev2, BTN_RIGHT, f.ts_right);
+ input_report_key(dev2, BTN_MIDDLE, f.ts_middle);
input_sync(dev2);
}
}
@@ -639,7 +662,7 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
((priv->multi_data[3] & 0x1f) << 5) |
(priv->multi_data[1] & 0x1f);
- fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+ fingers = alps_process_bitmap(priv, x_bitmap, y_bitmap,
&x1, &y1, &x2, &y2);
/* Store MT data.*/
@@ -696,25 +719,6 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
input_sync(dev);
}
-static void alps_process_packet(struct psmouse *psmouse)
-{
- struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
-
- switch (model->proto_version) {
- case ALPS_PROTO_V1:
- case ALPS_PROTO_V2:
- alps_process_packet_v1_v2(psmouse);
- break;
- case ALPS_PROTO_V3:
- alps_process_packet_v3(psmouse);
- break;
- case ALPS_PROTO_V4:
- alps_process_packet_v4(psmouse);
- break;
- }
-}
-
static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
unsigned char packet[],
bool report_buttons)
@@ -765,14 +769,14 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
if (((psmouse->packet[3] |
psmouse->packet[4] |
psmouse->packet[5]) & 0x80) ||
- (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
+ (!alps_is_valid_first_byte(priv, psmouse->packet[6]))) {
psmouse_dbg(psmouse,
"refusing packet %4ph (suspected interleaved ps/2)\n",
psmouse->packet + 3);
return PSMOUSE_BAD_DATA;
}
- alps_process_packet(psmouse);
+ priv->process_packet(psmouse);
/* Continue with the next packet */
psmouse->packet[0] = psmouse->packet[6];
@@ -816,6 +820,7 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
static void alps_flush_packet(unsigned long data)
{
struct psmouse *psmouse = (struct psmouse *)data;
+ struct alps_data *priv = psmouse->private;
serio_pause_rx(psmouse->ps2dev.serio);
@@ -833,7 +838,7 @@ static void alps_flush_packet(unsigned long data)
"refusing packet %3ph (suspected interleaved ps/2)\n",
psmouse->packet + 3);
} else {
- alps_process_packet(psmouse);
+ priv->process_packet(psmouse);
}
psmouse->pktcnt = 0;
}
@@ -844,7 +849,6 @@ static void alps_flush_packet(unsigned long data)
static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
if (psmouse->pktcnt == 3) {
@@ -857,15 +861,15 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
/* Check for PS/2 packet stuffed in the middle of ALPS packet. */
- if ((model->flags & ALPS_PS2_INTERLEAVED) &&
+ if ((priv->flags & ALPS_PS2_INTERLEAVED) &&
psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) {
return alps_handle_interleaved_ps2(psmouse);
}
- if (!alps_is_valid_first_byte(model, psmouse->packet[0])) {
+ if (!alps_is_valid_first_byte(priv, psmouse->packet[0])) {
psmouse_dbg(psmouse,
"refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
- psmouse->packet[0], model->mask0, model->byte0);
+ psmouse->packet[0], priv->mask0, priv->byte0);
return PSMOUSE_BAD_DATA;
}
@@ -879,7 +883,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
}
if (psmouse->pktcnt == psmouse->pktsize) {
- alps_process_packet(psmouse);
+ priv->process_packet(psmouse);
return PSMOUSE_FULL_PACKET;
}
@@ -967,24 +971,42 @@ static int alps_command_mode_write_reg(struct psmouse *psmouse, int addr,
return __alps_command_mode_write_reg(psmouse, value);
}
+static int alps_rpt_cmd(struct psmouse *psmouse, int init_command,
+ int repeated_command, unsigned char *param)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ param[0] = 0;
+ if (init_command && ps2_command(ps2dev, param, init_command))
+ return -EIO;
+
+ if (ps2_command(ps2dev, NULL, repeated_command) ||
+ ps2_command(ps2dev, NULL, repeated_command) ||
+ ps2_command(ps2dev, NULL, repeated_command))
+ return -EIO;
+
+ param[0] = param[1] = param[2] = 0xff;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ return -EIO;
+
+ psmouse_dbg(psmouse, "%2.2X report: %2.2x %2.2x %2.2x\n",
+ repeated_command, param[0], param[1], param[2]);
+ return 0;
+}
+
static int alps_enter_command_mode(struct psmouse *psmouse,
unsigned char *resp)
{
unsigned char param[4];
- struct ps2dev *ps2dev = &psmouse->ps2dev;
- if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
- ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+ if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_RESET_WRAP, param)) {
psmouse_err(psmouse, "failed to enter command mode\n");
return -1;
}
- if (param[0] != 0x88 && param[1] != 0x07) {
+ if (param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) {
psmouse_dbg(psmouse,
- "unknown response while entering command mode: %2.2x %2.2x %2.2x\n",
- param[0], param[1], param[2]);
+ "unknown response while entering command mode\n");
return -1;
}
@@ -1001,99 +1023,6 @@ static inline int alps_exit_command_mode(struct psmouse *psmouse)
return 0;
}
-static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *version)
-{
- struct ps2dev *ps2dev = &psmouse->ps2dev;
- static const unsigned char rates[] = { 0, 10, 20, 40, 60, 80, 100, 200 };
- unsigned char param[4];
- const struct alps_model_info *model = NULL;
- int i;
-
- /*
- * First try "E6 report".
- * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
- * The bits 0-2 of the first byte will be 1s if some buttons are
- * pressed.
- */
- param[0] = 0;
- if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11))
- return NULL;
-
- param[0] = param[1] = param[2] = 0xff;
- if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
- return NULL;
-
- psmouse_dbg(psmouse, "E6 report: %2.2x %2.2x %2.2x",
- param[0], param[1], param[2]);
-
- if ((param[0] & 0xf8) != 0 || param[1] != 0 ||
- (param[2] != 10 && param[2] != 100))
- return NULL;
-
- /*
- * Now try "E7 report". Allowed responses are in
- * alps_model_data[].signature
- */
- param[0] = 0;
- if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21))
- return NULL;
-
- param[0] = param[1] = param[2] = 0xff;
- if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
- return NULL;
-
- psmouse_dbg(psmouse, "E7 report: %2.2x %2.2x %2.2x",
- param[0], param[1], param[2]);
-
- if (version) {
- for (i = 0; i < ARRAY_SIZE(rates) && param[2] != rates[i]; i++)
- /* empty */;
- *version = (param[0] << 8) | (param[1] << 4) | i;
- }
-
- for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
- if (!memcmp(param, alps_model_data[i].signature,
- sizeof(alps_model_data[i].signature))) {
- model = alps_model_data + i;
- break;
- }
- }
-
- if (model && model->proto_version > ALPS_PROTO_V2) {
- /*
- * Need to check command mode response to identify
- * model
- */
- model = NULL;
- if (alps_enter_command_mode(psmouse, param)) {
- psmouse_warn(psmouse,
- "touchpad failed to enter command mode\n");
- } else {
- for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
- if (alps_model_data[i].proto_version > ALPS_PROTO_V2 &&
- alps_model_data[i].command_mode_resp == param[0]) {
- model = alps_model_data + i;
- break;
- }
- }
- alps_exit_command_mode(psmouse);
-
- if (!model)
- psmouse_dbg(psmouse,
- "Unknown command mode response %2.2x\n",
- param[0]);
- }
- }
-
- return model;
-}
-
/*
* For DualPoint devices select the device that should respond to
* subsequent commands. It looks like glidepad is behind stickpointer,
@@ -1137,18 +1066,10 @@ static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
static int alps_get_status(struct psmouse *psmouse, char *param)
{
- struct ps2dev *ps2dev = &psmouse->ps2dev;
-
/* Get status: 0xF5 0xF5 0xF5 0xE9 */
- if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
- ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_DISABLE, param))
return -1;
- psmouse_dbg(psmouse, "Status: %2.2x %2.2x %2.2x",
- param[0], param[1], param[2]);
-
return 0;
}
@@ -1190,16 +1111,16 @@ static int alps_poll(struct psmouse *psmouse)
unsigned char buf[sizeof(psmouse->packet)];
bool poll_failed;
- if (priv->i->flags & ALPS_PASS)
+ if (priv->flags & ALPS_PASS)
alps_passthrough_mode_v2(psmouse, true);
poll_failed = ps2_command(&psmouse->ps2dev, buf,
PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;
- if (priv->i->flags & ALPS_PASS)
+ if (priv->flags & ALPS_PASS)
alps_passthrough_mode_v2(psmouse, false);
- if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
+ if (poll_failed || (buf[0] & priv->mask0) != priv->byte0)
return -1;
if ((psmouse->badbyte & 0xc8) == 0x08) {
@@ -1217,9 +1138,8 @@ static int alps_poll(struct psmouse *psmouse)
static int alps_hw_init_v1_v2(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
- if ((model->flags & ALPS_PASS) &&
+ if ((priv->flags & ALPS_PASS) &&
alps_passthrough_mode_v2(psmouse, true)) {
return -1;
}
@@ -1234,7 +1154,7 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse)
return -1;
}
- if ((model->flags & ALPS_PASS) &&
+ if ((priv->flags & ALPS_PASS) &&
alps_passthrough_mode_v2(psmouse, false)) {
return -1;
}
@@ -1249,26 +1169,31 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse)
}
/*
- * Enable or disable passthrough mode to the trackstick. Must be in
- * command mode when calling this function.
+ * Enable or disable passthrough mode to the trackstick.
*/
-static int alps_passthrough_mode_v3(struct psmouse *psmouse, bool enable)
+static int alps_passthrough_mode_v3(struct psmouse *psmouse,
+ int reg_base, bool enable)
{
- int reg_val;
+ int reg_val, ret = -1;
- reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
- if (reg_val == -1)
+ if (alps_enter_command_mode(psmouse, NULL))
return -1;
+ reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008);
+ if (reg_val == -1)
+ goto error;
+
if (enable)
reg_val |= 0x01;
else
reg_val &= ~0x01;
- if (__alps_command_mode_write_reg(psmouse, reg_val))
- return -1;
+ ret = __alps_command_mode_write_reg(psmouse, reg_val);
- return 0;
+error:
+ if (alps_exit_command_mode(psmouse))
+ ret = -1;
+ return ret;
}
/* Must be in command mode when calling this function */
@@ -1287,73 +1212,102 @@ static int alps_absolute_mode_v3(struct psmouse *psmouse)
return 0;
}
-static int alps_hw_init_v3(struct psmouse *psmouse)
+static int alps_probe_trackstick_v3(struct psmouse *psmouse, int reg_base)
{
- struct alps_data *priv = psmouse->private;
- struct ps2dev *ps2dev = &psmouse->ps2dev;
- int reg_val;
- unsigned char param[4];
-
- priv->nibble_commands = alps_v3_nibble_commands;
- priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+ int ret = -EIO, reg_val;
if (alps_enter_command_mode(psmouse, NULL))
goto error;
- /* Check for trackstick */
- reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
+ reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08);
if (reg_val == -1)
goto error;
- if (reg_val & 0x80) {
- if (alps_passthrough_mode_v3(psmouse, true))
- goto error;
- if (alps_exit_command_mode(psmouse))
- goto error;
+
+ /* bit 7: trackstick is present */
+ ret = reg_val & 0x80 ? 0 : -ENODEV;
+
+error:
+ alps_exit_command_mode(psmouse);
+ return ret;
+}
+
+static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int ret = 0;
+ unsigned char param[4];
+
+ if (alps_passthrough_mode_v3(psmouse, reg_base, true))
+ return -EIO;
+
+ /*
+ * E7 report for the trackstick
+ *
+ * There have been reports of failures to seem to trace back
+ * to the above trackstick check failing. When these occur
+ * this E7 report fails, so when that happens we continue
+ * with the assumption that there isn't a trackstick after
+ * all.
+ */
+ if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) {
+ psmouse_warn(psmouse, "trackstick E7 report failed\n");
+ ret = -ENODEV;
+ } else {
+ psmouse_dbg(psmouse,
+ "trackstick E7 report: %2.2x %2.2x %2.2x\n",
+ param[0], param[1], param[2]);
/*
- * E7 report for the trackstick
- *
- * There have been reports of failures to seem to trace back
- * to the above trackstick check failing. When these occur
- * this E7 report fails, so when that happens we continue
- * with the assumption that there isn't a trackstick after
- * all.
+ * Not sure what this does, but it is absolutely
+ * essential. Without it, the touchpad does not
+ * work at all and the trackstick just emits normal
+ * PS/2 packets.
*/
- param[0] = 0x64;
- if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
- ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
- psmouse_warn(psmouse, "trackstick E7 report failed\n");
- } else {
- psmouse_dbg(psmouse,
- "trackstick E7 report: %2.2x %2.2x %2.2x\n",
- param[0], param[1], param[2]);
-
- /*
- * Not sure what this does, but it is absolutely
- * essential. Without it, the touchpad does not
- * work at all and the trackstick just emits normal
- * PS/2 packets.
- */
- if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
- ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
- alps_command_mode_send_nibble(psmouse, 0x9) ||
- alps_command_mode_send_nibble(psmouse, 0x4)) {
- psmouse_err(psmouse,
- "Error sending magic E6 sequence\n");
- goto error_passthrough;
- }
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ alps_command_mode_send_nibble(psmouse, 0x9) ||
+ alps_command_mode_send_nibble(psmouse, 0x4)) {
+ psmouse_err(psmouse,
+ "Error sending magic E6 sequence\n");
+ ret = -EIO;
+ goto error;
}
- if (alps_enter_command_mode(psmouse, NULL))
- goto error_passthrough;
- if (alps_passthrough_mode_v3(psmouse, false))
- goto error;
+ /*
+ * This ensures the trackstick packets are in the format
+ * supported by this driver. If bit 1 isn't set the packet
+ * format is different.
+ */
+ if (alps_enter_command_mode(psmouse, NULL) ||
+ alps_command_mode_write_reg(psmouse,
+ reg_base + 0x08, 0x82) ||
+ alps_exit_command_mode(psmouse))
+ ret = -EIO;
}
- if (alps_absolute_mode_v3(psmouse)) {
+error:
+ if (alps_passthrough_mode_v3(psmouse, reg_base, false))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int alps_hw_init_v3(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int reg_val;
+ unsigned char param[4];
+
+ reg_val = alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE);
+ if (reg_val == -EIO)
+ goto error;
+ if (reg_val == 0 &&
+ alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO)
+ goto error;
+
+ if (alps_enter_command_mode(psmouse, NULL) ||
+ alps_absolute_mode_v3(psmouse)) {
psmouse_err(psmouse, "Failed to enter absolute mode\n");
goto error;
}
@@ -1390,14 +1344,6 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04))
goto error;
- /*
- * This ensures the trackstick packets are in the format
- * supported by this driver. If bit 1 isn't set the packet
- * format is different.
- */
- if (alps_command_mode_write_reg(psmouse, 0x0008, 0x82))
- goto error;
-
alps_exit_command_mode(psmouse);
/* Set rate and enable data reporting */
@@ -1410,10 +1356,6 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
return 0;
-error_passthrough:
- /* Something failed while in passthrough mode, so try to get out */
- if (!alps_enter_command_mode(psmouse, NULL))
- alps_passthrough_mode_v3(psmouse, false);
error:
/*
* Leaving the touchpad in command mode will essentially render
@@ -1424,6 +1366,50 @@ error:
return -1;
}
+static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int reg_val, ret = -1;
+
+ if (priv->flags & ALPS_DUALPOINT) {
+ reg_val = alps_setup_trackstick_v3(psmouse,
+ ALPS_REG_BASE_RUSHMORE);
+ if (reg_val == -EIO)
+ goto error;
+ if (reg_val == -ENODEV)
+ priv->flags &= ~ALPS_DUALPOINT;
+ }
+
+ if (alps_enter_command_mode(psmouse, NULL) ||
+ alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 ||
+ alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00))
+ goto error;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0xc2c6);
+ if (reg_val == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, reg_val & 0xfd))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0xc2c9, 0x64))
+ goto error;
+
+ /* enter absolute mode */
+ reg_val = alps_command_mode_read_reg(psmouse, 0xc2c4);
+ if (reg_val == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, reg_val | 0x02))
+ goto error;
+
+ alps_exit_command_mode(psmouse);
+ return ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE);
+
+error:
+ alps_exit_command_mode(psmouse);
+ return ret;
+}
+
/* Must be in command mode when calling this function */
static int alps_absolute_mode_v4(struct psmouse *psmouse)
{
@@ -1442,13 +1428,9 @@ static int alps_absolute_mode_v4(struct psmouse *psmouse)
static int alps_hw_init_v4(struct psmouse *psmouse)
{
- struct alps_data *priv = psmouse->private;
struct ps2dev *ps2dev = &psmouse->ps2dev;
unsigned char param[4];
- priv->nibble_commands = alps_v4_nibble_commands;
- priv->addr_command = PSMOUSE_CMD_DISABLE;
-
if (alps_enter_command_mode(psmouse, NULL))
goto error;
@@ -1517,39 +1499,140 @@ error:
return -1;
}
-static int alps_hw_init(struct psmouse *psmouse)
+static void alps_set_defaults(struct alps_data *priv)
{
- struct alps_data *priv = psmouse->private;
- const struct alps_model_info *model = priv->i;
- int ret = -1;
+ priv->byte0 = 0x8f;
+ priv->mask0 = 0x8f;
+ priv->flags = ALPS_DUALPOINT;
+
+ priv->x_max = 2000;
+ priv->y_max = 1400;
+ priv->x_bits = 15;
+ priv->y_bits = 11;
- switch (model->proto_version) {
+ switch (priv->proto_version) {
case ALPS_PROTO_V1:
case ALPS_PROTO_V2:
- ret = alps_hw_init_v1_v2(psmouse);
+ priv->hw_init = alps_hw_init_v1_v2;
+ priv->process_packet = alps_process_packet_v1_v2;
+ priv->set_abs_params = alps_set_abs_params_st;
break;
case ALPS_PROTO_V3:
- ret = alps_hw_init_v3(psmouse);
+ priv->hw_init = alps_hw_init_v3;
+ priv->process_packet = alps_process_packet_v3;
+ priv->set_abs_params = alps_set_abs_params_mt;
+ priv->decode_fields = alps_decode_pinnacle;
+ priv->nibble_commands = alps_v3_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
break;
case ALPS_PROTO_V4:
- ret = alps_hw_init_v4(psmouse);
+ priv->hw_init = alps_hw_init_v4;
+ priv->process_packet = alps_process_packet_v4;
+ priv->set_abs_params = alps_set_abs_params_mt;
+ priv->nibble_commands = alps_v4_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_DISABLE;
break;
}
+}
- return ret;
+static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
+ unsigned char *e7, unsigned char *ec)
+{
+ const struct alps_model_info *model;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
+ model = &alps_model_data[i];
+
+ if (!memcmp(e7, model->signature, sizeof(model->signature)) &&
+ (!model->command_mode_resp ||
+ model->command_mode_resp == ec[2])) {
+
+ priv->proto_version = model->proto_version;
+ alps_set_defaults(priv);
+
+ priv->flags = model->flags;
+ priv->byte0 = model->byte0;
+ priv->mask0 = model->mask0;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
+{
+ unsigned char e6[4], e7[4], ec[4];
+
+ /*
+ * First try "E6 report".
+ * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
+ * The bits 0-2 of the first byte will be 1s if some buttons are
+ * pressed.
+ */
+ if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
+ PSMOUSE_CMD_SETSCALE11, e6))
+ return -EIO;
+
+ if ((e6[0] & 0xf8) != 0 || e6[1] != 0 || (e6[2] != 10 && e6[2] != 100))
+ return -EINVAL;
+
+ /*
+ * Now get the "E7" and "EC" reports. These will uniquely identify
+ * most ALPS touchpads.
+ */
+ if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
+ PSMOUSE_CMD_SETSCALE21, e7) ||
+ alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
+ PSMOUSE_CMD_RESET_WRAP, ec) ||
+ alps_exit_command_mode(psmouse))
+ return -EIO;
+
+ if (alps_match_table(psmouse, priv, e7, ec) == 0) {
+ return 0;
+ } else if (ec[0] == 0x88 && ec[1] == 0x08) {
+ priv->proto_version = ALPS_PROTO_V3;
+ alps_set_defaults(priv);
+
+ priv->hw_init = alps_hw_init_rushmore_v3;
+ priv->decode_fields = alps_decode_rushmore;
+ priv->x_bits = 16;
+ priv->y_bits = 12;
+
+ /* hack to make addr_command, nibble_command available */
+ psmouse->private = priv;
+
+ if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE))
+ priv->flags &= ~ALPS_DUALPOINT;
+
+ return 0;
+ } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
+ ec[2] >= 0x90 && ec[2] <= 0x9d) {
+ priv->proto_version = ALPS_PROTO_V3;
+ alps_set_defaults(priv);
+
+ return 0;
+ }
+
+ psmouse_info(psmouse,
+ "Unknown ALPS touchpad: E7=%2.2x %2.2x %2.2x, EC=%2.2x %2.2x %2.2x\n",
+ e7[0], e7[1], e7[2], ec[0], ec[1], ec[2]);
+
+ return -EINVAL;
}
static int alps_reconnect(struct psmouse *psmouse)
{
- const struct alps_model_info *model;
+ struct alps_data *priv = psmouse->private;
psmouse_reset(psmouse);
- model = alps_get_model(psmouse, NULL);
- if (!model)
+ if (alps_identify(psmouse, priv) < 0)
return -1;
- return alps_hw_init(psmouse);
+ return priv->hw_init(psmouse);
}
static void alps_disconnect(struct psmouse *psmouse)
@@ -1562,12 +1645,33 @@ static void alps_disconnect(struct psmouse *psmouse)
kfree(priv);
}
+static void alps_set_abs_params_st(struct alps_data *priv,
+ struct input_dev *dev1)
+{
+ input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
+ input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+}
+
+static void alps_set_abs_params_mt(struct alps_data *priv,
+ struct input_dev *dev1)
+{
+ set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
+ input_mt_init_slots(dev1, 2, 0);
+ input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, priv->x_max, 0, 0);
+ input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, priv->y_max, 0, 0);
+
+ set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
+ set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
+ set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
+
+ input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0);
+ input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0);
+}
+
int alps_init(struct psmouse *psmouse)
{
struct alps_data *priv;
- const struct alps_model_info *model;
struct input_dev *dev1 = psmouse->dev, *dev2;
- int version;
priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
dev2 = input_allocate_device();
@@ -1581,13 +1685,10 @@ int alps_init(struct psmouse *psmouse)
psmouse_reset(psmouse);
- model = alps_get_model(psmouse, &version);
- if (!model)
+ if (alps_identify(psmouse, priv) < 0)
goto init_fail;
- priv->i = model;
-
- if (alps_hw_init(psmouse))
+ if (priv->hw_init(psmouse))
goto init_fail;
/*
@@ -1609,41 +1710,20 @@ int alps_init(struct psmouse *psmouse)
dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
- switch (model->proto_version) {
- case ALPS_PROTO_V1:
- case ALPS_PROTO_V2:
- input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
- input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
- break;
- case ALPS_PROTO_V3:
- case ALPS_PROTO_V4:
- set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
- input_mt_init_slots(dev1, 2, 0);
- input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0);
- input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, ALPS_V3_Y_MAX, 0, 0);
-
- set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
- set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
- set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
-
- input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0);
- input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0);
- break;
- }
-
+ priv->set_abs_params(priv, dev1);
input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
- if (model->flags & ALPS_WHEEL) {
+ if (priv->flags & ALPS_WHEEL) {
dev1->evbit[BIT_WORD(EV_REL)] |= BIT_MASK(EV_REL);
dev1->relbit[BIT_WORD(REL_WHEEL)] |= BIT_MASK(REL_WHEEL);
}
- if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
+ if (priv->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
dev1->keybit[BIT_WORD(BTN_FORWARD)] |= BIT_MASK(BTN_FORWARD);
dev1->keybit[BIT_WORD(BTN_BACK)] |= BIT_MASK(BTN_BACK);
}
- if (model->flags & ALPS_FOUR_BUTTONS) {
+ if (priv->flags & ALPS_FOUR_BUTTONS) {
dev1->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_0);
dev1->keybit[BIT_WORD(BTN_1)] |= BIT_MASK(BTN_1);
dev1->keybit[BIT_WORD(BTN_2)] |= BIT_MASK(BTN_2);
@@ -1654,7 +1734,8 @@ int alps_init(struct psmouse *psmouse)
snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
dev2->phys = priv->phys;
- dev2->name = (model->flags & ALPS_DUALPOINT) ? "DualPoint Stick" : "PS/2 Mouse";
+ dev2->name = (priv->flags & ALPS_DUALPOINT) ?
+ "DualPoint Stick" : "PS/2 Mouse";
dev2->id.bustype = BUS_I8042;
dev2->id.vendor = 0x0002;
dev2->id.product = PSMOUSE_ALPS;
@@ -1673,7 +1754,7 @@ int alps_init(struct psmouse *psmouse)
psmouse->poll = alps_poll;
psmouse->disconnect = alps_disconnect;
psmouse->reconnect = alps_reconnect;
- psmouse->pktsize = model->proto_version == ALPS_PROTO_V4 ? 8 : 6;
+ psmouse->pktsize = priv->proto_version == ALPS_PROTO_V4 ? 8 : 6;
/* We are having trouble resyncing ALPS touchpads so disable it for now */
psmouse->resync_time = 0;
@@ -1690,18 +1771,16 @@ init_fail:
int alps_detect(struct psmouse *psmouse, bool set_properties)
{
- int version;
- const struct alps_model_info *model;
+ struct alps_data dummy;
- model = alps_get_model(psmouse, &version);
- if (!model)
+ if (alps_identify(psmouse, &dummy) < 0)
return -1;
if (set_properties) {
psmouse->vendor = "ALPS";
- psmouse->name = model->flags & ALPS_DUALPOINT ?
+ psmouse->name = dummy.flags & ALPS_DUALPOINT ?
"DualPoint TouchPad" : "GlidePoint";
- psmouse->model = version;
+ psmouse->model = dummy.proto_version << 8;
}
return 0;
}
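
The bulk of the alps.c rework above replaces "look up the model table and switch on proto_version at every call site" with per-device function pointers (hw_init, process_packet, decode_fields, set_abs_params) that are filled in once when the touchpad is identified. A compact, self-contained C sketch of that dispatch pattern; the protocol numbers and handler bodies are purely illustrative:

#include <stdio.h>

struct dev_priv {
        int proto_version;
        /* chosen once at identification time, called everywhere else */
        int  (*hw_init)(struct dev_priv *priv);
        void (*process_packet)(struct dev_priv *priv, const unsigned char *pkt);
};

static int hw_init_v3(struct dev_priv *priv)
{
        printf("v%d init\n", priv->proto_version);
        return 0;
}

static void process_packet_v3(struct dev_priv *priv, const unsigned char *pkt)
{
        printf("v%d packet starting with %02x\n", priv->proto_version, pkt[0]);
}

/* the identify step picks the handlers; callers never switch on the version */
static void set_defaults(struct dev_priv *priv, int proto_version)
{
        priv->proto_version = proto_version;
        switch (proto_version) {
        case 3:
                priv->hw_init = hw_init_v3;
                priv->process_packet = process_packet_v3;
                break;
        }
}

int main(void)
{
        struct dev_priv priv;
        const unsigned char pkt[6] = { 0x8f, 0, 0, 0, 0, 0 };

        set_defaults(&priv, 3);
        priv.hw_init(&priv);            /* dispatch through the pointer */
        priv.process_packet(&priv, pkt);
        return 0;
}

The payoff is the same as in the patch: supporting a new protocol variant means assigning one more set of handlers during identification instead of touching every init and packet-processing path.
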
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index ae1ac354c778..970480551b6e 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -12,35 +12,146 @@
#ifndef _ALPS_H
#define _ALPS_H
-#define ALPS_PROTO_V1 0
-#define ALPS_PROTO_V2 1
-#define ALPS_PROTO_V3 2
-#define ALPS_PROTO_V4 3
+#define ALPS_PROTO_V1 1
+#define ALPS_PROTO_V2 2
+#define ALPS_PROTO_V3 3
+#define ALPS_PROTO_V4 4
+/**
+ * struct alps_model_info - touchpad ID table
+ * @signature: E7 response string to match.
+ * @command_mode_resp: For V3/V4 touchpads, the final byte of the EC response
+ * (aka command mode response) identifies the firmware minor version. This
+ * can be used to distinguish different hardware models which are not
+ * uniquely identifiable through their E7 responses.
+ * @proto_version: Indicates V1/V2/V3/...
+ * @byte0: Helps figure out whether a position report packet matches the
+ * known format for this model. The first byte of the report, ANDed with
+ * mask0, should match byte0.
+ * @mask0: The mask used to check the first byte of the report.
+ * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ *
+ * Many (but not all) ALPS touchpads can be identified by looking at the
+ * values returned in the "E7 report" and/or the "EC report." This table
+ * lists a number of such touchpads.
+ */
struct alps_model_info {
- unsigned char signature[3];
- unsigned char command_mode_resp; /* v3/v4 only */
+ unsigned char signature[3];
+ unsigned char command_mode_resp;
unsigned char proto_version;
- unsigned char byte0, mask0;
- unsigned char flags;
+ unsigned char byte0, mask0;
+ unsigned char flags;
};
+/**
+ * struct alps_nibble_commands - encodings for register accesses
+ * @command: PS/2 command used for the nibble
+ * @data: Data supplied as an argument to the PS/2 command, if applicable
+ *
+ * The ALPS protocol uses magic sequences to transmit binary data to the
+ * touchpad, as it is generally not OK to send arbitrary bytes out the
+ * PS/2 port. Each of the sequences in this table sends one nibble of the
+ * register address or (write) data. Different versions of the ALPS protocol
+ * use slightly different encodings.
+ */
struct alps_nibble_commands {
int command;
unsigned char data;
};
+/**
+ * struct alps_fields - decoded version of the report packet
+ * @x_map: Bitmap of active X positions for MT.
+ * @y_map: Bitmap of active Y positions for MT.
+ * @fingers: Number of fingers for MT.
+ * @x: X position for ST.
+ * @y: Y position for ST.
+ * @z: Z position for ST.
+ * @first_mp: Packet is the first of a multi-packet report.
+ * @is_mp: Packet is part of a multi-packet report.
+ * @left: Left touchpad button is active.
+ * @right: Right touchpad button is active.
+ * @middle: Middle touchpad button is active.
+ * @ts_left: Left trackstick button is active.
+ * @ts_right: Right trackstick button is active.
+ * @ts_middle: Middle trackstick button is active.
+ */
+struct alps_fields {
+ unsigned int x_map;
+ unsigned int y_map;
+ unsigned int fingers;
+ unsigned int x;
+ unsigned int y;
+ unsigned int z;
+ unsigned int first_mp:1;
+ unsigned int is_mp:1;
+
+ unsigned int left:1;
+ unsigned int right:1;
+ unsigned int middle:1;
+
+ unsigned int ts_left:1;
+ unsigned int ts_right:1;
+ unsigned int ts_middle:1;
+};
+
+/**
+ * struct alps_data - private data structure for the ALPS driver
+ * @dev2: "Relative" device used to report trackstick or mouse activity.
+ * @phys: Physical path for the relative device.
+ * @nibble_commands: Command mapping used for touchpad register accesses.
+ * @addr_command: Command used to tell the touchpad that a register address
+ * follows.
+ * @proto_version: Indicates V1/V2/V3/...
+ * @byte0: Helps figure out whether a position report packet matches the
+ * known format for this model. The first byte of the report, ANDed with
+ * mask0, should match byte0.
+ * @mask0: The mask used to check the first byte of the report.
+ * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ * @x_max: Largest possible X position value.
+ * @y_max: Largest possible Y position value.
+ * @x_bits: Number of X bits in the MT bitmap.
+ * @y_bits: Number of Y bits in the MT bitmap.
+ * @hw_init: Protocol-specific hardware init function.
+ * @process_packet: Protocol-specific function to process a report packet.
+ * @decode_fields: Protocol-specific function to read packet bitfields.
+ * @set_abs_params: Protocol-specific function to configure the input_dev.
+ * @prev_fin: Finger bit from previous packet.
+ * @multi_packet: Multi-packet data in progress.
+ * @multi_data: Saved multi-packet data.
+ * @x1: First X coordinate from last MT report.
+ * @x2: Second X coordinate from last MT report.
+ * @y1: First Y coordinate from last MT report.
+ * @y2: Second Y coordinate from last MT report.
+ * @fingers: Number of fingers from last MT report.
+ * @quirks: Bitmap of ALPS_QUIRK_*.
+ * @timer: Timer for flushing out the final report packet in the stream.
+ */
struct alps_data {
- struct input_dev *dev2; /* Relative device */
- char phys[32]; /* Phys */
- const struct alps_model_info *i;/* Info */
+ struct input_dev *dev2;
+ char phys[32];
+
+ /* these are autodetected when the device is identified */
const struct alps_nibble_commands *nibble_commands;
- int addr_command; /* Command to set register address */
- int prev_fin; /* Finger bit from previous packet */
- int multi_packet; /* Multi-packet data in progress */
- unsigned char multi_data[6]; /* Saved multi-packet data */
- int x1, x2, y1, y2; /* Coordinates from last MT report */
- int fingers; /* Number of fingers from MT report */
+ int addr_command;
+ unsigned char proto_version;
+ unsigned char byte0, mask0;
+ unsigned char flags;
+ int x_max;
+ int y_max;
+ int x_bits;
+ int y_bits;
+
+ int (*hw_init)(struct psmouse *psmouse);
+ void (*process_packet)(struct psmouse *psmouse);
+ void (*decode_fields)(struct alps_fields *f, unsigned char *p);
+ void (*set_abs_params)(struct alps_data *priv, struct input_dev *dev1);
+
+ int prev_fin;
+ int multi_packet;
+ unsigned char multi_data[6];
+ int x1, x2, y1, y2;
+ int fingers;
u8 quirks;
struct timer_list timer;
};
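
To make the @x_bits/@y_bits and @x_max/@y_max fields documented above concrete: alps_process_bitmap() (see the alps.c hunks earlier) locates a contiguous run of set bits in the multi-touch bitmap and maps the centre of that run onto the coordinate range. A small standalone sketch of that scaling step, using the same formula as the patch but with made-up sensor values:

#include <stdio.h>

/*
 * Map a run of set bitmap bits [start, start + len) onto a 0..max
 * coordinate range, as done for *x1/*y1 in alps_process_bitmap():
 * the centre of the run, in half-bit units, is scaled linearly.
 */
static int bitmap_run_to_coord(int start, int len, int bits, int max)
{
        return (max * (2 * start + len - 1)) / (2 * (bits - 1));
}

int main(void)
{
        int x_bits = 15, x_max = 2000;  /* e.g. a 15-bit X bitmap, 0..2000 */

        /* a finger covering bits 6 and 7 lands just left of centre: 928 */
        printf("%d\n", bitmap_run_to_coord(6, 2, x_bits, x_max));
        return 0;
}
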
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
new file mode 100644
index 000000000000..b409c3d7d4fb
--- /dev/null
+++ b/drivers/input/mouse/cyapa.c
@@ -0,0 +1,973 @@
+/*
+ * Cypress APA trackpad with I2C interface
+ *
+ * Author: Dudley Du <dudl@cypress.com>
+ * Further cleanup and restructuring by:
+ * Daniel Kurtz <djkurtz@chromium.org>
+ * Benson Leung <bleung@chromium.org>
+ *
+ * Copyright (C) 2011-2012 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2012 Google, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/* APA trackpad firmware generation */
+#define CYAPA_GEN3 0x03 /* support MT-protocol B with tracking ID. */
+
+#define CYAPA_NAME "Cypress APA Trackpad (cyapa)"
+
+/* commands for read/write registers of Cypress trackpad */
+#define CYAPA_CMD_SOFT_RESET 0x00
+#define CYAPA_CMD_POWER_MODE 0x01
+#define CYAPA_CMD_DEV_STATUS 0x02
+#define CYAPA_CMD_GROUP_DATA 0x03
+#define CYAPA_CMD_GROUP_CMD 0x04
+#define CYAPA_CMD_GROUP_QUERY 0x05
+#define CYAPA_CMD_BL_STATUS 0x06
+#define CYAPA_CMD_BL_HEAD 0x07
+#define CYAPA_CMD_BL_CMD 0x08
+#define CYAPA_CMD_BL_DATA 0x09
+#define CYAPA_CMD_BL_ALL 0x0a
+#define CYAPA_CMD_BLK_PRODUCT_ID 0x0b
+#define CYAPA_CMD_BLK_HEAD 0x0c
+
+/* report data start reg offset address. */
+#define DATA_REG_START_OFFSET 0x0000
+
+#define BL_HEAD_OFFSET 0x00
+#define BL_DATA_OFFSET 0x10
+
+/*
+ * Operational Device Status Register
+ *
+ * bit 7: Valid interrupt source
+ * bit 6 - 4: Reserved
+ * bit 3 - 2: Power status
+ * bit 1 - 0: Device status
+ */
+#define REG_OP_STATUS 0x00
+#define OP_STATUS_SRC 0x80
+#define OP_STATUS_POWER 0x0c
+#define OP_STATUS_DEV 0x03
+#define OP_STATUS_MASK (OP_STATUS_SRC | OP_STATUS_POWER | OP_STATUS_DEV)
+
+/*
+ * Operational Finger Count/Button Flags Register
+ *
+ * bit 7 - 4: Number of touched fingers
+ * bit 3: Valid data
+ * bit 2: Middle Physical Button
+ * bit 1: Right Physical Button
+ * bit 0: Left physical Button
+ */
+#define REG_OP_DATA1 0x01
+#define OP_DATA_VALID 0x08
+#define OP_DATA_MIDDLE_BTN 0x04
+#define OP_DATA_RIGHT_BTN 0x02
+#define OP_DATA_LEFT_BTN 0x01
+#define OP_DATA_BTN_MASK (OP_DATA_MIDDLE_BTN | OP_DATA_RIGHT_BTN | \
+ OP_DATA_LEFT_BTN)
+
+/*
+ * Bootloader Status Register
+ *
+ * bit 7: Busy
+ * bit 6 - 5: Reserved
+ * bit 4: Bootloader running
+ * bit 3 - 1: Reserved
+ * bit 0: Checksum valid
+ */
+#define REG_BL_STATUS 0x01
+#define BL_STATUS_BUSY 0x80
+#define BL_STATUS_RUNNING 0x10
+#define BL_STATUS_DATA_VALID 0x08
+#define BL_STATUS_CSUM_VALID 0x01
+
+/*
+ * Bootloader Error Register
+ *
+ * bit 7: Invalid
+ * bit 6: Invalid security key
+ * bit 5: Bootloading
+ * bit 4: Command checksum
+ * bit 3: Flash protection error
+ * bit 2: Flash checksum error
+ * bit 1 - 0: Reserved
+ */
+#define REG_BL_ERROR 0x02
+#define BL_ERROR_INVALID 0x80
+#define BL_ERROR_INVALID_KEY 0x40
+#define BL_ERROR_BOOTLOADING 0x20
+#define BL_ERROR_CMD_CSUM 0x10
+#define BL_ERROR_FLASH_PROT 0x08
+#define BL_ERROR_FLASH_CSUM 0x04
+
+#define BL_STATUS_SIZE 3 /* length of bootloader status registers */
+#define BLK_HEAD_BYTES 32
+
+#define PRODUCT_ID_SIZE 16
+#define QUERY_DATA_SIZE 27
+#define REG_PROTOCOL_GEN_QUERY_OFFSET 20
+
+#define REG_OFFSET_DATA_BASE 0x0000
+#define REG_OFFSET_COMMAND_BASE 0x0028
+#define REG_OFFSET_QUERY_BASE 0x002a
+
+#define CAPABILITY_LEFT_BTN_MASK (0x01 << 3)
+#define CAPABILITY_RIGHT_BTN_MASK (0x01 << 4)
+#define CAPABILITY_MIDDLE_BTN_MASK (0x01 << 5)
+#define CAPABILITY_BTN_MASK (CAPABILITY_LEFT_BTN_MASK | \
+ CAPABILITY_RIGHT_BTN_MASK | \
+ CAPABILITY_MIDDLE_BTN_MASK)
+
+#define CYAPA_OFFSET_SOFT_RESET REG_OFFSET_COMMAND_BASE
+
+#define REG_OFFSET_POWER_MODE (REG_OFFSET_COMMAND_BASE + 1)
+
+#define PWR_MODE_MASK 0xfc
+#define PWR_MODE_FULL_ACTIVE (0x3f << 2)
+#define PWR_MODE_IDLE (0x05 << 2) /* default sleep time is 50 ms. */
+#define PWR_MODE_OFF (0x00 << 2)
+
+#define PWR_STATUS_MASK 0x0c
+#define PWR_STATUS_ACTIVE (0x03 << 2)
+#define PWR_STATUS_IDLE (0x02 << 2)
+#define PWR_STATUS_OFF (0x00 << 2)
+
+/*
+ * CYAPA trackpad device states.
+ * Used in register 0x00, bit1-0, DeviceStatus field.
+ * Other values indicate device is in an abnormal state and must be reset.
+ */
+#define CYAPA_DEV_NORMAL 0x03
+#define CYAPA_DEV_BUSY 0x01
+
+enum cyapa_state {
+ CYAPA_STATE_OP,
+ CYAPA_STATE_BL_IDLE,
+ CYAPA_STATE_BL_ACTIVE,
+ CYAPA_STATE_BL_BUSY,
+ CYAPA_STATE_NO_DEVICE,
+};
+
+
+struct cyapa_touch {
+ /*
+	 * high bits of x/y position value
+ * bit 7 - 4: high 4 bits of x position value
+ * bit 3 - 0: high 4 bits of y position value
+ */
+ u8 xy_hi;
+ u8 x_lo; /* low 8 bits of x position value. */
+ u8 y_lo; /* low 8 bits of y position value. */
+ u8 pressure;
+ /* id range is 1 - 15. It is incremented with every new touch. */
+ u8 id;
+} __packed;
+
+/* The touch.id is used as the MT slot id, thus max MT slot is 15 */
+#define CYAPA_MAX_MT_SLOTS 15
+
+struct cyapa_reg_data {
+ /*
+ * bit 0 - 1: device status
+ * bit 3 - 2: power mode
+ * bit 6 - 4: reserved
+ * bit 7: interrupt valid bit
+ */
+ u8 device_status;
+ /*
+ * bit 7 - 4: number of fingers currently touching pad
+ * bit 3: valid data check bit
+ * bit 2: middle mechanism button state if exists
+ * bit 1: right mechanism button state if exists
+ * bit 0: left mechanism button state if exists
+ */
+ u8 finger_btn;
+ /* CYAPA reports up to 5 touches per packet. */
+ struct cyapa_touch touches[5];
+} __packed;
+
+/* The main device structure */
+struct cyapa {
+ enum cyapa_state state;
+
+ struct i2c_client *client;
+ struct input_dev *input;
+ char phys[32]; /* device physical location */
+ int irq;
+ bool irq_wake; /* irq wake is enabled */
+ bool smbus;
+
+ /* read from query data region. */
+ char product_id[16];
+ u8 btn_capability;
+ u8 gen;
+ int max_abs_x;
+ int max_abs_y;
+ int physical_size_x;
+ int physical_size_y;
+};
+
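+/*
+ * Bootloader command sequences: writing these byte strings to register 0
+ * either deactivates the bootloader or makes it exit to operational mode
+ * (see cyapa_bl_deactivate() and cyapa_bl_exit() below).
+ */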
+static const u8 bl_deactivate[] = { 0x00, 0xff, 0x3b, 0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07 };
+static const u8 bl_exit[] = { 0x00, 0xff, 0xa5, 0x00, 0x01, 0x02, 0x03, 0x04,
+ 0x05, 0x06, 0x07 };
+
+struct cyapa_cmd_len {
+ u8 cmd;
+ u8 len;
+};
+
+#define CYAPA_ADAPTER_FUNC_NONE 0
+#define CYAPA_ADAPTER_FUNC_I2C 1
+#define CYAPA_ADAPTER_FUNC_SMBUS 2
+#define CYAPA_ADAPTER_FUNC_BOTH 3
+
+/*
+ * macros for SMBus communication
+ */
+#define SMBUS_READ 0x01
+#define SMBUS_WRITE 0x00
+#define SMBUS_ENCODE_IDX(cmd, idx) ((cmd) | (((idx) & 0x03) << 1))
+#define SMBUS_ENCODE_RW(cmd, rw) ((cmd) | ((rw) & 0x01))
+#define SMBUS_BYTE_BLOCK_CMD_MASK 0x80
+#define SMBUS_GROUP_BLOCK_CMD_MASK 0x40
+
+ /* for byte read/write command */
+#define CMD_RESET 0
+#define CMD_POWER_MODE 1
+#define CMD_DEV_STATUS 2
+#define SMBUS_BYTE_CMD(cmd) (((cmd) & 0x3f) << 1)
+#define CYAPA_SMBUS_RESET SMBUS_BYTE_CMD(CMD_RESET)
+#define CYAPA_SMBUS_POWER_MODE SMBUS_BYTE_CMD(CMD_POWER_MODE)
+#define CYAPA_SMBUS_DEV_STATUS SMBUS_BYTE_CMD(CMD_DEV_STATUS)
+
+ /* for group registers read/write command */
+#define REG_GROUP_DATA 0
+#define REG_GROUP_CMD 2
+#define REG_GROUP_QUERY 3
+#define SMBUS_GROUP_CMD(grp) (0x80 | (((grp) & 0x07) << 3))
+#define CYAPA_SMBUS_GROUP_DATA SMBUS_GROUP_CMD(REG_GROUP_DATA)
+#define CYAPA_SMBUS_GROUP_CMD SMBUS_GROUP_CMD(REG_GROUP_CMD)
+#define CYAPA_SMBUS_GROUP_QUERY SMBUS_GROUP_CMD(REG_GROUP_QUERY)
+
+ /* for register block read/write command */
+#define CMD_BL_STATUS 0
+#define CMD_BL_HEAD 1
+#define CMD_BL_CMD 2
+#define CMD_BL_DATA 3
+#define CMD_BL_ALL 4
+#define CMD_BLK_PRODUCT_ID 5
+#define CMD_BLK_HEAD 6
+#define SMBUS_BLOCK_CMD(cmd) (0xc0 | (((cmd) & 0x1f) << 1))
+
+/* register block read/write command in bootloader mode */
+#define CYAPA_SMBUS_BL_STATUS SMBUS_BLOCK_CMD(CMD_BL_STATUS)
+#define CYAPA_SMBUS_BL_HEAD SMBUS_BLOCK_CMD(CMD_BL_HEAD)
+#define CYAPA_SMBUS_BL_CMD SMBUS_BLOCK_CMD(CMD_BL_CMD)
+#define CYAPA_SMBUS_BL_DATA SMBUS_BLOCK_CMD(CMD_BL_DATA)
+#define CYAPA_SMBUS_BL_ALL SMBUS_BLOCK_CMD(CMD_BL_ALL)
+
+/* register block read/write command in operational mode */
+#define CYAPA_SMBUS_BLK_PRODUCT_ID SMBUS_BLOCK_CMD(CMD_BLK_PRODUCT_ID)
+#define CYAPA_SMBUS_BLK_HEAD SMBUS_BLOCK_CMD(CMD_BLK_HEAD)
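+/*
+ * Example encoding (from the macros above): CYAPA_SMBUS_BL_STATUS is
+ * SMBUS_BLOCK_CMD(CMD_BL_STATUS) = 0xc0; cyapa_smbus_read_block() then ORs
+ * in SMBUS_READ, so 0xc1 is the command byte used for a status read.
+ */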
+
+static const struct cyapa_cmd_len cyapa_i2c_cmds[] = {
+ { CYAPA_OFFSET_SOFT_RESET, 1 },
+ { REG_OFFSET_COMMAND_BASE + 1, 1 },
+ { REG_OFFSET_DATA_BASE, 1 },
+ { REG_OFFSET_DATA_BASE, sizeof(struct cyapa_reg_data) },
+ { REG_OFFSET_COMMAND_BASE, 0 },
+ { REG_OFFSET_QUERY_BASE, QUERY_DATA_SIZE },
+ { BL_HEAD_OFFSET, 3 },
+ { BL_HEAD_OFFSET, 16 },
+ { BL_HEAD_OFFSET, 16 },
+ { BL_DATA_OFFSET, 16 },
+ { BL_HEAD_OFFSET, 32 },
+ { REG_OFFSET_QUERY_BASE, PRODUCT_ID_SIZE },
+ { REG_OFFSET_DATA_BASE, 32 }
+};
+
+static const struct cyapa_cmd_len cyapa_smbus_cmds[] = {
+ { CYAPA_SMBUS_RESET, 1 },
+ { CYAPA_SMBUS_POWER_MODE, 1 },
+ { CYAPA_SMBUS_DEV_STATUS, 1 },
+ { CYAPA_SMBUS_GROUP_DATA, sizeof(struct cyapa_reg_data) },
+ { CYAPA_SMBUS_GROUP_CMD, 2 },
+ { CYAPA_SMBUS_GROUP_QUERY, QUERY_DATA_SIZE },
+ { CYAPA_SMBUS_BL_STATUS, 3 },
+ { CYAPA_SMBUS_BL_HEAD, 16 },
+ { CYAPA_SMBUS_BL_CMD, 16 },
+ { CYAPA_SMBUS_BL_DATA, 16 },
+ { CYAPA_SMBUS_BL_ALL, 32 },
+ { CYAPA_SMBUS_BLK_PRODUCT_ID, PRODUCT_ID_SIZE },
+ { CYAPA_SMBUS_BLK_HEAD, 16 },
+};
+
+static ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len,
+ u8 *values)
+{
+ return i2c_smbus_read_i2c_block_data(cyapa->client, reg, len, values);
+}
+
+static ssize_t cyapa_i2c_reg_write_block(struct cyapa *cyapa, u8 reg,
+ size_t len, const u8 *values)
+{
+ return i2c_smbus_write_i2c_block_data(cyapa->client, reg, len, values);
+}
+
+/*
+ * cyapa_smbus_read_block - perform smbus block read command
+ * @cyapa: private data structure of the driver
+ * @cmd: the properly encoded smbus command
+ * @len: expected length of smbus command result
+ * @values: buffer to store smbus command result
+ *
+ * Returns negative errno, else the number of bytes read.
+ *
+ * Note:
+ * In trackpad device, the memory block allocated for I2C register map
+ * is 256 bytes, so the max read block for I2C bus is 256 bytes.
+ */
+static ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len,
+ u8 *values)
+{
+ ssize_t ret;
+ u8 index;
+ u8 smbus_cmd;
+ u8 *buf;
+ struct i2c_client *client = cyapa->client;
+
+ if (!(SMBUS_BYTE_BLOCK_CMD_MASK & cmd))
+ return -EINVAL;
+
+ if (SMBUS_GROUP_BLOCK_CMD_MASK & cmd) {
+ /* read specific block registers command. */
+ smbus_cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
+ ret = i2c_smbus_read_block_data(client, smbus_cmd, values);
+ goto out;
+ }
+
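+	/*
+	 * Otherwise read in I2C_SMBUS_BLOCK_MAX-sized chunks, encoding the
+	 * chunk index into bits 2-1 of the command byte.
+	 */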
+ ret = 0;
+ for (index = 0; index * I2C_SMBUS_BLOCK_MAX < len; index++) {
+ smbus_cmd = SMBUS_ENCODE_IDX(cmd, index);
+ smbus_cmd = SMBUS_ENCODE_RW(smbus_cmd, SMBUS_READ);
+ buf = values + I2C_SMBUS_BLOCK_MAX * index;
+ ret = i2c_smbus_read_block_data(client, smbus_cmd, buf);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ return ret > 0 ? len : ret;
+}
+
+static s32 cyapa_read_byte(struct cyapa *cyapa, u8 cmd_idx)
+{
+ u8 cmd;
+
+ if (cyapa->smbus) {
+ cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+ cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
+ } else {
+ cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+ }
+ return i2c_smbus_read_byte_data(cyapa->client, cmd);
+}
+
+static s32 cyapa_write_byte(struct cyapa *cyapa, u8 cmd_idx, u8 value)
+{
+ u8 cmd;
+
+ if (cyapa->smbus) {
+ cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+ cmd = SMBUS_ENCODE_RW(cmd, SMBUS_WRITE);
+ } else {
+ cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+ }
+ return i2c_smbus_write_byte_data(cyapa->client, cmd, value);
+}
+
+static ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values)
+{
+ u8 cmd;
+ size_t len;
+
+ if (cyapa->smbus) {
+ cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+ len = cyapa_smbus_cmds[cmd_idx].len;
+ return cyapa_smbus_read_block(cyapa, cmd, len, values);
+ } else {
+ cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+ len = cyapa_i2c_cmds[cmd_idx].len;
+ return cyapa_i2c_reg_read_block(cyapa, cmd, len, values);
+ }
+}
+
+/*
+ * Query device for its current operating state.
+ */
+static int cyapa_get_state(struct cyapa *cyapa)
+{
+ int ret;
+ u8 status[BL_STATUS_SIZE];
+
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+
+ /*
+ * Get trackpad status by reading 3 registers starting from 0.
+ * If the device is in the bootloader, this will be BL_HEAD.
+ * If the device is in operation mode, this will be the DATA regs.
+	 */
+ ret = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE,
+ status);
+
+ /*
+ * On smbus systems in OP mode, the i2c_reg_read will fail with
+ * -ETIMEDOUT. In this case, try again using the smbus equivalent
+ * command. This should return a BL_HEAD indicating CYAPA_STATE_OP.
+ */
+ if (cyapa->smbus && (ret == -ETIMEDOUT || ret == -ENXIO))
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_BL_STATUS, status);
+
+ if (ret != BL_STATUS_SIZE)
+ goto error;
+
+ if ((status[REG_OP_STATUS] & OP_STATUS_SRC) == OP_STATUS_SRC) {
+ switch (status[REG_OP_STATUS] & OP_STATUS_DEV) {
+ case CYAPA_DEV_NORMAL:
+ case CYAPA_DEV_BUSY:
+ cyapa->state = CYAPA_STATE_OP;
+ break;
+ default:
+ ret = -EAGAIN;
+ goto error;
+ }
+ } else {
+ if (status[REG_BL_STATUS] & BL_STATUS_BUSY)
+ cyapa->state = CYAPA_STATE_BL_BUSY;
+ else if (status[REG_BL_ERROR] & BL_ERROR_BOOTLOADING)
+ cyapa->state = CYAPA_STATE_BL_ACTIVE;
+ else
+ cyapa->state = CYAPA_STATE_BL_IDLE;
+ }
+
+ return 0;
+error:
+ return (ret < 0) ? ret : -EAGAIN;
+}
+
+/*
+ * Poll device for its status in a loop, waiting up to timeout for a response.
+ *
+ * When the device switches state, it usually takes ~300 ms.
+ * However, when running a new firmware image, the device must calibrate its
+ * sensors, which can take as long as 2 seconds.
+ *
+ * Note: The timeout has granularity of the polling rate, which is 100 ms.
+ *
+ * Returns:
+ * 0 when the device eventually responds with a valid non-busy state.
+ * -ETIMEDOUT if device never responds (too many -EAGAIN)
+ * < 0 other errors
+ */
+static int cyapa_poll_state(struct cyapa *cyapa, unsigned int timeout)
+{
+ int ret;
+ int tries = timeout / 100;
+
+ ret = cyapa_get_state(cyapa);
+ while ((ret || cyapa->state >= CYAPA_STATE_BL_BUSY) && tries--) {
+ msleep(100);
+ ret = cyapa_get_state(cyapa);
+ }
+ return (ret == -EAGAIN || ret == -ETIMEDOUT) ? -ETIMEDOUT : ret;
+}
+
+static int cyapa_bl_deactivate(struct cyapa *cyapa)
+{
+ int ret;
+
+ ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
+ bl_deactivate);
+ if (ret < 0)
+ return ret;
+
+ /* wait for bootloader to switch to idle state; should take < 100ms */
+ msleep(100);
+ ret = cyapa_poll_state(cyapa, 500);
+ if (ret < 0)
+ return ret;
+ if (cyapa->state != CYAPA_STATE_BL_IDLE)
+ return -EAGAIN;
+ return 0;
+}
+
+/*
+ * Exit bootloader
+ *
+ * Send bl_exit command, then wait 50 - 100 ms to let device transition to
+ * operational mode. If this is the first time the device's firmware is
+ * running, it can take up to 2 seconds to calibrate its sensors. So, poll
+ * the device's new state for up to 2 seconds.
+ *
+ * Returns:
+ * -EIO failure while reading from device
+ * -EAGAIN device is stuck in bootloader, b/c it has invalid firmware
+ * 0 device is supported and in operational mode
+ */
+static int cyapa_bl_exit(struct cyapa *cyapa)
+{
+ int ret;
+
+ ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Wait for bootloader to exit, and operation mode to start.
+ * Normally, this takes at least 50 ms.
+ */
+ usleep_range(50000, 100000);
+ /*
+ * In addition, when a device boots for the first time after being
+ * updated to new firmware, it must first calibrate its sensors, which
+ * can take up to an additional 2 seconds.
+ */
+ ret = cyapa_poll_state(cyapa, 2000);
+ if (ret < 0)
+ return ret;
+ if (cyapa->state != CYAPA_STATE_OP)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * Set device power mode.
+ */
+static int cyapa_set_power_mode(struct cyapa *cyapa, u8 power_mode)
+{
+ struct device *dev = &cyapa->client->dev;
+ int ret;
+ u8 power;
+
+ if (cyapa->state != CYAPA_STATE_OP)
+ return 0;
+
+ ret = cyapa_read_byte(cyapa, CYAPA_CMD_POWER_MODE);
+ if (ret < 0)
+ return ret;
+
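+	/* Read-modify-write: preserve the low bits, replace only the mode field. */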
+ power = ret & ~PWR_MODE_MASK;
+ power |= power_mode & PWR_MODE_MASK;
+ ret = cyapa_write_byte(cyapa, CYAPA_CMD_POWER_MODE, power);
+ if (ret < 0)
+ dev_err(dev, "failed to set power_mode 0x%02x err = %d\n",
+ power_mode, ret);
+ return ret;
+}
+
+static int cyapa_get_query_data(struct cyapa *cyapa)
+{
+ u8 query_data[QUERY_DATA_SIZE];
+ int ret;
+
+ if (cyapa->state != CYAPA_STATE_OP)
+ return -EBUSY;
+
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_QUERY, query_data);
+ if (ret < 0)
+ return ret;
+ if (ret != QUERY_DATA_SIZE)
+ return -EIO;
+
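+	/* Assemble the 16-byte product ID string in "AAAAA-BBBBBB-CC" form. */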
+ memcpy(&cyapa->product_id[0], &query_data[0], 5);
+ cyapa->product_id[5] = '-';
+ memcpy(&cyapa->product_id[6], &query_data[5], 6);
+ cyapa->product_id[12] = '-';
+ memcpy(&cyapa->product_id[13], &query_data[11], 2);
+ cyapa->product_id[15] = '\0';
+
+ cyapa->btn_capability = query_data[19] & CAPABILITY_BTN_MASK;
+
+ cyapa->gen = query_data[20] & 0x0f;
+
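+	/* The high nibbles of the X and Y maxima are packed into query_data[21]. */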
+ cyapa->max_abs_x = ((query_data[21] & 0xf0) << 4) | query_data[22];
+ cyapa->max_abs_y = ((query_data[21] & 0x0f) << 8) | query_data[23];
+
+ cyapa->physical_size_x =
+ ((query_data[24] & 0xf0) << 4) | query_data[25];
+ cyapa->physical_size_y =
+ ((query_data[24] & 0x0f) << 8) | query_data[26];
+
+ return 0;
+}
+
+/*
+ * Check if device is operational.
+ *
+ * An operational device is responding, has exited bootloader, and has
+ * firmware supported by this driver.
+ *
+ * Returns:
+ * -EBUSY no device or in bootloader
+ * -EIO failure while reading from device
+ * -EAGAIN device is still in bootloader
+ * if ->state = CYAPA_STATE_BL_IDLE, device has invalid firmware
+ * -EINVAL device is in operational mode, but not supported by this driver
+ * 0 device is supported
+ */
+static int cyapa_check_is_operational(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ static const char unique_str[] = "CYTRA";
+ int ret;
+
+ ret = cyapa_poll_state(cyapa, 2000);
+ if (ret < 0)
+ return ret;
+ switch (cyapa->state) {
+ case CYAPA_STATE_BL_ACTIVE:
+ ret = cyapa_bl_deactivate(cyapa);
+ if (ret)
+ return ret;
+
+ /* Fallthrough state */
+ case CYAPA_STATE_BL_IDLE:
+ ret = cyapa_bl_exit(cyapa);
+ if (ret)
+ return ret;
+
+ /* Fallthrough state */
+ case CYAPA_STATE_OP:
+ ret = cyapa_get_query_data(cyapa);
+ if (ret < 0)
+ return ret;
+
+ /* only support firmware protocol gen3 */
+ if (cyapa->gen != CYAPA_GEN3) {
+ dev_err(dev, "unsupported protocol version (%d)",
+ cyapa->gen);
+ return -EINVAL;
+ }
+
+ /* only support product ID starting with CYTRA */
+ if (memcmp(cyapa->product_id, unique_str,
+ sizeof(unique_str) - 1) != 0) {
+ dev_err(dev, "unsupported product ID (%s)\n",
+ cyapa->product_id);
+ return -EINVAL;
+ }
+ return 0;
+
+ default:
+ return -EIO;
+ }
+ return 0;
+}
+
+static irqreturn_t cyapa_irq(int irq, void *dev_id)
+{
+ struct cyapa *cyapa = dev_id;
+ struct device *dev = &cyapa->client->dev;
+ struct input_dev *input = cyapa->input;
+ struct cyapa_reg_data data;
+ int i;
+ int ret;
+ int num_fingers;
+
+ if (device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
+
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
+ if (ret != sizeof(data))
+ goto out;
+
+ if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
+ (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
+ (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) {
+ goto out;
+ }
+
+ num_fingers = (data.finger_btn >> 4) & 0x0f;
+ for (i = 0; i < num_fingers; i++) {
+ const struct cyapa_touch *touch = &data.touches[i];
+ /* Note: touch->id range is 1 to 15; slots are 0 to 14. */
+ int slot = touch->id - 1;
+
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X,
+ ((touch->xy_hi & 0xf0) << 4) | touch->x_lo);
+ input_report_abs(input, ABS_MT_POSITION_Y,
+ ((touch->xy_hi & 0x0f) << 8) | touch->y_lo);
+ input_report_abs(input, ABS_MT_PRESSURE, touch->pressure);
+ }
+
+ input_mt_sync_frame(input);
+
+ if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
+ input_report_key(input, BTN_LEFT,
+ data.finger_btn & OP_DATA_LEFT_BTN);
+
+ if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
+ input_report_key(input, BTN_MIDDLE,
+ data.finger_btn & OP_DATA_MIDDLE_BTN);
+
+ if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
+ input_report_key(input, BTN_RIGHT,
+ data.finger_btn & OP_DATA_RIGHT_BTN);
+
+ input_sync(input);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static u8 cyapa_check_adapter_functionality(struct i2c_client *client)
+{
+ u8 ret = CYAPA_ADAPTER_FUNC_NONE;
+
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ ret |= CYAPA_ADAPTER_FUNC_I2C;
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ ret |= CYAPA_ADAPTER_FUNC_SMBUS;
+ return ret;
+}
+
+static int cyapa_create_input_dev(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ int ret;
+ struct input_dev *input;
+
+ if (!cyapa->physical_size_x || !cyapa->physical_size_y)
+ return -EINVAL;
+
+ input = cyapa->input = input_allocate_device();
+ if (!input) {
+ dev_err(dev, "allocate memory for input device failed\n");
+ return -ENOMEM;
+ }
+
+ input->name = CYAPA_NAME;
+ input->phys = cyapa->phys;
+ input->id.bustype = BUS_I2C;
+ input->id.version = 1;
+ input->id.product = 0; /* means any product in eventcomm. */
+ input->dev.parent = &cyapa->client->dev;
+
+ input_set_drvdata(input, cyapa);
+
+ __set_bit(EV_ABS, input->evbit);
+
+ /* finger position */
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, cyapa->max_abs_x, 0,
+ 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cyapa->max_abs_y, 0,
+ 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ cyapa->max_abs_x / cyapa->physical_size_x);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ cyapa->max_abs_y / cyapa->physical_size_y);
+
+ if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
+ __set_bit(BTN_LEFT, input->keybit);
+ if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
+ __set_bit(BTN_MIDDLE, input->keybit);
+ if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
+ __set_bit(BTN_RIGHT, input->keybit);
+
+ if (cyapa->btn_capability == CAPABILITY_LEFT_BTN_MASK)
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+ /* handle pointer emulation and unused slots in core */
+ ret = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
+ INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
+ if (ret) {
+ dev_err(dev, "allocate memory for MT slots failed, %d\n", ret);
+ goto err_free_device;
+ }
+
+ /* Register the device in input subsystem */
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(dev, "input device register failed, %d\n", ret);
+ goto err_free_device;
+ }
+ return 0;
+
+err_free_device:
+ input_free_device(input);
+ cyapa->input = NULL;
+ return ret;
+}
+
+static int cyapa_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ int ret;
+ u8 adapter_func;
+ struct cyapa *cyapa;
+ struct device *dev = &client->dev;
+
+ adapter_func = cyapa_check_adapter_functionality(client);
+ if (adapter_func == CYAPA_ADAPTER_FUNC_NONE) {
+ dev_err(dev, "not a supported I2C/SMBus adapter\n");
+ return -EIO;
+ }
+
+ cyapa = kzalloc(sizeof(struct cyapa), GFP_KERNEL);
+ if (!cyapa) {
+ dev_err(dev, "allocate memory for cyapa failed\n");
+ return -ENOMEM;
+ }
+
+ cyapa->gen = CYAPA_GEN3;
+ cyapa->client = client;
+ i2c_set_clientdata(client, cyapa);
+ sprintf(cyapa->phys, "i2c-%d-%04x/input0", client->adapter->nr,
+ client->addr);
+
+ /* i2c isn't supported, use smbus */
+ if (adapter_func == CYAPA_ADAPTER_FUNC_SMBUS)
+ cyapa->smbus = true;
+ cyapa->state = CYAPA_STATE_NO_DEVICE;
+ ret = cyapa_check_is_operational(cyapa);
+ if (ret) {
+ dev_err(dev, "device not operational, %d\n", ret);
+ goto err_mem_free;
+ }
+
+ ret = cyapa_create_input_dev(cyapa);
+ if (ret) {
+ dev_err(dev, "create input_dev instance failed, %d\n", ret);
+ goto err_mem_free;
+ }
+
+ ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
+ if (ret) {
+ dev_err(dev, "set active power failed, %d\n", ret);
+ goto err_unregister_device;
+ }
+
+ cyapa->irq = client->irq;
+ ret = request_threaded_irq(cyapa->irq,
+ NULL,
+ cyapa_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "cyapa",
+ cyapa);
+ if (ret) {
+		dev_err(dev, "IRQ request failed: %d\n", ret);
+ goto err_unregister_device;
+ }
+
+ return 0;
+
+err_unregister_device:
+ input_unregister_device(cyapa->input);
+err_mem_free:
+ kfree(cyapa);
+
+ return ret;
+}
+
+static int cyapa_remove(struct i2c_client *client)
+{
+ struct cyapa *cyapa = i2c_get_clientdata(client);
+
+ free_irq(cyapa->irq, cyapa);
+ input_unregister_device(cyapa->input);
+ cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
+ kfree(cyapa);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cyapa_suspend(struct device *dev)
+{
+ int ret;
+ u8 power_mode;
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+
+ disable_irq(cyapa->irq);
+
+ /*
+ * Set trackpad device to idle mode if wakeup is allowed,
+ * otherwise turn off.
+ */
+ power_mode = device_may_wakeup(dev) ? PWR_MODE_IDLE
+ : PWR_MODE_OFF;
+ ret = cyapa_set_power_mode(cyapa, power_mode);
+ if (ret < 0)
+ dev_err(dev, "set power mode failed, %d\n", ret);
+
+ if (device_may_wakeup(dev))
+ cyapa->irq_wake = (enable_irq_wake(cyapa->irq) == 0);
+ return 0;
+}
+
+static int cyapa_resume(struct device *dev)
+{
+ int ret;
+ struct cyapa *cyapa = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev) && cyapa->irq_wake)
+ disable_irq_wake(cyapa->irq);
+
+ ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
+ if (ret)
+ dev_warn(dev, "resume active power failed, %d\n", ret);
+
+ enable_irq(cyapa->irq);
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(cyapa_pm_ops, cyapa_suspend, cyapa_resume);
+
+static const struct i2c_device_id cyapa_id_table[] = {
+ { "cyapa", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, cyapa_id_table);
+
+static struct i2c_driver cyapa_driver = {
+ .driver = {
+ .name = "cyapa",
+ .owner = THIS_MODULE,
+ .pm = &cyapa_pm_ops,
+ },
+
+ .probe = cyapa_probe,
+ .remove = cyapa_remove,
+ .id_table = cyapa_id_table,
+};
+
+module_i2c_driver(cyapa_driver);
+
+MODULE_DESCRIPTION("Cypress APA I2C Trackpad Driver");
+MODULE_AUTHOR("Dudley Du <dudl@cypress.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
new file mode 100644
index 000000000000..1673dc6c8092
--- /dev/null
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -0,0 +1,725 @@
+/*
+ * Cypress Trackpad PS/2 mouse driver
+ *
+ * Copyright (c) 2012 Cypress Semiconductor Corporation.
+ *
+ * Author:
+ * Dudley Du <dudl@cypress.com>
+ *
+ * Additional contributors include:
+ * Kamal Mostafa <kamal@canonical.com>
+ * Kyle Fazzari <git@status.e4ward.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/serio.h>
+#include <linux/libps2.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "cypress_ps2.h"
+
+#undef CYTP_DEBUG_VERBOSE /* define this and DEBUG for more verbose dump */
+
+static void cypress_set_packet_size(struct psmouse *psmouse, unsigned int n)
+{
+ struct cytp_data *cytp = psmouse->private;
+ cytp->pkt_size = n;
+}
+
+static const unsigned char cytp_rate[] = {10, 20, 40, 60, 100, 200};
+static const unsigned char cytp_resolution[] = {0x00, 0x01, 0x02, 0x03};
+
+static int cypress_ps2_sendbyte(struct psmouse *psmouse, int value)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ if (ps2_sendbyte(ps2dev, value & 0xff, CYTP_CMD_TIMEOUT) < 0) {
+ psmouse_dbg(psmouse,
+ "sending command 0x%02x failed, resp 0x%02x\n",
+ value & 0xff, ps2dev->nak);
+ if (ps2dev->nak == CYTP_PS2_RETRY)
+ return CYTP_PS2_RETRY;
+ else
+ return CYTP_PS2_ERROR;
+ }
+
+#ifdef CYTP_DEBUG_VERBOSE
+ psmouse_dbg(psmouse, "sending command 0x%02x succeeded, resp 0xfa\n",
+ value & 0xff);
+#endif
+
+ return 0;
+}
+
+static int cypress_ps2_ext_cmd(struct psmouse *psmouse, unsigned short cmd,
+ unsigned char data)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int tries = CYTP_PS2_CMD_TRIES;
+ int rc;
+
+ ps2_begin_command(ps2dev);
+
+ do {
+ /*
+ * Send extension command byte (0xE8 or 0xF3).
+ * If sending the command fails, send recovery command
+ * to make the device return to the ready state.
+ */
+ rc = cypress_ps2_sendbyte(psmouse, cmd & 0xff);
+ if (rc == CYTP_PS2_RETRY) {
+ rc = cypress_ps2_sendbyte(psmouse, 0x00);
+ if (rc == CYTP_PS2_RETRY)
+ rc = cypress_ps2_sendbyte(psmouse, 0x0a);
+ }
+ if (rc == CYTP_PS2_ERROR)
+ continue;
+
+ rc = cypress_ps2_sendbyte(psmouse, data);
+ if (rc == CYTP_PS2_RETRY)
+ rc = cypress_ps2_sendbyte(psmouse, data);
+ if (rc == CYTP_PS2_ERROR)
+ continue;
+ else
+ break;
+ } while (--tries > 0);
+
+ ps2_end_command(ps2dev);
+
+ return rc;
+}
+
+static int cypress_ps2_read_cmd_status(struct psmouse *psmouse,
+ unsigned char cmd,
+ unsigned char *param)
+{
+ int rc;
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ enum psmouse_state old_state;
+ int pktsize;
+
+ ps2_begin_command(&psmouse->ps2dev);
+
+ old_state = psmouse->state;
+ psmouse->state = PSMOUSE_CMD_MODE;
+ psmouse->pktcnt = 0;
+
+ pktsize = (cmd == CYTP_CMD_READ_TP_METRICS) ? 8 : 3;
+ memset(param, 0, pktsize);
+
+ rc = cypress_ps2_sendbyte(psmouse, 0xe9);
+ if (rc < 0)
+ goto out;
+
+ wait_event_timeout(ps2dev->wait,
+ (psmouse->pktcnt >= pktsize),
+ msecs_to_jiffies(CYTP_CMD_TIMEOUT));
+
+ memcpy(param, psmouse->packet, pktsize);
+
+ psmouse_dbg(psmouse, "Command 0x%02x response data (0x): %*ph\n",
+ cmd, pktsize, param);
+
+out:
+ psmouse->state = old_state;
+ psmouse->pktcnt = 0;
+
+ ps2_end_command(&psmouse->ps2dev);
+
+ return rc;
+}
+
+static bool cypress_verify_cmd_state(struct psmouse *psmouse,
+ unsigned char cmd, unsigned char *param)
+{
+ bool rate_match = false;
+ bool resolution_match = false;
+ int i;
+
+ /* callers will do further checking. */
+ if (cmd == CYTP_CMD_READ_CYPRESS_ID ||
+ cmd == CYTP_CMD_STANDARD_MODE ||
+ cmd == CYTP_CMD_READ_TP_METRICS)
+ return true;
+
+ if ((~param[0] & DFLT_RESP_BITS_VALID) == DFLT_RESP_BITS_VALID &&
+ (param[0] & DFLT_RESP_BIT_MODE) == DFLT_RESP_STREAM_MODE) {
+ for (i = 0; i < sizeof(cytp_resolution); i++)
+ if (cytp_resolution[i] == param[1])
+ resolution_match = true;
+
+ for (i = 0; i < sizeof(cytp_rate); i++)
+ if (cytp_rate[i] == param[2])
+ rate_match = true;
+
+ if (resolution_match && rate_match)
+ return true;
+ }
+
+ psmouse_dbg(psmouse, "verify cmd state failed.\n");
+ return false;
+}
+
+static int cypress_send_ext_cmd(struct psmouse *psmouse, unsigned char cmd,
+ unsigned char *param)
+{
+ int tries = CYTP_PS2_CMD_TRIES;
+ int rc;
+
+ psmouse_dbg(psmouse, "send extension cmd 0x%02x, [%d %d %d %d]\n",
+ cmd, DECODE_CMD_AA(cmd), DECODE_CMD_BB(cmd),
+ DECODE_CMD_CC(cmd), DECODE_CMD_DD(cmd));
+
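+	/*
+	 * The command is transferred as four PSMOUSE_CMD_SETRES sub-commands
+	 * carrying its 2-bit dd, cc, bb and aa fields in turn; the response
+	 * is then read back with an 0xe9 status request.
+	 */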
+ do {
+ cypress_ps2_ext_cmd(psmouse,
+ PSMOUSE_CMD_SETRES, DECODE_CMD_DD(cmd));
+ cypress_ps2_ext_cmd(psmouse,
+ PSMOUSE_CMD_SETRES, DECODE_CMD_CC(cmd));
+ cypress_ps2_ext_cmd(psmouse,
+ PSMOUSE_CMD_SETRES, DECODE_CMD_BB(cmd));
+ cypress_ps2_ext_cmd(psmouse,
+ PSMOUSE_CMD_SETRES, DECODE_CMD_AA(cmd));
+
+ rc = cypress_ps2_read_cmd_status(psmouse, cmd, param);
+ if (rc)
+ continue;
+
+ if (cypress_verify_cmd_state(psmouse, cmd, param))
+ return 0;
+
+ } while (--tries > 0);
+
+ return -EIO;
+}
+
+int cypress_detect(struct psmouse *psmouse, bool set_properties)
+{
+ unsigned char param[3];
+
+ if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_CYPRESS_ID, param))
+ return -ENODEV;
+
+ /* Check for Cypress Trackpad signature bytes: 0x33 0xCC */
+ if (param[0] != 0x33 || param[1] != 0xCC)
+ return -ENODEV;
+
+ if (set_properties) {
+ psmouse->vendor = "Cypress";
+ psmouse->name = "Trackpad";
+ }
+
+ return 0;
+}
+
+static int cypress_read_fw_version(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+ unsigned char param[3];
+
+ if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_CYPRESS_ID, param))
+ return -ENODEV;
+
+ /* Check for Cypress Trackpad signature bytes: 0x33 0xCC */
+ if (param[0] != 0x33 || param[1] != 0xCC)
+ return -ENODEV;
+
+ cytp->fw_version = param[2] & FW_VERSION_MASX;
+ cytp->tp_metrics_supported = (param[2] & TP_METRICS_MASK) ? 1 : 0;
+
+ psmouse_dbg(psmouse, "cytp->fw_version = %d\n", cytp->fw_version);
+ psmouse_dbg(psmouse, "cytp->tp_metrics_supported = %d\n",
+ cytp->tp_metrics_supported);
+
+ return 0;
+}
+
+static int cypress_read_tp_metrics(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+ unsigned char param[8];
+
+ /* set default values for tp metrics. */
+ cytp->tp_width = CYTP_DEFAULT_WIDTH;
+ cytp->tp_high = CYTP_DEFAULT_HIGH;
+ cytp->tp_max_abs_x = CYTP_ABS_MAX_X;
+ cytp->tp_max_abs_y = CYTP_ABS_MAX_Y;
+ cytp->tp_min_pressure = CYTP_MIN_PRESSURE;
+ cytp->tp_max_pressure = CYTP_MAX_PRESSURE;
+ cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
+ cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;
+
+ memset(param, 0, sizeof(param));
+ if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_TP_METRICS, param) == 0) {
+ /* Update trackpad parameters. */
+ cytp->tp_max_abs_x = (param[1] << 8) | param[0];
+ cytp->tp_max_abs_y = (param[3] << 8) | param[2];
+ cytp->tp_min_pressure = param[4];
+ cytp->tp_max_pressure = param[5];
+ }
+
+ if (!cytp->tp_max_pressure ||
+ cytp->tp_max_pressure < cytp->tp_min_pressure ||
+ !cytp->tp_width || !cytp->tp_high ||
+ !cytp->tp_max_abs_x ||
+ cytp->tp_max_abs_x < cytp->tp_width ||
+ !cytp->tp_max_abs_y ||
+ cytp->tp_max_abs_y < cytp->tp_high)
+ return -EINVAL;
+
+ cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
+ cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;
+
+#ifdef CYTP_DEBUG_VERBOSE
+ psmouse_dbg(psmouse, "Dump trackpad hardware configuration as below:\n");
+ psmouse_dbg(psmouse, "cytp->tp_width = %d\n", cytp->tp_width);
+ psmouse_dbg(psmouse, "cytp->tp_high = %d\n", cytp->tp_high);
+ psmouse_dbg(psmouse, "cytp->tp_max_abs_x = %d\n", cytp->tp_max_abs_x);
+ psmouse_dbg(psmouse, "cytp->tp_max_abs_y = %d\n", cytp->tp_max_abs_y);
+ psmouse_dbg(psmouse, "cytp->tp_min_pressure = %d\n", cytp->tp_min_pressure);
+ psmouse_dbg(psmouse, "cytp->tp_max_pressure = %d\n", cytp->tp_max_pressure);
+ psmouse_dbg(psmouse, "cytp->tp_res_x = %d\n", cytp->tp_res_x);
+ psmouse_dbg(psmouse, "cytp->tp_res_y = %d\n", cytp->tp_res_y);
+
+ psmouse_dbg(psmouse, "tp_type_APA = %d\n",
+ (param[6] & TP_METRICS_BIT_APA) ? 1 : 0);
+ psmouse_dbg(psmouse, "tp_type_MTG = %d\n",
+ (param[6] & TP_METRICS_BIT_MTG) ? 1 : 0);
+ psmouse_dbg(psmouse, "tp_palm = %d\n",
+ (param[6] & TP_METRICS_BIT_PALM) ? 1 : 0);
+ psmouse_dbg(psmouse, "tp_stubborn = %d\n",
+ (param[6] & TP_METRICS_BIT_STUBBORN) ? 1 : 0);
+ psmouse_dbg(psmouse, "tp_1f_jitter = %d\n",
+ (param[6] & TP_METRICS_BIT_1F_JITTER) >> 2);
+ psmouse_dbg(psmouse, "tp_2f_jitter = %d\n",
+ (param[6] & TP_METRICS_BIT_2F_JITTER) >> 4);
+ psmouse_dbg(psmouse, "tp_1f_spike = %d\n",
+ param[7] & TP_METRICS_BIT_1F_SPIKE);
+ psmouse_dbg(psmouse, "tp_2f_spike = %d\n",
+ (param[7] & TP_METRICS_BIT_2F_SPIKE) >> 2);
+ psmouse_dbg(psmouse, "tp_abs_packet_format_set = %d\n",
+ (param[7] & TP_METRICS_BIT_ABS_PKT_FORMAT_SET) >> 4);
+#endif
+
+ return 0;
+}
+
+static int cypress_query_hardware(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+ int ret;
+
+ ret = cypress_read_fw_version(psmouse);
+ if (ret)
+ return ret;
+
+ if (cytp->tp_metrics_supported) {
+ ret = cypress_read_tp_metrics(psmouse);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cypress_set_absolute_mode(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+ unsigned char param[3];
+
+ if (cypress_send_ext_cmd(psmouse, CYTP_CMD_ABS_WITH_PRESSURE_MODE, param) < 0)
+ return -1;
+
+ cytp->mode = (cytp->mode & ~CYTP_BIT_ABS_REL_MASK)
+ | CYTP_BIT_ABS_PRESSURE;
+ cypress_set_packet_size(psmouse, 5);
+
+ return 0;
+}
+
+/*
+ * Reset trackpad device.
+ * This is also the default mode when trackpad powered on.
+ */
+static void cypress_reset(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+
+ cytp->mode = 0;
+
+ psmouse_reset(psmouse);
+}
+
+static int cypress_set_input_params(struct input_dev *input,
+ struct cytp_data *cytp)
+{
+ int ret;
+
+ if (!cytp->tp_res_x || !cytp->tp_res_y)
+ return -EINVAL;
+
+ __set_bit(EV_ABS, input->evbit);
+ input_set_abs_params(input, ABS_X, 0, cytp->tp_max_abs_x, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, cytp->tp_max_abs_y, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE,
+ cytp->tp_min_pressure, cytp->tp_max_pressure, 0, 0);
+ input_set_abs_params(input, ABS_TOOL_WIDTH, 0, 255, 0, 0);
+
+ /* finger position */
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, cytp->tp_max_abs_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cytp->tp_max_abs_y, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+
+ ret = input_mt_init_slots(input, CYTP_MAX_MT_SLOTS,
+ INPUT_MT_DROP_UNUSED|INPUT_MT_TRACK);
+ if (ret < 0)
+ return ret;
+
+ __set_bit(INPUT_PROP_SEMI_MT, input->propbit);
+
+ input_abs_set_res(input, ABS_X, cytp->tp_res_x);
+ input_abs_set_res(input, ABS_Y, cytp->tp_res_y);
+
+ input_abs_set_res(input, ABS_MT_POSITION_X, cytp->tp_res_x);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, cytp->tp_res_y);
+
+ __set_bit(BTN_TOUCH, input->keybit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+ __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+ __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+ __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
+
+ __clear_bit(EV_REL, input->evbit);
+ __clear_bit(REL_X, input->relbit);
+ __clear_bit(REL_Y, input->relbit);
+
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(BTN_RIGHT, input->keybit);
+ __set_bit(BTN_MIDDLE, input->keybit);
+
+ input_set_drvdata(input, cytp);
+
+ return 0;
+}
+
+static int cypress_get_finger_count(unsigned char header_byte)
+{
+ unsigned char bits6_7;
+ int finger_count;
+
+ bits6_7 = header_byte >> 6;
+ finger_count = bits6_7 & 0x03;
+
+ if (finger_count == 1)
+ return 1;
+
+ if (header_byte & ABS_HSCROLL_BIT) {
+		/* With the HSCROLL bit set, counts 0 and 2 encode 4 and 5 fingers. */
+ switch (finger_count) {
+ case 0: return 4;
+ case 2: return 5;
+ default:
+ /* Invalid contact (e.g. palm). Ignore it. */
+ return -1;
+ }
+ }
+
+ return finger_count;
+}
+
+
+static int cypress_parse_packet(struct psmouse *psmouse,
+ struct cytp_data *cytp, struct cytp_report_data *report_data)
+{
+ unsigned char *packet = psmouse->packet;
+ unsigned char header_byte = packet[0];
+ int contact_cnt;
+
+ memset(report_data, 0, sizeof(struct cytp_report_data));
+
+ contact_cnt = cypress_get_finger_count(header_byte);
+
+ if (contact_cnt < 0) /* e.g. palm detect */
+ return -EINVAL;
+
+ report_data->contact_cnt = contact_cnt;
+
+ report_data->tap = (header_byte & ABS_MULTIFINGER_TAP) ? 1 : 0;
+
+ if (report_data->contact_cnt == 1) {
+ report_data->contacts[0].x =
+ ((packet[1] & 0x70) << 4) | packet[2];
+ report_data->contacts[0].y =
+ ((packet[1] & 0x07) << 8) | packet[3];
+ if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
+ report_data->contacts[0].z = packet[4];
+
+ } else if (report_data->contact_cnt >= 2) {
+ report_data->contacts[0].x =
+ ((packet[1] & 0x70) << 4) | packet[2];
+ report_data->contacts[0].y =
+ ((packet[1] & 0x07) << 8) | packet[3];
+ if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
+ report_data->contacts[0].z = packet[4];
+
+ report_data->contacts[1].x =
+ ((packet[5] & 0xf0) << 4) | packet[6];
+ report_data->contacts[1].y =
+ ((packet[5] & 0x0f) << 8) | packet[7];
+ if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
+ report_data->contacts[1].z = report_data->contacts[0].z;
+ }
+
+ report_data->left = (header_byte & BTN_LEFT_BIT) ? 1 : 0;
+ report_data->right = (header_byte & BTN_RIGHT_BIT) ? 1 : 0;
+
+ /*
+	 * This is only true if one of the mouse buttons was tapped. Make
+ * sure it doesn't turn into a click. The regular tap-to-click
+ * functionality will handle that on its own. If we don't do this,
+ * disabling tap-to-click won't affect the mouse button zones.
+ */
+ if (report_data->tap)
+ report_data->left = 0;
+
+#ifdef CYTP_DEBUG_VERBOSE
+ {
+ int i;
+ int n = report_data->contact_cnt;
+ psmouse_dbg(psmouse, "Dump parsed report data as below:\n");
+ psmouse_dbg(psmouse, "contact_cnt = %d\n",
+ report_data->contact_cnt);
+ if (n > CYTP_MAX_MT_SLOTS)
+ n = CYTP_MAX_MT_SLOTS;
+ for (i = 0; i < n; i++)
+ psmouse_dbg(psmouse, "contacts[%d] = {%d, %d, %d}\n", i,
+ report_data->contacts[i].x,
+ report_data->contacts[i].y,
+ report_data->contacts[i].z);
+ psmouse_dbg(psmouse, "left = %d\n", report_data->left);
+ psmouse_dbg(psmouse, "right = %d\n", report_data->right);
+ psmouse_dbg(psmouse, "middle = %d\n", report_data->middle);
+ }
+#endif
+
+ return 0;
+}
+
+static void cypress_process_packet(struct psmouse *psmouse, bool zero_pkt)
+{
+ int i;
+ struct input_dev *input = psmouse->dev;
+ struct cytp_data *cytp = psmouse->private;
+ struct cytp_report_data report_data;
+ struct cytp_contact *contact;
+ struct input_mt_pos pos[CYTP_MAX_MT_SLOTS];
+ int slots[CYTP_MAX_MT_SLOTS];
+ int n;
+
+ if (cypress_parse_packet(psmouse, cytp, &report_data))
+ return;
+
+ n = report_data.contact_cnt;
+
+ if (n > CYTP_MAX_MT_SLOTS)
+ n = CYTP_MAX_MT_SLOTS;
+
+ for (i = 0; i < n; i++) {
+ contact = &report_data.contacts[i];
+ pos[i].x = contact->x;
+ pos[i].y = contact->y;
+ }
+
+ input_mt_assign_slots(input, slots, pos, n);
+
+ for (i = 0; i < n; i++) {
+ contact = &report_data.contacts[i];
+ input_mt_slot(input, slots[i]);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X, contact->x);
+ input_report_abs(input, ABS_MT_POSITION_Y, contact->y);
+ input_report_abs(input, ABS_MT_PRESSURE, contact->z);
+ }
+
+ input_mt_sync_frame(input);
+
+ input_mt_report_finger_count(input, report_data.contact_cnt);
+
+ input_report_key(input, BTN_LEFT, report_data.left);
+ input_report_key(input, BTN_RIGHT, report_data.right);
+ input_report_key(input, BTN_MIDDLE, report_data.middle);
+
+ input_sync(input);
+}
+
+static psmouse_ret_t cypress_validate_byte(struct psmouse *psmouse)
+{
+ int contact_cnt;
+ int index = psmouse->pktcnt - 1;
+ unsigned char *packet = psmouse->packet;
+ struct cytp_data *cytp = psmouse->private;
+
+ if (index < 0 || index > cytp->pkt_size)
+ return PSMOUSE_BAD_DATA;
+
+ if (index == 0 && (packet[0] & 0xfc) == 0) {
+ /* call packet process for reporting finger leave. */
+ cypress_process_packet(psmouse, 1);
+ return PSMOUSE_FULL_PACKET;
+ }
+
+ /*
+ * Perform validation (and adjust packet size) based only on the
+ * first byte; allow all further bytes through.
+ */
+ if (index != 0)
+ return PSMOUSE_GOOD_DATA;
+
+ /*
+ * If absolute/relative mode bit has not been set yet, just pass
+ * the byte through.
+ */
+ if ((cytp->mode & CYTP_BIT_ABS_REL_MASK) == 0)
+ return PSMOUSE_GOOD_DATA;
+
+ if ((packet[0] & 0x08) == 0x08)
+ return PSMOUSE_BAD_DATA;
+
+ contact_cnt = cypress_get_finger_count(packet[0]);
+
+ if (contact_cnt < 0)
+ return PSMOUSE_BAD_DATA;
+
+ if (cytp->mode & CYTP_BIT_ABS_NO_PRESSURE)
+ cypress_set_packet_size(psmouse, contact_cnt == 2 ? 7 : 4);
+ else
+ cypress_set_packet_size(psmouse, contact_cnt == 2 ? 8 : 5);
+
+ return PSMOUSE_GOOD_DATA;
+}
+
+static psmouse_ret_t cypress_protocol_handler(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp = psmouse->private;
+
+ if (psmouse->pktcnt >= cytp->pkt_size) {
+ cypress_process_packet(psmouse, 0);
+ return PSMOUSE_FULL_PACKET;
+ }
+
+ return cypress_validate_byte(psmouse);
+}
+
+static void cypress_set_rate(struct psmouse *psmouse, unsigned int rate)
+{
+ struct cytp_data *cytp = psmouse->private;
+
+ if (rate >= 80) {
+ psmouse->rate = 80;
+ cytp->mode |= CYTP_BIT_HIGH_RATE;
+ } else {
+ psmouse->rate = 40;
+ cytp->mode &= ~CYTP_BIT_HIGH_RATE;
+ }
+
+ ps2_command(&psmouse->ps2dev, (unsigned char *)&psmouse->rate,
+ PSMOUSE_CMD_SETRATE);
+}
+
+static void cypress_disconnect(struct psmouse *psmouse)
+{
+ cypress_reset(psmouse);
+ kfree(psmouse->private);
+ psmouse->private = NULL;
+}
+
+static int cypress_reconnect(struct psmouse *psmouse)
+{
+ int tries = CYTP_PS2_CMD_TRIES;
+ int rc;
+
+ do {
+ cypress_reset(psmouse);
+ rc = cypress_detect(psmouse, false);
+ } while (rc && (--tries > 0));
+
+ if (rc) {
+ psmouse_err(psmouse, "Reconnect: unable to detect trackpad.\n");
+ return -1;
+ }
+
+ if (cypress_set_absolute_mode(psmouse)) {
+ psmouse_err(psmouse, "Reconnect: Unable to initialize Cypress absolute mode.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int cypress_init(struct psmouse *psmouse)
+{
+ struct cytp_data *cytp;
+
+	cytp = kzalloc(sizeof(struct cytp_data), GFP_KERNEL);
+	psmouse->private = cytp;
+ if (cytp == NULL)
+ return -ENOMEM;
+
+ cypress_reset(psmouse);
+
+ psmouse->pktsize = 8;
+
+ if (cypress_query_hardware(psmouse)) {
+ psmouse_err(psmouse, "Unable to query Trackpad hardware.\n");
+ goto err_exit;
+ }
+
+ if (cypress_set_absolute_mode(psmouse)) {
+ psmouse_err(psmouse, "init: Unable to initialize Cypress absolute mode.\n");
+ goto err_exit;
+ }
+
+ if (cypress_set_input_params(psmouse->dev, cytp) < 0) {
+ psmouse_err(psmouse, "init: Unable to set input params.\n");
+ goto err_exit;
+ }
+
+ psmouse->model = 1;
+ psmouse->protocol_handler = cypress_protocol_handler;
+ psmouse->set_rate = cypress_set_rate;
+ psmouse->disconnect = cypress_disconnect;
+ psmouse->reconnect = cypress_reconnect;
+ psmouse->cleanup = cypress_reset;
+ psmouse->resync_time = 0;
+
+ return 0;
+
+err_exit:
+ /*
+	 * Reset the Cypress Trackpad as a standard mouse. Then
+	 * let the psmouse driver communicate with it as a default PS/2 mouse.
+ */
+ cypress_reset(psmouse);
+
+ psmouse->private = NULL;
+ kfree(cytp);
+
+ return -1;
+}
+
+bool cypress_supported(void)
+{
+ return true;
+}
diff --git a/drivers/input/mouse/cypress_ps2.h b/drivers/input/mouse/cypress_ps2.h
new file mode 100644
index 000000000000..4720f21d2d70
--- /dev/null
+++ b/drivers/input/mouse/cypress_ps2.h
@@ -0,0 +1,191 @@
+#ifndef _CYPRESS_PS2_H
+#define _CYPRESS_PS2_H
+
+#include "psmouse.h"
+
+#define CMD_BITS_MASK 0x03
+#define COMPOSIT(x, s) (((x) & CMD_BITS_MASK) << (s))
+
+#define ENCODE_CMD(aa, bb, cc, dd) \
+ (COMPOSIT((aa), 6) | COMPOSIT((bb), 4) | COMPOSIT((cc), 2) | COMPOSIT((dd), 0))
+#define CYTP_CMD_ABS_NO_PRESSURE_MODE ENCODE_CMD(0, 1, 0, 0)
+#define CYTP_CMD_ABS_WITH_PRESSURE_MODE ENCODE_CMD(0, 1, 0, 1)
+#define CYTP_CMD_SMBUS_MODE ENCODE_CMD(0, 1, 1, 0)
+#define CYTP_CMD_STANDARD_MODE ENCODE_CMD(0, 2, 0, 0) /* not implemented yet. */
+#define CYTP_CMD_CYPRESS_REL_MODE ENCODE_CMD(1, 1, 1, 1) /* not implemented yet. */
+#define CYTP_CMD_READ_CYPRESS_ID ENCODE_CMD(0, 0, 0, 0)
+#define CYTP_CMD_READ_TP_METRICS ENCODE_CMD(0, 0, 0, 1)
+#define CYTP_CMD_SET_HSCROLL_WIDTH(w) ENCODE_CMD(1, 1, 0, (w))
+#define CYTP_CMD_SET_HSCROLL_MASK ENCODE_CMD(1, 1, 0, 0)
+#define CYTP_CMD_SET_VSCROLL_WIDTH(w) ENCODE_CMD(1, 2, 0, (w))
+#define CYTP_CMD_SET_VSCROLL_MASK ENCODE_CMD(1, 2, 0, 0)
+#define CYTP_CMD_SET_PALM_GEOMETRY(e) ENCODE_CMD(1, 2, 1, (e))
+#define CYTP_CMD_PALM_GEMMETRY_MASK ENCODE_CMD(1, 2, 1, 0)
+#define CYTP_CMD_SET_PALM_SENSITIVITY(s) ENCODE_CMD(1, 2, 2, (s))
+#define CYTP_CMD_PALM_SENSITIVITY_MASK ENCODE_CMD(1, 2, 2, 0)
+#define CYTP_CMD_SET_MOUSE_SENSITIVITY(s) ENCODE_CMD(1, 3, ((s) >> 2), (s))
+#define CYTP_CMD_MOUSE_SENSITIVITY_MASK ENCODE_CMD(1, 3, 0, 0)
+#define CYTP_CMD_REQUEST_BASELINE_STATUS ENCODE_CMD(2, 0, 0, 1)
+#define CYTP_CMD_REQUEST_RECALIBRATION ENCODE_CMD(2, 0, 0, 3)
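+/*
+ * Each command above packs four 2-bit fields (aa, bb, cc, dd) into one byte;
+ * e.g. CYTP_CMD_ABS_WITH_PRESSURE_MODE is ENCODE_CMD(0, 1, 0, 1) = 0x11.
+ */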
+
+#define DECODE_CMD_AA(x) (((x) >> 6) & CMD_BITS_MASK)
+#define DECODE_CMD_BB(x) (((x) >> 4) & CMD_BITS_MASK)
+#define DECODE_CMD_CC(x) (((x) >> 2) & CMD_BITS_MASK)
+#define DECODE_CMD_DD(x) ((x) & CMD_BITS_MASK)
+
+/* Cypress trackpad working mode. */
+#define CYTP_BIT_ABS_PRESSURE (1 << 3)
+#define CYTP_BIT_ABS_NO_PRESSURE (1 << 2)
+#define CYTP_BIT_CYPRESS_REL (1 << 1)
+#define CYTP_BIT_STANDARD_REL (1 << 0)
+#define CYTP_BIT_REL_MASK (CYTP_BIT_CYPRESS_REL | CYTP_BIT_STANDARD_REL)
+#define CYTP_BIT_ABS_MASK (CYTP_BIT_ABS_PRESSURE | CYTP_BIT_ABS_NO_PRESSURE)
+#define CYTP_BIT_ABS_REL_MASK (CYTP_BIT_ABS_MASK | CYTP_BIT_REL_MASK)
+
+#define CYTP_BIT_HIGH_RATE (1 << 4)
+/*
+ * If the report mode bit is set, the firmware works in Remote Mode;
+ * if it is cleared, the firmware works in Stream Mode.
+ */
+#define CYTP_BIT_REPORT_MODE (1 << 5)
+
+/* scrolling width values for set HSCROLL and VSCROLL width command. */
+#define SCROLL_WIDTH_NARROW 1
+#define SCROLL_WIDTH_NORMAL 2
+#define SCROLL_WIDTH_WIDE 3
+
+#define PALM_GEOMETRY_ENABLE 1
+#define PALM_GEOMETRY_DISABLE 0
+
+#define TP_METRICS_MASK 0x80
+#define FW_VERSION_MASX 0x7f
+#define FW_VER_HIGH_MASK 0x70
+#define FW_VER_LOW_MASK 0x0f
+
+/* Times to retry a ps2_command and millisecond delay between tries. */
+#define CYTP_PS2_CMD_TRIES 3
+#define CYTP_PS2_CMD_DELAY 500
+
+/* Timeout for PS/2 command, in milliseconds. */
+#define CYTP_CMD_TIMEOUT 200
+#define CYTP_DATA_TIMEOUT 30
+
+#define CYTP_EXT_CMD 0xe8
+#define CYTP_PS2_RETRY 0xfe
+#define CYTP_PS2_ERROR 0xfc
+
+#define CYTP_RESP_RETRY 0x01
+#define CYTP_RESP_ERROR 0xfe
+
+
+#define CYTP_105001_WIDTH 97 /* Dell XPS 13 */
+#define CYTP_105001_HIGH 59
+#define CYTP_DEFAULT_WIDTH (CYTP_105001_WIDTH)
+#define CYTP_DEFAULT_HIGH (CYTP_105001_HIGH)
+
+#define CYTP_ABS_MAX_X 1600
+#define CYTP_ABS_MAX_Y 900
+#define CYTP_MAX_PRESSURE 255
+#define CYTP_MIN_PRESSURE 0
+
+/* header byte bits of relative package. */
+#define BTN_LEFT_BIT 0x01
+#define BTN_RIGHT_BIT 0x02
+#define BTN_MIDDLE_BIT 0x04
+#define REL_X_SIGN_BIT 0x10
+#define REL_Y_SIGN_BIT 0x20
+
+/* header byte bits of absolute package. */
+#define ABS_VSCROLL_BIT 0x10
+#define ABS_HSCROLL_BIT 0x20
+#define ABS_MULTIFINGER_TAP 0x04
+#define ABS_EDGE_MOTION_MASK 0x80
+
+#define DFLT_RESP_BITS_VALID 0x88 /* SMBus bit should not be set. */
+#define DFLT_RESP_SMBUS_BIT 0x80
+#define DFLT_SMBUS_MODE 0x80
+#define DFLT_PS2_MODE 0x00
+#define DFLT_RESP_BIT_MODE 0x40
+#define DFLT_RESP_REMOTE_MODE 0x40
+#define DFLT_RESP_STREAM_MODE 0x00
+#define DFLT_RESP_BIT_REPORTING 0x20
+#define DFLT_RESP_BIT_SCALING 0x10
+
+#define TP_METRICS_BIT_PALM 0x80
+#define TP_METRICS_BIT_STUBBORN 0x40
+#define TP_METRICS_BIT_2F_JITTER 0x30
+#define TP_METRICS_BIT_1F_JITTER 0x0c
+#define TP_METRICS_BIT_APA 0x02
+#define TP_METRICS_BIT_MTG 0x01
+#define TP_METRICS_BIT_ABS_PKT_FORMAT_SET 0xf0
+#define TP_METRICS_BIT_2F_SPIKE 0x0c
+#define TP_METRICS_BIT_1F_SPIKE 0x03
+
+/* bits of first byte response of E9h-Status Request command. */
+#define RESP_BTN_RIGHT_BIT 0x01
+#define RESP_BTN_MIDDLE_BIT 0x02
+#define RESP_BTN_LEFT_BIT 0x04
+#define RESP_SCALING_BIT 0x10
+#define RESP_ENABLE_BIT 0x20
+#define RESP_REMOTE_BIT 0x40
+#define RESP_SMBUS_BIT 0x80
+
+#define CYTP_MAX_MT_SLOTS 2
+
+struct cytp_contact {
+ int x;
+ int y;
+	int z;  /* also known as the touch pressure. */
+};
+
+/* The structure of Cypress Trackpad event data. */
+struct cytp_report_data {
+ int contact_cnt;
+ struct cytp_contact contacts[CYTP_MAX_MT_SLOTS];
+ unsigned int left:1;
+ unsigned int right:1;
+ unsigned int middle:1;
+ unsigned int tap:1; /* multi-finger tap detected. */
+};
+
+/* The structure of Cypress Trackpad device private data. */
+struct cytp_data {
+ int fw_version;
+
+ int pkt_size;
+ int mode;
+
+ int tp_min_pressure;
+ int tp_max_pressure;
+ int tp_width; /* X direction physical size in mm. */
+ int tp_high; /* Y direction physical size in mm. */
+ int tp_max_abs_x; /* Max X absolute units that can be reported. */
+ int tp_max_abs_y; /* Max Y absolute units that can be reported. */
+
+ int tp_res_x; /* X resolution in units/mm. */
+ int tp_res_y; /* Y resolution in units/mm. */
+
+ int tp_metrics_supported;
+};
+
+
+#ifdef CONFIG_MOUSE_PS2_CYPRESS
+int cypress_detect(struct psmouse *psmouse, bool set_properties);
+int cypress_init(struct psmouse *psmouse);
+bool cypress_supported(void);
+#else
+inline int cypress_detect(struct psmouse *psmouse, bool set_properties)
+{
+ return -ENOSYS;
+}
+inline int cypress_init(struct psmouse *psmouse)
+{
+ return -ENOSYS;
+}
+inline bool cypress_supported(void)
+{
+ return 0;
+}
+#endif /* CONFIG_MOUSE_PS2_CYPRESS */
+
+#endif /* _CYPRESS_PS2_H */
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 22fe2547e169..cff065f6261c 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -34,6 +34,7 @@
#include "touchkit_ps2.h"
#include "elantech.h"
#include "sentelic.h"
+#include "cypress_ps2.h"
#define DRIVER_DESC "PS/2 mouse driver"
@@ -759,6 +760,28 @@ static int psmouse_extensions(struct psmouse *psmouse,
}
/*
+ * Try Cypress Trackpad.
+ * Must try it before Finger Sensing Pad because Finger Sensing Pad probe
+ * upsets some modules of Cypress Trackpads.
+ */
+ if (max_proto > PSMOUSE_IMEX &&
+ cypress_detect(psmouse, set_properties) == 0) {
+ if (cypress_supported()) {
+ if (cypress_init(psmouse) == 0)
+ return PSMOUSE_CYPRESS;
+
+ /*
+			 * The Finger Sensing Pad probe upsets some modules of
+			 * the Cypress Trackpad, so avoid that probe once a
+			 * Cypress Trackpad device has been detected.
+ */
+ return PSMOUSE_PS2;
+ }
+
+ max_proto = PSMOUSE_IMEX;
+ }
+
+/*
* Try ALPS TouchPad
*/
if (max_proto > PSMOUSE_IMEX) {
@@ -896,6 +919,15 @@ static const struct psmouse_protocol psmouse_protocols[] = {
.alias = "thinkps",
.detect = thinking_detect,
},
+#ifdef CONFIG_MOUSE_PS2_CYPRESS
+ {
+ .type = PSMOUSE_CYPRESS,
+ .name = "CyPS/2",
+ .alias = "cypress",
+ .detect = cypress_detect,
+ .init = cypress_init,
+ },
+#endif
{
.type = PSMOUSE_GENPS,
.name = "GenPS/2",
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index fe1df231ba4c..2f0b39d59a9b 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -95,6 +95,7 @@ enum psmouse_type {
PSMOUSE_ELANTECH,
PSMOUSE_FSP,
PSMOUSE_SYNAPTICS_RELATIVE,
+ PSMOUSE_CYPRESS,
PSMOUSE_AUTO /* This one should always be last */
};
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 12d12ca3fee0..2f78538e09d0 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -722,11 +722,13 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
default:
/*
* If the finger slot contained in SGM is valid, and either
- * hasn't changed, or is new, then report SGM in MTB slot 0.
+ * hasn't changed, or is new, or the old SGM has now moved to
+ * AGM, then report SGM in MTB slot 0.
* Otherwise, empty MTB slot 0.
*/
if (mt_state->sgm != -1 &&
- (mt_state->sgm == old->sgm || old->sgm == -1))
+ (mt_state->sgm == old->sgm ||
+ old->sgm == -1 || mt_state->agm == old->sgm))
synaptics_report_slot(dev, 0, sgm);
else
synaptics_report_slot(dev, 0, NULL);
@@ -735,9 +737,31 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
* If the finger slot contained in AGM is valid, and either
* hasn't changed, or is new, then report AGM in MTB slot 1.
* Otherwise, empty MTB slot 1.
+ *
+	 * However, in the case where the AGM is new, make sure
+	 * that it is either the same as the old SGM, or there was
+	 * no SGM.
+ *
+ * Otherwise, if the SGM was just 1, and the new AGM is 2, then
+ * the new AGM will keep the old SGM's tracking ID, which can
+	 * cause apparent drumroll.  This happens in the following
+	 * valid finger sequence:
+ *
+ * Action SGM AGM (MTB slot:Contact)
+ * 1. Touch contact 0 (0:0)
+ * 2. Touch contact 1 (0:0, 1:1)
+ * 3. Lift contact 0 (1:1)
+ * 4. Touch contacts 2,3 (0:2, 1:3)
+ *
+	 * In step 4, contact 3 in the AGM must not be given the same
+	 * tracking ID as contact 1 had in step 3. To avoid this,
+	 * the first AGM packet with contact 3 is dropped and slot 1 is
+ * invalidated (tracking ID = -1).
*/
if (mt_state->agm != -1 &&
- (mt_state->agm == old->agm || old->agm == -1))
+ (mt_state->agm == old->agm ||
+ (old->agm == -1 &&
+ (old->sgm == -1 || mt_state->agm == old->sgm))))
synaptics_report_slot(dev, 1, agm);
else
synaptics_report_slot(dev, 1, NULL);
@@ -1247,11 +1271,11 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
if (SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) {
- input_mt_init_slots(dev, 2, 0);
set_abs_position_params(dev, priv, ABS_MT_POSITION_X,
ABS_MT_POSITION_Y);
/* Image sensors can report per-contact pressure */
input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
+ input_mt_init_slots(dev, 2, INPUT_MT_POINTER);
/* Image sensors can signal 4 and 5 finger clicks */
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 4a4e182c33e7..6e9cc765e0dc 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -36,6 +36,7 @@ config SERIO_I8042
config SERIO_SERPORT
tristate "Serial port line discipline"
default y
+ depends on TTY
help
Say Y here if you plan to use an input device (mouse, joystick,
tablet, 6dof) that communicates over the RS232 serial (COM) port.
@@ -236,6 +237,7 @@ config SERIO_PS2MULT
config SERIO_ARC_PS2
tristate "ARC PS/2 support"
+ depends on GENERIC_HARDIRQS
help
Say Y here if you have an ARC FPGA platform with a PS/2
controller in it.
diff --git a/drivers/input/serio/arc_ps2.c b/drivers/input/serio/arc_ps2.c
index b571eb3e4efc..c52e3e589f72 100644
--- a/drivers/input/serio/arc_ps2.c
+++ b/drivers/input/serio/arc_ps2.c
@@ -8,6 +8,7 @@
* Driver is originally developed by Pavel Sokolov <psokolov@synopsys.com>
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/input.h>
@@ -206,9 +207,9 @@ static int arc_ps2_probe(struct platform_device *pdev)
return -ENOMEM;
}
- arc_ps2->addr = devm_request_and_ioremap(&pdev->dev, res);
- if (!arc_ps2->addr)
- return -EBUSY;
+ arc_ps2->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(arc_ps2->addr))
+ return PTR_ERR(arc_ps2->addr);
dev_info(&pdev->dev, "irq = %d, address = 0x%p, ports = %i\n",
irq, arc_ps2->addr, ARC_PS2_PORTS);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index f92d34f45a1c..aaf23aeae2ea 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -553,10 +553,10 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int
if (!rep_data)
return error;
- rep_data[0] = report_id;
- rep_data[1] = mode;
-
do {
+ rep_data[0] = report_id;
+ rep_data[1] = mode;
+
error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
report_id, rep_data, length, 1);
if (error >= 0)
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 264138f3217e..41b6fbf60112 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -359,6 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
case 0x802: /* Intuos4 General Pen */
case 0x804: /* Intuos4 Marker Pen */
case 0x40802: /* Intuos4 Classic Pen */
+ case 0x18803: /* DTH2242 Grip Pen */
case 0x022:
wacom->tool[idx] = BTN_TOOL_PEN;
break;
@@ -538,6 +539,13 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_key(input, wacom->tool[1], 0);
input_report_abs(input, ABS_MISC, 0);
}
+ } else if (features->type == DTK) {
+ input_report_key(input, BTN_0, (data[6] & 0x01));
+ input_report_key(input, BTN_1, (data[6] & 0x02));
+ input_report_key(input, BTN_2, (data[6] & 0x04));
+ input_report_key(input, BTN_3, (data[6] & 0x08));
+ input_report_key(input, BTN_4, (data[6] & 0x10));
+ input_report_key(input, BTN_5, (data[6] & 0x20));
} else if (features->type == WACOM_24HD) {
input_report_key(input, BTN_0, (data[6] & 0x01));
input_report_key(input, BTN_1, (data[6] & 0x02));
@@ -785,25 +793,6 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
return 1;
}
-static int find_slot_from_contactid(struct wacom_wac *wacom, int contactid)
-{
- int touch_max = wacom->features.touch_max;
- int i;
-
- if (!wacom->slots)
- return -1;
-
- for (i = 0; i < touch_max; ++i) {
- if (wacom->slots[i] == contactid)
- return i;
- }
- for (i = 0; i < touch_max; ++i) {
- if (wacom->slots[i] == -1)
- return i;
- }
- return -1;
-}
-
static int int_dist(int x1, int y1, int x2, int y2)
{
int x = x2 - x1;
@@ -833,8 +822,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
for (i = 0; i < contacts_to_send; i++) {
int offset = (WACOM_BYTES_PER_24HDT_PACKET * i) + 1;
bool touch = data[offset] & 0x1 && !wacom->shared->stylus_in_proximity;
- int id = data[offset + 1];
- int slot = find_slot_from_contactid(wacom, id);
+ int slot = input_mt_get_slot_by_key(input, data[offset + 1]);
if (slot < 0)
continue;
@@ -856,9 +844,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
input_report_abs(input, ABS_MT_ORIENTATION, w > h);
}
- wacom->slots[slot] = touch ? id : -1;
}
-
input_mt_report_pointer_emulation(input, true);
wacom->num_contacts_left -= contacts_to_send;
@@ -895,7 +881,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
int offset = (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3;
bool touch = data[offset] & 0x1;
int id = le16_to_cpup((__le16 *)&data[offset + 1]);
- int slot = find_slot_from_contactid(wacom, id);
+ int slot = input_mt_get_slot_by_key(input, id);
if (slot < 0)
continue;
@@ -908,9 +894,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
}
- wacom->slots[slot] = touch ? id : -1;
}
-
input_mt_report_pointer_emulation(input, true);
wacom->num_contacts_left -= contacts_to_send;
@@ -942,12 +926,11 @@ static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
contact_with_no_pen_down_count++;
}
}
+ input_mt_report_pointer_emulation(input, true);
/* keep touch state for pen event */
wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
- input_mt_report_pointer_emulation(input, true);
-
return 1;
}
@@ -1104,12 +1087,15 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
{
struct input_dev *input = wacom->input;
- int slot_id = data[0] - 2; /* data[0] is between 2 and 17 */
bool touch = data[1] & 0x80;
+ int slot = input_mt_get_slot_by_key(input, data[0]);
+
+ if (slot < 0)
+ return;
touch = touch && !wacom->shared->stylus_in_proximity;
- input_mt_slot(input, slot_id);
+ input_mt_slot(input, slot);
input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
if (touch) {
@@ -1162,7 +1148,6 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
wacom_bpt3_button_msg(wacom, data + offset);
}
-
input_mt_report_pointer_emulation(input, true);
input_sync(input);
@@ -1319,6 +1304,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case WACOM_21UX2:
case WACOM_22HD:
case WACOM_24HD:
+ case DTK:
sync = wacom_intuos_irq(wacom_wac);
break;
@@ -1444,39 +1430,64 @@ static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
return (logical_max * 100) / physical_max;
}
-int wacom_setup_input_capabilities(struct input_dev *input_dev,
- struct wacom_wac *wacom_wac)
+static void wacom_abs_set_axis(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac)
{
struct wacom_features *features = &wacom_wac->features;
- int i;
-
- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
- __set_bit(BTN_TOUCH, input_dev->keybit);
-
- input_set_abs_params(input_dev, ABS_X, 0, features->x_max,
- features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
- features->y_fuzz, 0);
if (features->device_type == BTN_TOOL_PEN) {
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
- features->pressure_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_X, 0, features->x_max,
+ features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
+ features->y_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0,
+ features->pressure_max, features->pressure_fuzz, 0);
/* penabled devices have fixed resolution for each model */
input_abs_set_res(input_dev, ABS_X, features->x_resolution);
input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
} else {
- input_abs_set_res(input_dev, ABS_X,
- wacom_calculate_touch_res(features->x_max,
- features->x_phy));
- input_abs_set_res(input_dev, ABS_Y,
- wacom_calculate_touch_res(features->y_max,
- features->y_phy));
+ if (features->touch_max <= 2) {
+ input_set_abs_params(input_dev, ABS_X, 0,
+ features->x_max, features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0,
+ features->y_max, features->y_fuzz, 0);
+ input_abs_set_res(input_dev, ABS_X,
+ wacom_calculate_touch_res(features->x_max,
+ features->x_phy));
+ input_abs_set_res(input_dev, ABS_Y,
+ wacom_calculate_touch_res(features->y_max,
+ features->y_phy));
+ }
+
+ if (features->touch_max > 1) {
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
+ features->x_max, features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
+ features->y_max, features->y_fuzz, 0);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_X,
+ wacom_calculate_touch_res(features->x_max,
+ features->x_phy));
+ input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
+ wacom_calculate_touch_res(features->y_max,
+ features->y_phy));
+ }
}
+}
+
+int wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac)
+{
+ struct wacom_features *features = &wacom_wac->features;
+ int i;
+
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+
+ __set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
+ wacom_abs_set_axis(input_dev, wacom_wac);
+
switch (wacom_wac->features.type) {
case WACOM_MO:
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
@@ -1513,12 +1524,17 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_Y, input_dev->keybit);
__set_bit(BTN_Z, input_dev->keybit);
- for (i = 0; i < 10; i++)
+ for (i = 6; i < 10; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
__set_bit(KEY_PROG1, input_dev->keybit);
__set_bit(KEY_PROG2, input_dev->keybit);
__set_bit(KEY_PROG3, input_dev->keybit);
+ /* fall through */
+
+ case DTK:
+ for (i = 0; i < 6; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
@@ -1614,24 +1630,11 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
} else if (features->device_type == BTN_TOOL_FINGER) {
__clear_bit(ABS_MISC, input_dev->absbit);
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
- __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
-
- input_mt_init_slots(input_dev, features->touch_max, 0);
-
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
0, features->x_max, 0, 0);
input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
0, features->y_max, 0, 0);
-
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, features->x_max,
- features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, features->y_max,
- features->y_fuzz, 0);
+ input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
}
break;
@@ -1662,27 +1665,14 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case MTSCREEN:
case MTTPC:
- if (features->device_type == BTN_TOOL_FINGER) {
- wacom_wac->slots = kmalloc(features->touch_max *
- sizeof(int),
- GFP_KERNEL);
- if (!wacom_wac->slots)
- return -ENOMEM;
-
- for (i = 0; i < features->touch_max; i++)
- wacom_wac->slots[i] = -1;
- }
- /* fall through */
-
case TABLETPC2FG:
if (features->device_type == BTN_TOOL_FINGER) {
- input_mt_init_slots(input_dev, features->touch_max, 0);
- input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE,
- 0, MT_TOOL_MAX, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, features->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, features->y_max, 0, 0);
+ unsigned int flags = INPUT_MT_DIRECT;
+
+ if (wacom_wac->features.type == TABLETPC2FG)
+ flags = 0;
+
+ input_mt_init_slots(input_dev, features->touch_max, flags);
}
/* fall through */
@@ -1725,35 +1715,26 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
if (features->device_type == BTN_TOOL_FINGER) {
+ unsigned int flags = INPUT_MT_POINTER;
+
__set_bit(BTN_LEFT, input_dev->keybit);
__set_bit(BTN_FORWARD, input_dev->keybit);
__set_bit(BTN_BACK, input_dev->keybit);
__set_bit(BTN_RIGHT, input_dev->keybit);
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- input_mt_init_slots(input_dev, features->touch_max, 0);
-
if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
- __set_bit(BTN_TOOL_TRIPLETAP,
- input_dev->keybit);
- __set_bit(BTN_TOOL_QUADTAP,
- input_dev->keybit);
-
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MAJOR,
0, features->x_max, 0, 0);
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MINOR,
0, features->y_max, 0, 0);
+ } else {
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ flags = 0;
}
-
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, features->x_max,
- features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, features->y_max,
- features->y_fuzz, 0);
+ input_mt_init_slots(input_dev, features->touch_max, flags);
} else if (features->device_type == BTN_TOOL_PEN) {
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -1978,6 +1959,13 @@ static const struct wacom_features wacom_features_0xCE =
static const struct wacom_features wacom_features_0xF0 =
{ "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511,
0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x59 = /* Pen */
+ { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
+ 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
+static const struct wacom_features wacom_features_0x5D = /* Touch */
+ { "Wacom DTH2242", .type = WACOM_24HDT,
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10 };
static const struct wacom_features wacom_features_0xCC =
{ "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87200, 65600, 2047,
63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -2152,6 +2140,8 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x43) },
{ USB_DEVICE_WACOM(0x44) },
{ USB_DEVICE_WACOM(0x45) },
+ { USB_DEVICE_WACOM(0x59) },
+ { USB_DEVICE_WACOM(0x5D) },
{ USB_DEVICE_WACOM(0xB0) },
{ USB_DEVICE_WACOM(0xB1) },
{ USB_DEVICE_WACOM(0xB2) },
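
The wacom hunks above drop the driver-private slots[] array in favour of the input core's key-based slot lookup. A minimal sketch of that pattern, assuming the device was prepared with input_mt_init_slots() (the helper name and report layout are illustrative; the MT calls are the in-kernel API):

#include <linux/input/mt.h>

static void report_one_contact(struct input_dev *input, int contact_id,
			       bool touching, int x, int y)
{
	/* Returns the slot already tracking this id, or claims a free one. */
	int slot = input_mt_get_slot_by_key(input, contact_id);

	if (slot < 0)
		return;		/* no slot available, drop this contact */

	input_mt_slot(input, slot);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, touching);
	if (touching) {
		input_report_abs(input, ABS_MT_POSITION_X, x);
		input_report_abs(input, ABS_MT_POSITION_Y, y);
	}
}

Because the core remembers the key per slot, the per-packet bookkeeping that find_slot_from_contactid() and wacom->slots[] used to do is no longer needed, which is why the field is removed from wacom_wac.h below.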
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 9396d7769f86..5f9a7721e16c 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -78,6 +78,7 @@ enum {
INTUOS5L,
WACOM_21UX2,
WACOM_22HD,
+ DTK,
WACOM_24HD,
CINTIQ,
WACOM_BEE,
@@ -135,7 +136,6 @@ struct wacom_wac {
int pid;
int battery_capacity;
int num_contacts_left;
- int *slots;
};
#endif
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 515cfe790543..f9a5fd89bc02 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -359,7 +359,7 @@ config TOUCHSCREEN_MCS5000
config TOUCHSCREEN_MMS114
tristate "MELFAS MMS114 touchscreen"
- depends on I2C
+ depends on I2C && GENERIC_HARDIRQS
help
Say Y here if you have the MELFAS MMS114 touchscreen controller
chip in your system.
diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c
index 638e20310f12..861b7f77605b 100644
--- a/drivers/input/touchscreen/cyttsp_spi.c
+++ b/drivers/input/touchscreen/cyttsp_spi.c
@@ -193,7 +193,6 @@ static struct spi_driver cyttsp_spi_driver = {
module_spi_driver(cyttsp_spi_driver);
-MODULE_ALIAS("spi:cyttsp");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) SPI driver");
MODULE_AUTHOR("Cypress");
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 98841d8aa635..4a29ddf6bf1e 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -429,12 +429,12 @@ static int mms114_probe(struct i2c_client *client,
return -ENODEV;
}
- data = kzalloc(sizeof(struct mms114_data), GFP_KERNEL);
- input_dev = input_allocate_device();
+ data = devm_kzalloc(&client->dev, sizeof(struct mms114_data),
+ GFP_KERNEL);
+ input_dev = devm_input_allocate_device(&client->dev);
if (!data || !input_dev) {
dev_err(&client->dev, "Failed to allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ return -ENOMEM;
}
data->client = client;
@@ -466,57 +466,36 @@ static int mms114_probe(struct i2c_client *client,
input_set_drvdata(input_dev, data);
i2c_set_clientdata(client, data);
- data->core_reg = regulator_get(&client->dev, "avdd");
+ data->core_reg = devm_regulator_get(&client->dev, "avdd");
if (IS_ERR(data->core_reg)) {
error = PTR_ERR(data->core_reg);
dev_err(&client->dev,
"Unable to get the Core regulator (%d)\n", error);
- goto err_free_mem;
+ return error;
}
- data->io_reg = regulator_get(&client->dev, "vdd");
+ data->io_reg = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(data->io_reg)) {
error = PTR_ERR(data->io_reg);
dev_err(&client->dev,
"Unable to get the IO regulator (%d)\n", error);
- goto err_core_reg;
+ return error;
}
- error = request_threaded_irq(client->irq, NULL, mms114_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "mms114", data);
+ error = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ mms114_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_io_reg;
+ return error;
}
disable_irq(client->irq);
error = input_register_device(data->input_dev);
- if (error)
- goto err_free_irq;
-
- return 0;
-
-err_free_irq:
- free_irq(client->irq, data);
-err_io_reg:
- regulator_put(data->io_reg);
-err_core_reg:
- regulator_put(data->core_reg);
-err_free_mem:
- input_free_device(input_dev);
- kfree(data);
- return error;
-}
-
-static int mms114_remove(struct i2c_client *client)
-{
- struct mms114_data *data = i2c_get_clientdata(client);
-
- free_irq(client->irq, data);
- regulator_put(data->io_reg);
- regulator_put(data->core_reg);
- input_unregister_device(data->input_dev);
- kfree(data);
+ if (error) {
+ dev_err(&client->dev, "Failed to register input device\n");
+ return error;
+ }
return 0;
}
@@ -590,7 +569,6 @@ static struct i2c_driver mms114_driver = {
.of_match_table = of_match_ptr(mms114_dt_match),
},
.probe = mms114_probe,
- .remove = mms114_remove,
.id_table = mms114_id,
};
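
The mms114 conversion above is a textbook managed-resource probe: once every allocation is devm_*-based, the error-unwinding labels and the remove() callback can go away. A condensed sketch of that shape, not tied to any particular chip (all names are illustrative):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>

struct foo_data {
	struct regulator *core_reg;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;		/* a real handler would read the chip */
}

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct foo_data *data;
	struct input_dev *input;
	int error;

	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	input = devm_input_allocate_device(&client->dev);
	if (!data || !input)
		return -ENOMEM;

	data->core_reg = devm_regulator_get(&client->dev, "avdd");
	if (IS_ERR(data->core_reg))
		return PTR_ERR(data->core_reg);

	error = devm_request_threaded_irq(&client->dev, client->irq, NULL,
					  foo_irq, IRQF_ONESHOT,
					  dev_name(&client->dev), data);
	if (error)
		return error;

	/* devm_input_allocate_device() also takes care of unregistering on
	 * unbind, so no .remove() counterpart is needed for the input device. */
	return input_register_device(input);
}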
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index 84d884b4ec3e..59e81b00f244 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -120,6 +120,7 @@ static void stmpe_work(struct work_struct *work)
__stmpe_reset_fifo(ts->stmpe);
input_report_abs(ts->idev, ABS_PRESSURE, 0);
+ input_report_key(ts->idev, BTN_TOUCH, 0);
input_sync(ts->idev);
}
@@ -153,6 +154,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data)
input_report_abs(ts->idev, ABS_X, x);
input_report_abs(ts->idev, ABS_Y, y);
input_report_abs(ts->idev, ABS_PRESSURE, z);
+ input_report_key(ts->idev, BTN_TOUCH, 1);
input_sync(ts->idev);
/* flush the FIFO after we have read out our values. */
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index 9c0cdc7ea449..7213e8b07e79 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -753,3 +753,4 @@ module_spi_driver(tsc2005_driver);
MODULE_AUTHOR("Lauri Leukkunen <lauri.leukkunen@nokia.com>");
MODULE_DESCRIPTION("TSC2005 Touchscreen Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:tsc2005");
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index f88fab56178c..6be2eb6a153a 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -247,7 +247,7 @@ static int wm831x_ts_probe(struct platform_device *pdev)
wm831x_ts = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ts),
GFP_KERNEL);
- input_dev = input_allocate_device();
+ input_dev = devm_input_allocate_device(&pdev->dev);
if (!wm831x_ts || !input_dev) {
error = -ENOMEM;
goto err_alloc;
@@ -376,7 +376,6 @@ err_pd_irq:
err_data_irq:
free_irq(wm831x_ts->data_irq, wm831x_ts);
err_alloc:
- input_free_device(input_dev);
return error;
}
@@ -387,7 +386,6 @@ static int wm831x_ts_remove(struct platform_device *pdev)
free_irq(wm831x_ts->pd_irq, wm831x_ts);
free_irq(wm831x_ts->data_irq, wm831x_ts);
- input_unregister_device(wm831x_ts->input_dev);
return 0;
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e39f9dbf297b..01068987809d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -65,8 +65,8 @@ config AMD_IOMMU_STATS
If unsure, say N.
config AMD_IOMMU_V2
- tristate "AMD IOMMU Version 2 driver (EXPERIMENTAL)"
- depends on AMD_IOMMU && PROFILING && EXPERIMENTAL
+ tristate "AMD IOMMU Version 2 driver"
+ depends on AMD_IOMMU && PROFILING
select MMU_NOTIFIER
---help---
This option enables support for the AMD IOMMUv2 features of the IOMMU
@@ -119,8 +119,8 @@ config INTEL_IOMMU_FLOPPY_WA
16MiB to make floppy (an ISA device) work.
config IRQ_REMAP
- bool "Support for Interrupt Remapping (EXPERIMENTAL)"
- depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
+ bool "Support for Interrupt Remapping"
+ depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
select DMAR_TABLE
---help---
Supports Interrupt remapping for IO-APIC and MSI devices.
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c1c74e030a58..d33eaaf783ad 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4017,10 +4017,10 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
index -= count - 1;
+ cfg->remapped = 1;
irte_info = &cfg->irq_2_iommu;
irte_info->sub_handle = devid;
irte_info->irte_index = index;
- irte_info->iommu = (void *)cfg;
goto out;
}
@@ -4127,9 +4127,9 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
index = attr->ioapic_pin;
/* Setup IRQ remapping info */
+ cfg->remapped = 1;
irte_info->sub_handle = devid;
irte_info->irte_index = index;
- irte_info->iommu = (void *)cfg;
/* Setup IRTE for IOMMU */
irte.val = 0;
@@ -4288,9 +4288,9 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
devid = get_device_id(&pdev->dev);
irte_info = &cfg->irq_2_iommu;
+ cfg->remapped = 1;
irte_info->sub_handle = devid;
irte_info->irte_index = index + offset;
- irte_info->iommu = (void *)cfg;
return 0;
}
@@ -4314,9 +4314,9 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
if (index < 0)
return index;
+ cfg->remapped = 1;
irte_info->sub_handle = devid;
irte_info->irte_index = index;
- irte_info->iommu = (void *)cfg;
return 0;
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 81837b0710a9..faf10ba1ed9a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -975,6 +975,38 @@ static void __init free_iommu_all(void)
}
/*
+ * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
+ * Workaround:
+ * BIOS should disable L2B miscellaneous clock gating by setting
+ * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
+ */
+static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+{
+ u32 value;
+
+ if ((boot_cpu_data.x86 != 0x15) ||
+ (boot_cpu_data.x86_model < 0x10) ||
+ (boot_cpu_data.x86_model > 0x1f))
+ return;
+
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+ pci_read_config_dword(iommu->dev, 0xf4, &value);
+
+ if (value & BIT(2))
+ return;
+
+ /* Select NB indirect register 0x90 and enable writing */
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
+
+ pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
+ pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
+ dev_name(&iommu->dev->dev));
+
+ /* Clear the enable writing bit */
+ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+}
+
+/*
 * This function glues the initialization function for one IOMMU
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
@@ -1172,6 +1204,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}
+ amd_iommu_erratum_746_workaround(iommu);
+
return pci_enable_device(iommu->dev);
}
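
The erratum 746 fix above goes through the D0F2 indirect register pair: config offset 0xF0 selects an NB internal register (bit 8 arms writes) and 0xF4 is the data window. The same access pattern in isolation (a sketch; the offsets follow the patch, the helper name is made up):

#include <linux/bitops.h>
#include <linux/pci.h>

/* Set a bit in an NB indirect register reached through the 0xF0/0xF4 pair. */
static void nb_indirect_set_bit(struct pci_dev *pdev, u32 reg, u32 bit)
{
	u32 value;

	pci_write_config_dword(pdev, 0xf0, reg);	/* select the register */
	pci_read_config_dword(pdev, 0xf4, &value);	/* read the data window */

	if (value & bit)
		return;					/* already set, nothing to do */

	pci_write_config_dword(pdev, 0xf0, reg | BIT(8));	/* arm writes */
	pci_write_config_dword(pdev, 0xf4, value | bit);	/* write it back */
	pci_write_config_dword(pdev, 0xf0, reg);		/* disarm writes */
}

In the workaround itself this runs only on family 0x15, models 0x10-0x1f, with reg 0x90 and bit 2 (CKGateL2BMiscDisable), exactly as the hunk shows.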
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 86e2f4a62b9a..174bb654453d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -41,6 +41,8 @@
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
+#include "irq_remapping.h"
+
/* No locks are needed as DMA remapping hardware unit
* list is constructed at boot time and hotplug of
* these units are not supported by the architecture.
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b9d091157884..43d5c8b8e7ad 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -46,6 +46,8 @@
#include <asm/cacheflush.h>
#include <asm/iommu.h>
+#include "irq_remapping.h"
+
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -4234,6 +4236,21 @@ static struct iommu_ops intel_iommu_ops = {
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
+static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+{
+ /* G4x/GM45 integrated gfx dmar support is totally busted. */
+ printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+ dmar_map_gfx = 0;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
+
static void quirk_iommu_rwbf(struct pci_dev *dev)
{
/*
@@ -4242,12 +4259,6 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
*/
printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
-
- /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
- if (dev->revision == 0x07) {
- printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
- dmar_map_gfx = 0;
- }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
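
DECLARE_PCI_FIXUP_HEADER() hooks a callback into early PCI enumeration for one vendor/device pair, which is why the g4x change above repeats the macro once per graphics device ID instead of sniffing the revision at runtime inside the rwbf quirk. The registration pattern in isolation (0x1234 is a placeholder device ID):

#include <linux/pci.h>

static void quirk_example(struct pci_dev *dev)
{
	/* Runs once for each matching device while its header is being set up. */
	dev_info(&dev->dev, "applying example quirk\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example);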
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index af8904de1d44..f3b8f23b5d8f 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -68,6 +68,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
struct ir_table *table = iommu->ir_table;
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+ struct irq_cfg *cfg = irq_get_chip_data(irq);
u16 index, start_index;
unsigned int mask = 0;
unsigned long flags;
@@ -115,6 +116,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
for (i = index; i < index + count; i++)
table->base[i].present = 1;
+ cfg->remapped = 1;
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
@@ -155,6 +157,7 @@ static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+ struct irq_cfg *cfg = irq_get_chip_data(irq);
unsigned long flags;
if (!irq_iommu)
@@ -162,6 +165,7 @@ static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subha
raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ cfg->remapped = 1;
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = subhandle;
@@ -425,11 +429,22 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
/* Enable interrupt-remapping */
iommu->gcmd |= DMA_GCMD_IRE;
+ iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
+ /*
+ * With CFI clear in the Global Command register, we should be
+ * protected from dangerous (i.e. compatibility) interrupts
+ * regardless of x2apic status. Check just to be sure.
+ */
+ if (sts & DMA_GSTS_CFIS)
+ WARN(1, KERN_WARNING
+ "Compatibility-format IRQs enabled despite intr remapping;\n"
+ "you are vulnerable to IRQ injection.\n");
+
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
@@ -526,20 +541,24 @@ static int __init intel_irq_remapping_supported(void)
static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
+ bool x2apic_present;
int setup = 0;
int eim = 0;
+ x2apic_present = x2apic_supported();
+
if (parse_ioapics_under_ir() != 1) {
printk(KERN_INFO "Not enable interrupt remapping\n");
- return -1;
+ goto error;
}
- if (x2apic_supported()) {
+ if (x2apic_present) {
eim = !dmar_x2apic_optout();
- WARN(!eim, KERN_WARNING
- "Your BIOS is broken and requested that x2apic be disabled\n"
- "This will leave your machine vulnerable to irq-injection attacks\n"
- "Use 'intremap=no_x2apic_optout' to override BIOS request\n");
+ if (!eim)
+ printk(KERN_WARNING
+ "Your BIOS is broken and requested that x2apic be disabled.\n"
+ "This will slightly decrease performance.\n"
+ "Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
}
for_each_drhd_unit(drhd) {
@@ -578,7 +597,7 @@ static int __init intel_enable_irq_remapping(void)
if (eim && !ecap_eim_support(iommu->ecap)) {
printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
" ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
- return -1;
+ goto error;
}
}
@@ -594,7 +613,7 @@ static int __init intel_enable_irq_remapping(void)
printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
" invalidation, ecap %Lx, ret %d\n",
drhd->reg_base_addr, iommu->ecap, ret);
- return -1;
+ goto error;
}
}
@@ -617,6 +636,14 @@ static int __init intel_enable_irq_remapping(void)
goto error;
irq_remapping_enabled = 1;
+
+ /*
+ * VT-d has a different layout for IO-APIC entries when
+ * interrupt remapping is enabled. So it needs a special routine
+ * to print IO-APIC entries for debugging purposes too.
+ */
+ x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;
+
pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");
return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
@@ -625,6 +652,11 @@ error:
/*
* handle error condition gracefully here!
*/
+
+ if (x2apic_present)
+ WARN(1, KERN_WARNING
+ "Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
+
return -1;
}
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index faf85d6e33fe..d56f8c17c5fe 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -1,11 +1,18 @@
+#include <linux/seq_file.h>
+#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/msi.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
#include <asm/hw_irq.h>
#include <asm/irq_remapping.h>
+#include <asm/processor.h>
+#include <asm/x86_init.h>
+#include <asm/apic.h>
#include "irq_remapping.h"
@@ -17,6 +24,152 @@ int no_x2apic_optout;
static struct irq_remap_ops *remap_ops;
+static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
+static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
+ int index, int sub_handle);
+static int set_remapped_irq_affinity(struct irq_data *data,
+ const struct cpumask *mask,
+ bool force);
+
+static bool irq_remapped(struct irq_cfg *cfg)
+{
+ return (cfg->remapped == 1);
+}
+
+static void irq_remapping_disable_io_apic(void)
+{
+ /*
+ * With interrupt-remapping, for now we will use virtual wire A
+ * mode, as virtual wire B is little complex (need to configure
+ * both IOAPIC RTE as well as interrupt-remapping table entry).
+ * As this gets called during crash dump, keep this simple for
+ * now.
+ */
+ if (cpu_has_apic || apic_from_smp_config())
+ disconnect_bsp_APIC(0);
+}
+
+static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
+{
+ int node, ret, sub_handle, index = 0;
+ unsigned int irq;
+ struct msi_desc *msidesc;
+
+ nvec = __roundup_pow_of_two(nvec);
+
+ WARN_ON(!list_is_singular(&dev->msi_list));
+ msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+ WARN_ON(msidesc->irq);
+ WARN_ON(msidesc->msi_attrib.multiple);
+
+ node = dev_to_node(&dev->dev);
+ irq = __create_irqs(get_nr_irqs_gsi(), nvec, node);
+ if (irq == 0)
+ return -ENOSPC;
+
+ msidesc->msi_attrib.multiple = ilog2(nvec);
+ for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
+ if (!sub_handle) {
+ index = msi_alloc_remapped_irq(dev, irq, nvec);
+ if (index < 0) {
+ ret = index;
+ goto error;
+ }
+ } else {
+ ret = msi_setup_remapped_irq(dev, irq + sub_handle,
+ index, sub_handle);
+ if (ret < 0)
+ goto error;
+ }
+ ret = setup_msi_irq(dev, msidesc, irq, sub_handle);
+ if (ret < 0)
+ goto error;
+ }
+ return 0;
+
+error:
+ destroy_irqs(irq, nvec);
+
+ /*
+	 * Restore altered MSI descriptor fields and prevent the just-destroyed
+	 * IRQs from being torn down again in default_teardown_msi_irqs()
+ */
+ msidesc->irq = 0;
+ msidesc->msi_attrib.multiple = 0;
+
+ return ret;
+}
+
+static int do_setup_msix_irqs(struct pci_dev *dev, int nvec)
+{
+ int node, ret, sub_handle, index = 0;
+ struct msi_desc *msidesc;
+ unsigned int irq;
+
+ node = dev_to_node(&dev->dev);
+ irq = get_nr_irqs_gsi();
+ sub_handle = 0;
+
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+
+ irq = create_irq_nr(irq, node);
+ if (irq == 0)
+ return -1;
+
+ if (sub_handle == 0)
+ ret = index = msi_alloc_remapped_irq(dev, irq, nvec);
+ else
+ ret = msi_setup_remapped_irq(dev, irq, index, sub_handle);
+
+ if (ret < 0)
+ goto error;
+
+ ret = setup_msi_irq(dev, msidesc, irq, 0);
+ if (ret < 0)
+ goto error;
+
+ sub_handle += 1;
+ irq += 1;
+ }
+
+ return 0;
+
+error:
+ destroy_irq(irq);
+ return ret;
+}
+
+static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
+ int nvec, int type)
+{
+ if (type == PCI_CAP_ID_MSI)
+ return do_setup_msi_irqs(dev, nvec);
+ else
+ return do_setup_msix_irqs(dev, nvec);
+}
+
+void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
+{
+ /*
+ * Intr-remapping uses pin number as the virtual vector
+ * in the RTE. Actual vector is programmed in
+ * intr-remapping table entry. Hence for the io-apic
+ * EOI we use the pin number.
+ */
+ io_apic_eoi(apic, pin);
+}
+
+static void __init irq_remapping_modify_x86_ops(void)
+{
+ x86_io_apic_ops.disable = irq_remapping_disable_io_apic;
+ x86_io_apic_ops.set_affinity = set_remapped_irq_affinity;
+ x86_io_apic_ops.setup_entry = setup_ioapic_remapped_entry;
+ x86_io_apic_ops.eoi_ioapic_pin = eoi_ioapic_pin_remapped;
+ x86_msi.setup_msi_irqs = irq_remapping_setup_msi_irqs;
+ x86_msi.setup_hpet_msi = setup_hpet_msi_remapped;
+ x86_msi.compose_msi_msg = compose_remapped_msi_msg;
+}
+
static __init int setup_nointremap(char *str)
{
disable_irq_remap = 1;
@@ -79,15 +232,24 @@ int __init irq_remapping_prepare(void)
int __init irq_remapping_enable(void)
{
+ int ret;
+
if (!remap_ops || !remap_ops->enable)
return -ENODEV;
- return remap_ops->enable();
+ ret = remap_ops->enable();
+
+ if (irq_remapping_enabled)
+ irq_remapping_modify_x86_ops();
+
+ return ret;
}
void irq_remapping_disable(void)
{
- if (!remap_ops || !remap_ops->disable)
+ if (!irq_remapping_enabled ||
+ !remap_ops ||
+ !remap_ops->disable)
return;
remap_ops->disable();
@@ -95,7 +257,9 @@ void irq_remapping_disable(void)
int irq_remapping_reenable(int mode)
{
- if (!remap_ops || !remap_ops->reenable)
+ if (!irq_remapping_enabled ||
+ !remap_ops ||
+ !remap_ops->reenable)
return 0;
return remap_ops->reenable(mode);
@@ -103,6 +267,9 @@ int irq_remapping_reenable(int mode)
int __init irq_remap_enable_fault_handling(void)
{
+ if (!irq_remapping_enabled)
+ return 0;
+
if (!remap_ops || !remap_ops->enable_faulting)
return -ENODEV;
@@ -133,23 +300,28 @@ int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
void free_remapped_irq(int irq)
{
+ struct irq_cfg *cfg = irq_get_chip_data(irq);
+
if (!remap_ops || !remap_ops->free_irq)
return;
- remap_ops->free_irq(irq);
+ if (irq_remapped(cfg))
+ remap_ops->free_irq(irq);
}
void compose_remapped_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
struct msi_msg *msg, u8 hpet_id)
{
- if (!remap_ops || !remap_ops->compose_msi_msg)
- return;
+ struct irq_cfg *cfg = irq_get_chip_data(irq);
- remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+ if (!irq_remapped(cfg))
+ native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+ else if (remap_ops && remap_ops->compose_msi_msg)
+ remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
}
-int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
+static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
{
if (!remap_ops || !remap_ops->msi_alloc_irq)
return -ENODEV;
@@ -157,8 +329,8 @@ int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
return remap_ops->msi_alloc_irq(pdev, irq, nvec);
}
-int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
- int index, int sub_handle)
+static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
+ int index, int sub_handle)
{
if (!remap_ops || !remap_ops->msi_setup_irq)
return -ENODEV;
@@ -173,3 +345,42 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
return remap_ops->setup_hpet_msi(irq, id);
}
+
+void panic_if_irq_remap(const char *msg)
+{
+ if (irq_remapping_enabled)
+ panic(msg);
+}
+
+static void ir_ack_apic_edge(struct irq_data *data)
+{
+ ack_APIC_irq();
+}
+
+static void ir_ack_apic_level(struct irq_data *data)
+{
+ ack_APIC_irq();
+ eoi_ioapic_irq(data->irq, data->chip_data);
+}
+
+static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
+{
+ seq_printf(p, " IR-%s", data->chip->name);
+}
+
+void irq_remap_modify_chip_defaults(struct irq_chip *chip)
+{
+ chip->irq_print_chip = ir_print_prefix;
+ chip->irq_ack = ir_ack_apic_edge;
+ chip->irq_eoi = ir_ack_apic_level;
+ chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
+}
+
+bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
+{
+ if (!irq_remapped(cfg))
+ return false;
+ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
+ irq_remap_modify_chip_defaults(chip);
+ return true;
+}
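
Most of the wrappers touched above now follow the same dispatch shape: look up the per-IRQ irq_cfg, and either stay on the native x86 path or hand off to the active remap_ops backend. Roughly (the cfg->remapped flag, irq_remapped() and remap_ops follow the patch; the op and the native call are stand-ins):

static void frob_irq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);	/* irq_cfg is from asm/hw_irq.h */

	if (!irq_remapped(cfg)) {
		native_frob_irq(irq);		/* stand-in for the non-remapped path */
		return;
	}

	if (remap_ops && remap_ops->frob_irq)	/* hypothetical op, for illustration */
		remap_ops->frob_irq(irq);
}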
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index 95363acb583f..ecb637670405 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -34,6 +34,7 @@ struct msi_msg;
extern int disable_irq_remap;
extern int disable_sourceid_checking;
extern int no_x2apic_optout;
+extern int irq_remapping_enabled;
struct irq_remap_ops {
/* Check whether Interrupt Remapping is supported */
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index fc178893789a..f08dbcd2f175 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) "%s(): " fmt, __func__
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
@@ -1176,9 +1177,9 @@ static int tegra_smmu_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
return -ENODEV;
- smmu->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
- if (!smmu->regs[i])
- return -EBUSY;
+ smmu->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(smmu->regs[i]))
+ return PTR_ERR(smmu->regs[i]);
}
err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
diff --git a/drivers/ipack/devices/Kconfig b/drivers/ipack/devices/Kconfig
index 0b82fdc198c0..907a8cb48f2a 100644
--- a/drivers/ipack/devices/Kconfig
+++ b/drivers/ipack/devices/Kconfig
@@ -1,6 +1,6 @@
config SERIAL_IPOCTAL
tristate "IndustryPack IP-OCTAL uart support"
- depends on IPACK_BUS
+ depends on IPACK_BUS && TTY
help
This driver supports the IPOCTAL serial port device for the IndustryPack bus.
default n
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index 576d53d92677..141094e7c06e 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -20,7 +20,6 @@
#include <linux/serial.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
-#include <linux/atomic.h>
#include <linux/io.h>
#include <linux/ipack.h>
#include "ipoctal.h"
@@ -38,21 +37,19 @@ struct ipoctal_channel {
spinlock_t lock;
unsigned int pointer_read;
unsigned int pointer_write;
- atomic_t open;
struct tty_port tty_port;
union scc2698_channel __iomem *regs;
union scc2698_block __iomem *block_regs;
unsigned int board_id;
- unsigned char *board_write;
u8 isr_rx_rdy_mask;
u8 isr_tx_rdy_mask;
+ unsigned int rx_enable;
};
struct ipoctal {
struct ipack_device *dev;
unsigned int board_id;
struct ipoctal_channel channel[NR_CHANNELS];
- unsigned char write;
struct tty_driver *tty_drv;
u8 __iomem *mem8_space;
u8 __iomem *int_space;
@@ -64,28 +61,23 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
channel = dev_get_drvdata(tty->dev);
+ /*
+ * Enable RX. TX will be enabled when
+ * there is something to send
+ */
iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+ channel->rx_enable = 1;
return 0;
}
static int ipoctal_open(struct tty_struct *tty, struct file *file)
{
- int res;
struct ipoctal_channel *channel;
channel = dev_get_drvdata(tty->dev);
-
- if (atomic_read(&channel->open))
- return -EBUSY;
-
tty->driver_data = channel;
- res = tty_port_open(&channel->tty_port, tty, file);
- if (res)
- return res;
-
- atomic_inc(&channel->open);
- return 0;
+ return tty_port_open(&channel->tty_port, tty, file);
}
static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@@ -111,9 +103,7 @@ static void ipoctal_close(struct tty_struct *tty, struct file *filp)
struct ipoctal_channel *channel = tty->driver_data;
tty_port_close(&channel->tty_port, tty, filp);
-
- if (atomic_dec_and_test(&channel->open))
- ipoctal_free_channel(channel);
+ ipoctal_free_channel(channel);
}
static int ipoctal_get_icount(struct tty_struct *tty,
@@ -133,15 +123,16 @@ static int ipoctal_get_icount(struct tty_struct *tty,
return 0;
}
-static void ipoctal_irq_rx(struct ipoctal_channel *channel,
- struct tty_struct *tty, u8 sr)
+static void ipoctal_irq_rx(struct ipoctal_channel *channel, u8 sr)
{
+ struct tty_port *port = &channel->tty_port;
unsigned char value;
- unsigned char flag = TTY_NORMAL;
+ unsigned char flag;
u8 isr;
do {
value = ioread8(&channel->regs->r.rhr);
+ flag = TTY_NORMAL;
/* Error: count statistics */
if (sr & SR_ERROR) {
iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -149,7 +140,7 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel,
if (sr & SR_OVERRUN_ERROR) {
channel->stats.overrun_err++;
/* Overrun doesn't affect the current character*/
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(port, 0, TTY_OVERRUN);
}
if (sr & SR_PARITY_ERROR) {
channel->stats.parity_err++;
@@ -165,7 +156,7 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel,
flag = TTY_BREAK;
}
}
- tty_insert_flip_char(tty, value, flag);
+ tty_insert_flip_char(port, value, flag);
/* Check if there are more characters in RX FIFO
* If there are more, the isr register for this channel
@@ -175,7 +166,7 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel,
sr = ioread8(&channel->regs->r.sr);
} while (isr & channel->isr_rx_rdy_mask);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
static void ipoctal_irq_tx(struct ipoctal_channel *channel)
@@ -183,10 +174,8 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
unsigned char value;
unsigned int *pointer_write = &channel->pointer_write;
- if (channel->nb_bytes <= 0) {
- channel->nb_bytes = 0;
+ if (channel->nb_bytes == 0)
return;
- }
value = channel->tty_port.xmit_buf[*pointer_write];
iowrite8(value, &channel->regs->w.thr);
@@ -194,55 +183,38 @@ static void ipoctal_irq_tx(struct ipoctal_channel *channel)
(*pointer_write)++;
*pointer_write = *pointer_write % PAGE_SIZE;
channel->nb_bytes--;
-
- if ((channel->nb_bytes == 0) &&
- (waitqueue_active(&channel->queue))) {
-
- if (channel->board_id != IPACK1_DEVICE_ID_SBS_OCTAL_485) {
- *channel->board_write = 1;
- wake_up_interruptible(&channel->queue);
- }
- }
}
static void ipoctal_irq_channel(struct ipoctal_channel *channel)
{
u8 isr, sr;
- struct tty_struct *tty;
- /* If there is no client, skip the check */
- if (!atomic_read(&channel->open))
- return;
-
- tty = tty_port_tty_get(&channel->tty_port);
- if (!tty)
- return;
+ spin_lock(&channel->lock);
	/* The HW is organized in pairs of channels. See which register we need
* to read from */
isr = ioread8(&channel->block_regs->r.isr);
sr = ioread8(&channel->regs->r.sr);
- /* In case of RS-485, change from TX to RX when finishing TX.
- * Half-duplex. */
- if ((channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) &&
- (sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
+ if ((sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
- iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
- iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
- *channel->board_write = 1;
- wake_up_interruptible(&channel->queue);
+ /* In case of RS-485, change from TX to RX when finishing TX.
+ * Half-duplex. */
+ if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
+ iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
+ iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+ channel->rx_enable = 1;
+ }
}
/* RX data */
if ((isr & channel->isr_rx_rdy_mask) && (sr & SR_RX_READY))
- ipoctal_irq_rx(channel, tty, sr);
+ ipoctal_irq_rx(channel, sr);
/* TX of each character */
if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY))
ipoctal_irq_tx(channel);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ spin_unlock(&channel->lock);
}
static irqreturn_t ipoctal_irq_handler(void *arg)
@@ -250,14 +222,14 @@ static irqreturn_t ipoctal_irq_handler(void *arg)
unsigned int i;
struct ipoctal *ipoctal = (struct ipoctal *) arg;
- /* Check all channels */
- for (i = 0; i < NR_CHANNELS; i++)
- ipoctal_irq_channel(&ipoctal->channel[i]);
-
/* Clear the IPack device interrupt */
readw(ipoctal->int_space + ACK_INT_REQ0);
readw(ipoctal->int_space + ACK_INT_REQ1);
+ /* Check all channels */
+ for (i = 0; i < NR_CHANNELS; i++)
+ ipoctal_irq_channel(&ipoctal->channel[i]);
+
return IRQ_HANDLED;
}
@@ -311,7 +283,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal->mem8_space =
devm_ioremap_nocache(&ipoctal->dev->dev,
region->start, 0x8000);
- if (!addr) {
+ if (!ipoctal->mem8_space) {
dev_err(&ipoctal->dev->dev,
"Unable to map slot [%d:%d] MEM8 space!\n",
bus_nr, slot);
@@ -324,7 +296,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
struct ipoctal_channel *channel = &ipoctal->channel[i];
channel->regs = chan_regs + i;
channel->block_regs = block_regs + (i >> 1);
- channel->board_write = &ipoctal->write;
channel->board_id = ipoctal->board_id;
if (i & 1) {
channel->isr_tx_rdy_mask = ISR_TxRDY_B;
@@ -335,6 +306,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
}
iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
iowrite8(MR1_CHRL_8_BITS | MR1_ERROR_CHAR | MR1_RxINT_RxRDY,
@@ -407,8 +379,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal_reset_stats(&channel->stats);
channel->nb_bytes = 0;
- init_waitqueue_head(&channel->queue);
-
spin_lock_init(&channel->lock);
channel->pointer_read = 0;
channel->pointer_write = 0;
@@ -419,12 +389,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
continue;
}
dev_set_drvdata(tty_dev, channel);
-
- /*
- * Enable again the RX. TX will be enabled when
- * there is something to send
- */
- iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
}
return 0;
@@ -464,6 +428,7 @@ static int ipoctal_write_tty(struct tty_struct *tty,
/* As the IP-OCTAL 485 only supports half duplex, do it manually */
if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
iowrite8(CR_DISABLE_RX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
iowrite8(CR_CMD_ASSERT_RTSN, &channel->regs->w.cr);
}
@@ -472,10 +437,6 @@ static int ipoctal_write_tty(struct tty_struct *tty,
* operations
*/
iowrite8(CR_ENABLE_TX, &channel->regs->w.cr);
- wait_event_interruptible(channel->queue, *channel->board_write);
- iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
-
- *channel->board_write = 0;
return char_copied;
}
@@ -627,8 +588,9 @@ static void ipoctal_set_termios(struct tty_struct *tty,
iowrite8(mr2, &channel->regs->w.mr);
iowrite8(csr, &channel->regs->w.csr);
- /* Enable again the RX */
- iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+	/* Enable the RX again, if it was enabled before */
+ if (channel->rx_enable)
+ iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
}
static void ipoctal_hangup(struct tty_struct *tty)
@@ -648,6 +610,7 @@ static void ipoctal_hangup(struct tty_struct *tty)
tty_port_hangup(&channel->tty_port);
iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -657,6 +620,22 @@ static void ipoctal_hangup(struct tty_struct *tty)
wake_up_interruptible(&channel->tty_port.open_wait);
}
+static void ipoctal_shutdown(struct tty_struct *tty)
+{
+ struct ipoctal_channel *channel = tty->driver_data;
+
+ if (channel == NULL)
+ return;
+
+ iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+ channel->rx_enable = 0;
+ iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
+ iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
+ iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
+ iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr);
+ clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags);
+}
+
static const struct tty_operations ipoctal_fops = {
.ioctl = NULL,
.open = ipoctal_open,
@@ -667,6 +646,7 @@ static const struct tty_operations ipoctal_fops = {
.chars_in_buffer = ipoctal_chars_in_buffer,
.get_icount = ipoctal_get_icount,
.hangup = ipoctal_hangup,
+ .shutdown = ipoctal_shutdown,
};
static int ipoctal_probe(struct ipack_device *dev)
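
The ipoctal RX rework above leans on the tty_port-based flip-buffer API: tty_insert_flip_char() and tty_flip_buffer_push() now take the struct tty_port, so the interrupt handler no longer needs to hold a tty reference at all. A minimal receive sketch (the status handling is illustrative; the flip calls are the in-kernel API):

#include <linux/tty_flip.h>

static void example_rx(struct tty_port *port, const unsigned char *buf,
		       size_t len, bool overrun)
{
	size_t i;

	if (overrun)			/* an overrun does not clobber the current byte */
		tty_insert_flip_char(port, 0, TTY_OVERRUN);

	for (i = 0; i < len; i++)
		tty_insert_flip_char(port, buf[i], TTY_NORMAL);

	tty_flip_buffer_push(port);	/* hand the buffered bytes to the line discipline */
}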
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 62ca575701d3..a350969e5efe 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -1,3 +1,30 @@
+config IRQCHIP
+ def_bool y
+ depends on OF_IRQ
+
+config ARM_GIC
+ bool
+ select IRQ_DOMAIN
+ select MULTI_IRQ_HANDLER
+
+config GIC_NON_BANKED
+ bool
+
+config ARM_VIC
+ bool
+ select IRQ_DOMAIN
+ select MULTI_IRQ_HANDLER
+
+config ARM_VIC_NR
+ int
+ default 4 if ARCH_S5PV210
+ default 3 if ARCH_S5PC100
+ default 2
+ depends on ARM_VIC
+ help
+ The maximum number of VICs available in the system, for
+ power management.
+
config VERSATILE_FPGA_IRQ
bool
select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index bf4609a5bd9d..e65fbf2cdf71 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,4 +1,9 @@
+obj-$(CONFIG_IRQCHIP) += irqchip.o
+
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
+obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o
-obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
+obj-$(CONFIG_ARM_GIC) += irq-gic.o
+obj-$(CONFIG_ARM_VIC) += irq-vic.o
+obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
new file mode 100644
index 000000000000..04d86a9803f4
--- /dev/null
+++ b/drivers/irqchip/exynos-combiner.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Combiner irqchip for EXYNOS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <asm/mach/irq.h>
+
+#include <plat/cpu.h>
+
+#include "irqchip.h"
+
+#define COMBINER_ENABLE_SET 0x0
+#define COMBINER_ENABLE_CLEAR 0x4
+#define COMBINER_INT_STATUS 0xC
+
+static DEFINE_SPINLOCK(irq_controller_lock);
+
+struct combiner_chip_data {
+ unsigned int irq_offset;
+ unsigned int irq_mask;
+ void __iomem *base;
+};
+
+static struct irq_domain *combiner_irq_domain;
+static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
+
+static inline void __iomem *combiner_base(struct irq_data *data)
+{
+ struct combiner_chip_data *combiner_data =
+ irq_data_get_irq_chip_data(data);
+
+ return combiner_data->base;
+}
+
+static void combiner_mask_irq(struct irq_data *data)
+{
+ u32 mask = 1 << (data->hwirq % 32);
+
+ __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
+}
+
+static void combiner_unmask_irq(struct irq_data *data)
+{
+ u32 mask = 1 << (data->hwirq % 32);
+
+ __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
+}
+
+static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned int cascade_irq, combiner_irq;
+ unsigned long status;
+
+ chained_irq_enter(chip, desc);
+
+ spin_lock(&irq_controller_lock);
+ status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
+ spin_unlock(&irq_controller_lock);
+ status &= chip_data->irq_mask;
+
+ if (status == 0)
+ goto out;
+
+ combiner_irq = __ffs(status);
+
+ cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
+ if (unlikely(cascade_irq >= NR_IRQS))
+ do_bad_IRQ(cascade_irq, desc);
+ else
+ generic_handle_irq(cascade_irq);
+
+ out:
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip combiner_chip = {
+ .name = "COMBINER",
+ .irq_mask = combiner_mask_irq,
+ .irq_unmask = combiner_unmask_irq,
+};
+
+static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
+{
+ unsigned int max_nr;
+
+ if (soc_is_exynos5250())
+ max_nr = EXYNOS5_MAX_COMBINER_NR;
+ else
+ max_nr = EXYNOS4_MAX_COMBINER_NR;
+
+ if (combiner_nr >= max_nr)
+ BUG();
+ if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
+ BUG();
+ irq_set_chained_handler(irq, combiner_handle_cascade_irq);
+}
+
+static void __init combiner_init_one(unsigned int combiner_nr,
+ void __iomem *base)
+{
+ combiner_data[combiner_nr].base = base;
+ combiner_data[combiner_nr].irq_offset = irq_find_mapping(
+ combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
+ combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
+
+ /* Disable all interrupts */
+ __raw_writel(combiner_data[combiner_nr].irq_mask,
+ base + COMBINER_ENABLE_CLEAR);
+}
+
+#ifdef CONFIG_OF
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (d->of_node != controller)
+ return -EINVAL;
+
+ if (intsize < 2)
+ return -EINVAL;
+
+ *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
+ *out_type = 0;
+
+ return 0;
+}
+#else
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ return -EINVAL;
+}
+#endif
+
+static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
+ irq_set_chip_data(irq, &combiner_data[hw >> 3]);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops combiner_irq_domain_ops = {
+ .xlate = combiner_irq_domain_xlate,
+ .map = combiner_irq_domain_map,
+};
+
+void __init combiner_init(void __iomem *combiner_base,
+ struct device_node *np)
+{
+ int i, irq, irq_base;
+ unsigned int max_nr, nr_irq;
+
+ if (np) {
+ if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
+ pr_warning("%s: number of combiners not specified, "
+ "setting default as %d.\n",
+ __func__, EXYNOS4_MAX_COMBINER_NR);
+ max_nr = EXYNOS4_MAX_COMBINER_NR;
+ }
+ } else {
+ max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
+ EXYNOS4_MAX_COMBINER_NR;
+ }
+ nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
+
+ irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
+ if (IS_ERR_VALUE(irq_base)) {
+ irq_base = COMBINER_IRQ(0, 0);
+ pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
+ }
+
+ combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
+ &combiner_irq_domain_ops, &combiner_data);
+ if (WARN_ON(!combiner_irq_domain)) {
+ pr_warning("%s: irq domain init failed\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < max_nr; i++) {
+ combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
+ irq = IRQ_SPI(i);
+#ifdef CONFIG_OF
+ if (np)
+ irq = irq_of_parse_and_map(np, i);
+#endif
+ combiner_cascade_irq(i, irq);
+ }
+}
+
+#ifdef CONFIG_OF
+static int __init combiner_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ void __iomem *combiner_base;
+
+ combiner_base = of_iomap(np, 0);
+ if (!combiner_base) {
+ pr_err("%s: failed to map combiner registers\n", __func__);
+ return -ENXIO;
+ }
+
+ combiner_init(combiner_base, np);
+
+ return 0;
+}
+IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
+ combiner_of_init);
+#endif
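The combiner binding above uses a two-cell interrupt specifier, the combiner instance followed by the line within it, and the xlate callback packs the two cells into a single hwirq with MAX_IRQ_IN_COMBINER lines per instance (eight, judging by the 0xff enable mask and the `hw >> 3` chip-data lookup). A minimal sketch of that arithmetic, with purely illustrative numbers:

/* Sketch of the hwirq packing done by combiner_irq_domain_xlate();
 * the value 8 is inferred from the driver above, and the sample
 * combiner/line numbers are hypothetical.
 */
#define MAX_IRQ_IN_COMBINER	8

static inline unsigned long combiner_spec_to_hwirq(unsigned int combiner_nr,
						    unsigned int line)
{
	/* e.g. combiner 23, line 3 -> 23 * 8 + 3 = hwirq 187 */
	return combiner_nr * MAX_IRQ_IN_COMBINER + line;
}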
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
new file mode 100644
index 000000000000..644d72468423
--- /dev/null
+++ b/drivers/irqchip/irq-gic.c
@@ -0,0 +1,845 @@
+/*
+ * linux/arch/arm/common/gic.c
+ *
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Interrupt architecture for the GIC:
+ *
+ * o There is one Interrupt Distributor, which receives interrupts
+ * from system devices and sends them to the Interrupt Controllers.
+ *
+ * o There is one CPU Interface per CPU, which sends interrupts sent
+ * by the Distributor, and interrupts generated locally, to the
+ * associated CPU. The base address of the CPU interface is usually
+ * aliased so that the same address points to different chips depending
+ * on the CPU it is accessed from.
+ *
+ * Note that IRQs 0-31 are special - they are local to each CPU.
+ * As such, the enable set/clear, pending set/clear and active bit
+ * registers are banked per-cpu for these sources.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/irq.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+union gic_base {
+ void __iomem *common_base;
+ void __percpu __iomem **percpu_base;
+};
+
+struct gic_chip_data {
+ union gic_base dist_base;
+ union gic_base cpu_base;
+#ifdef CONFIG_CPU_PM
+ u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+ u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+ u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+ u32 __percpu *saved_ppi_enable;
+ u32 __percpu *saved_ppi_conf;
+#endif
+ struct irq_domain *domain;
+ unsigned int gic_irqs;
+#ifdef CONFIG_GIC_NON_BANKED
+ void __iomem *(*get_base)(union gic_base *);
+#endif
+};
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+/*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering. Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_GIC_CPU_IF 8
+static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+
+/*
+ * Supported arch specific GIC irq extension.
+ * Default make them NULL.
+ */
+struct irq_chip gic_arch_extn = {
+ .irq_eoi = NULL,
+ .irq_mask = NULL,
+ .irq_unmask = NULL,
+ .irq_retrigger = NULL,
+ .irq_set_type = NULL,
+ .irq_set_wake = NULL,
+};
+
+#ifndef MAX_GIC_NR
+#define MAX_GIC_NR 1
+#endif
+
+static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
+
+#ifdef CONFIG_GIC_NON_BANKED
+static void __iomem *gic_get_percpu_base(union gic_base *base)
+{
+ return *__this_cpu_ptr(base->percpu_base);
+}
+
+static void __iomem *gic_get_common_base(union gic_base *base)
+{
+ return base->common_base;
+}
+
+static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+{
+ return data->get_base(&data->dist_base);
+}
+
+static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
+{
+ return data->get_base(&data->cpu_base);
+}
+
+static inline void gic_set_base_accessor(struct gic_chip_data *data,
+ void __iomem *(*f)(union gic_base *))
+{
+ data->get_base = f;
+}
+#else
+#define gic_data_dist_base(d) ((d)->dist_base.common_base)
+#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
+#define gic_set_base_accessor(d,f)
+#endif
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+ return gic_data_dist_base(gic_data);
+}
+
+static inline void __iomem *gic_cpu_base(struct irq_data *d)
+{
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+ return gic_data_cpu_base(gic_data);
+}
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+ return d->hwirq;
+}
+
+/*
+ * Routines to acknowledge, disable and enable interrupts
+ */
+static void gic_mask_irq(struct irq_data *d)
+{
+ u32 mask = 1 << (gic_irq(d) % 32);
+
+ raw_spin_lock(&irq_controller_lock);
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
+ if (gic_arch_extn.irq_mask)
+ gic_arch_extn.irq_mask(d);
+ raw_spin_unlock(&irq_controller_lock);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ u32 mask = 1 << (gic_irq(d) % 32);
+
+ raw_spin_lock(&irq_controller_lock);
+ if (gic_arch_extn.irq_unmask)
+ gic_arch_extn.irq_unmask(d);
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
+ raw_spin_unlock(&irq_controller_lock);
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+ if (gic_arch_extn.irq_eoi) {
+ raw_spin_lock(&irq_controller_lock);
+ gic_arch_extn.irq_eoi(d);
+ raw_spin_unlock(&irq_controller_lock);
+ }
+
+ writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+ void __iomem *base = gic_dist_base(d);
+ unsigned int gicirq = gic_irq(d);
+ u32 enablemask = 1 << (gicirq % 32);
+ u32 enableoff = (gicirq / 32) * 4;
+ u32 confmask = 0x2 << ((gicirq % 16) * 2);
+ u32 confoff = (gicirq / 16) * 4;
+ bool enabled = false;
+ u32 val;
+
+ /* Interrupt configuration for SGIs can't be changed */
+ if (gicirq < 16)
+ return -EINVAL;
+
+ if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ raw_spin_lock(&irq_controller_lock);
+
+ if (gic_arch_extn.irq_set_type)
+ gic_arch_extn.irq_set_type(d, type);
+
+ val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+ if (type == IRQ_TYPE_LEVEL_HIGH)
+ val &= ~confmask;
+ else if (type == IRQ_TYPE_EDGE_RISING)
+ val |= confmask;
+
+ /*
+ * As recommended by the spec, disable the interrupt before changing
+ * the configuration
+ */
+ if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
+ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
+ enabled = true;
+ }
+
+ writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
+
+ if (enabled)
+ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+
+ raw_spin_unlock(&irq_controller_lock);
+
+ return 0;
+}
+
+static int gic_retrigger(struct irq_data *d)
+{
+ if (gic_arch_extn.irq_retrigger)
+ return gic_arch_extn.irq_retrigger(d);
+
+ return -ENXIO;
+}
+
+#ifdef CONFIG_SMP
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+ unsigned int shift = (gic_irq(d) % 4) * 8;
+ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ u32 val, mask, bit;
+
+ if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ mask = 0xff << shift;
+ bit = gic_cpu_map[cpu] << shift;
+
+ raw_spin_lock(&irq_controller_lock);
+ val = readl_relaxed(reg) & ~mask;
+ writel_relaxed(val | bit, reg);
+ raw_spin_unlock(&irq_controller_lock);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+ int ret = -ENXIO;
+
+ if (gic_arch_extn.irq_set_wake)
+ ret = gic_arch_extn.irq_set_wake(d, on);
+
+ return ret;
+}
+
+#else
+#define gic_set_wake NULL
+#endif
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+ u32 irqstat, irqnr;
+ struct gic_chip_data *gic = &gic_data[0];
+ void __iomem *cpu_base = gic_data_cpu_base(gic);
+
+ do {
+ irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+ irqnr = irqstat & ~0x1c00;
+
+ if (likely(irqnr > 15 && irqnr < 1021)) {
+ irqnr = irq_find_mapping(gic->domain, irqnr);
+ handle_IRQ(irqnr, regs);
+ continue;
+ }
+ if (irqnr < 16) {
+ writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+#ifdef CONFIG_SMP
+ handle_IPI(irqnr, regs);
+#endif
+ continue;
+ }
+ break;
+ } while (1);
+}
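gic_handle_irq() strips bits [12:10] from the acknowledge value before mapping the interrupt ID; in the GIC architecture those bits carry the requesting CPU for SGIs, while bits [9:0] carry the ID itself. A small sketch of the field split, stated here as background rather than as anything added by this patch:

/* Sketch: GIC CPU interface acknowledge register fields, as assumed by
 * the handlers above (interrupt ID in [9:0], SGI source CPU in [12:10]).
 */
static inline unsigned int gic_ack_irqnr(u32 irqstat)
{
	return irqstat & 0x3ff;		/* interrupt ID */
}

static inline unsigned int gic_ack_srccpu(u32 irqstat)
{
	return (irqstat >> 10) & 0x7;	/* requesting CPU, SGIs only */
}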
+
+static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct gic_chip_data *chip_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned int cascade_irq, gic_irq;
+ unsigned long status;
+
+ chained_irq_enter(chip, desc);
+
+ raw_spin_lock(&irq_controller_lock);
+ status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
+ raw_spin_unlock(&irq_controller_lock);
+
+ gic_irq = (status & 0x3ff);
+ if (gic_irq == 1023)
+ goto out;
+
+ cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
+ if (unlikely(gic_irq < 32 || gic_irq > 1020))
+ do_bad_IRQ(cascade_irq, desc);
+ else
+ generic_handle_irq(cascade_irq);
+
+ out:
+ chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip gic_chip = {
+ .name = "GIC",
+ .irq_mask = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_eoi_irq,
+ .irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gic_set_affinity,
+#endif
+ .irq_set_wake = gic_set_wake,
+};
+
+void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
+{
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+ if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
+ BUG();
+ irq_set_chained_handler(irq, gic_handle_cascade_irq);
+}
+
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+ void __iomem *base = gic_data_dist_base(gic);
+ u32 mask, i;
+
+ for (i = mask = 0; i < 32; i += 4) {
+ mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+ mask |= mask >> 16;
+ mask |= mask >> 8;
+ if (mask)
+ break;
+ }
+
+ if (!mask)
+ pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+ return mask;
+}
+
+static void __init gic_dist_init(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ u32 cpumask;
+ unsigned int gic_irqs = gic->gic_irqs;
+ void __iomem *base = gic_data_dist_base(gic);
+
+ writel_relaxed(0, base + GIC_DIST_CTRL);
+
+ /*
+ * Set all global interrupts to be level triggered, active low.
+ */
+ for (i = 32; i < gic_irqs; i += 16)
+ writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
+
+ /*
+ * Set all global interrupts to this CPU only.
+ */
+ cpumask = gic_get_cpumask(gic);
+ cpumask |= cpumask << 8;
+ cpumask |= cpumask << 16;
+ for (i = 32; i < gic_irqs; i += 4)
+ writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
+
+ /*
+ * Set priority on all global interrupts.
+ */
+ for (i = 32; i < gic_irqs; i += 4)
+ writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
+
+ /*
+ * Disable all interrupts. Leave the PPI and SGIs alone
+ * as these enables are banked registers.
+ */
+ for (i = 32; i < gic_irqs; i += 32)
+ writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+
+ writel_relaxed(1, base + GIC_DIST_CTRL);
+}
+
+static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
+{
+ void __iomem *dist_base = gic_data_dist_base(gic);
+ void __iomem *base = gic_data_cpu_base(gic);
+ unsigned int cpu_mask, cpu = smp_processor_id();
+ int i;
+
+ /*
+ * Get what the GIC says our CPU mask is.
+ */
+ BUG_ON(cpu >= NR_GIC_CPU_IF);
+ cpu_mask = gic_get_cpumask(gic);
+ gic_cpu_map[cpu] = cpu_mask;
+
+ /*
+ * Clear our mask from the other map entries in case they're
+ * still undefined.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ if (i != cpu)
+ gic_cpu_map[i] &= ~cpu_mask;
+
+ /*
+ * Deal with the banked PPI and SGI interrupts - disable all
+ * PPI interrupts, ensure all SGI interrupts are enabled.
+ */
+ writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+ writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+ /*
+ * Set priority on PPI and SGI interrupts
+ */
+ for (i = 0; i < 32; i += 4)
+ writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
+ writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
+ writel_relaxed(1, base + GIC_CPU_CTRL);
+}
+
+#ifdef CONFIG_CPU_PM
+/*
+ * Saves the GIC distributor registers during suspend or idle. Must be called
+ * with interrupts disabled but before powering down the GIC. After calling
+ * this function, no interrupts will be delivered by the GIC, and another
+ * platform-specific wakeup source must be enabled.
+ */
+static void gic_dist_save(unsigned int gic_nr)
+{
+ unsigned int gic_irqs;
+ void __iomem *dist_base;
+ int i;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+ if (!dist_base)
+ return;
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ gic_data[gic_nr].saved_spi_conf[i] =
+ readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ gic_data[gic_nr].saved_spi_target[i] =
+ readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ gic_data[gic_nr].saved_spi_enable[i] =
+ readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+}
+
+/*
+ * Restores the GIC distributor registers during resume or when coming out of
+ * idle. Must be called before enabling interrupts. If a level interrupt
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
+ * the GIC and need to be handled by the platform-specific wakeup source.
+ */
+static void gic_dist_restore(unsigned int gic_nr)
+{
+ unsigned int gic_irqs;
+ unsigned int i;
+ void __iomem *dist_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ gic_irqs = gic_data[gic_nr].gic_irqs;
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+ if (!dist_base)
+ return;
+
+ writel_relaxed(0, dist_base + GIC_DIST_CTRL);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
+ dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(0xa0a0a0a0,
+ dist_base + GIC_DIST_PRI + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
+ dist_base + GIC_DIST_TARGET + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+ writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
+ dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ writel_relaxed(1, dist_base + GIC_DIST_CTRL);
+}
+
+static void gic_cpu_save(unsigned int gic_nr)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+}
+
+static void gic_cpu_restore(unsigned int gic_nr)
+{
+ int i;
+ u32 *ptr;
+ void __iomem *dist_base;
+ void __iomem *cpu_base;
+
+ if (gic_nr >= MAX_GIC_NR)
+ BUG();
+
+ dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+ if (!dist_base || !cpu_base)
+ return;
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+ ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+ writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+
+ for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+ writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
+
+ writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
+ writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
+}
+
+static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+{
+ int i;
+
+ for (i = 0; i < MAX_GIC_NR; i++) {
+#ifdef CONFIG_GIC_NON_BANKED
+ /* Skip over unused GICs */
+ if (!gic_data[i].get_base)
+ continue;
+#endif
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ gic_cpu_save(i);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ gic_cpu_restore(i);
+ break;
+ case CPU_CLUSTER_PM_ENTER:
+ gic_dist_save(i);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ gic_dist_restore(i);
+ break;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gic_notifier_block = {
+ .notifier_call = gic_notifier,
+};
+
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+ gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+ sizeof(u32));
+ BUG_ON(!gic->saved_ppi_enable);
+
+ gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
+ sizeof(u32));
+ BUG_ON(!gic->saved_ppi_conf);
+
+ if (gic == &gic_data[0])
+ cpu_pm_register_notifier(&gic_notifier_block);
+}
+#else
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+}
+#endif
+
+#ifdef CONFIG_SMP
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+{
+ int cpu;
+ unsigned long map = 0;
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= 1 << cpu_logical_map(cpu);
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ dsb();
+
+ /* this always happens on GIC0 */
+ writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+#endif
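The softint write above encodes the physical CPU target list in bits [23:16] and the SGI number in bits [3:0], leaving the target-list filter bits at zero. A one-line sketch of that encoding, shown only to make the register layout explicit:

/* Sketch of the GIC_DIST_SOFTINT value written by gic_raise_softirq();
 * assumes the default "use target list" filter in bits [25:24].
 */
static inline u32 gic_softint_val(unsigned long cpu_map, unsigned int sgi)
{
	return (cpu_map & 0xff) << 16 | (sgi & 0xf);
}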
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (hw < 32) {
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, &gic_chip,
+ handle_percpu_devid_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+ } else {
+ irq_set_chip_and_handler(irq, &gic_chip,
+ handle_fasteoi_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (d->of_node != controller)
+ return -EINVAL;
+ if (intsize < 3)
+ return -EINVAL;
+
+ /* Get the interrupt number and add 16 to skip over SGIs */
+ *out_hwirq = intspec[1] + 16;
+
+ /* For SPIs, we need to add 16 more to get the GIC irq ID number */
+ if (!intspec[0])
+ *out_hwirq += 16;
+
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+
+const struct irq_domain_ops gic_irq_domain_ops = {
+ .map = gic_irq_domain_map,
+ .xlate = gic_irq_domain_xlate,
+};
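gic_irq_domain_xlate() turns the usual three-cell specifier (SPI/PPI flag, number, flags) into a domain hwirq by skipping the 16 SGIs, and additionally the 16 PPIs for SPIs. Two worked examples, using hypothetical interrupt numbers:

/* Sketch of the translation performed by gic_irq_domain_xlate():
 *
 *   { 0, 57, IRQ_TYPE_LEVEL_HIGH }   hypothetical SPI 57
 *       hwirq = 57 + 16 (skip SGIs) + 16 (skip PPIs) = 89
 *
 *   { 1, 13, IRQ_TYPE_LEVEL_HIGH }   hypothetical PPI 13
 *       hwirq = 13 + 16 (skip SGIs)                  = 29
 */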
+
+void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+ void __iomem *dist_base, void __iomem *cpu_base,
+ u32 percpu_offset, struct device_node *node)
+{
+ irq_hw_number_t hwirq_base;
+ struct gic_chip_data *gic;
+ int gic_irqs, irq_base, i;
+
+ BUG_ON(gic_nr >= MAX_GIC_NR);
+
+ gic = &gic_data[gic_nr];
+#ifdef CONFIG_GIC_NON_BANKED
+	if (percpu_offset) { /* Franken-GIC without banked registers... */
+ unsigned int cpu;
+
+ gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
+ gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
+ if (WARN_ON(!gic->dist_base.percpu_base ||
+ !gic->cpu_base.percpu_base)) {
+ free_percpu(gic->dist_base.percpu_base);
+ free_percpu(gic->cpu_base.percpu_base);
+ return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+ *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
+ *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+ }
+
+ gic_set_base_accessor(gic, gic_get_percpu_base);
+ } else
+#endif
+ { /* Normal, sane GIC... */
+ WARN(percpu_offset,
+ "GIC_NON_BANKED not enabled, ignoring %08x offset!",
+ percpu_offset);
+ gic->dist_base.common_base = dist_base;
+ gic->cpu_base.common_base = cpu_base;
+ gic_set_base_accessor(gic, gic_get_common_base);
+ }
+
+ /*
+ * Initialize the CPU interface map to all CPUs.
+ * It will be refined as each CPU probes its ID.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ gic_cpu_map[i] = 0xff;
+
+ /*
+ * For primary GICs, skip over SGIs.
+ * For secondary GICs, skip over PPIs, too.
+ */
+ if (gic_nr == 0 && (irq_start & 31) > 0) {
+ hwirq_base = 16;
+ if (irq_start != -1)
+ irq_start = (irq_start & ~31) + 16;
+ } else {
+ hwirq_base = 32;
+ }
+
+ /*
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources.
+ */
+ gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
+ gic_irqs = (gic_irqs + 1) * 32;
+ if (gic_irqs > 1020)
+ gic_irqs = 1020;
+ gic->gic_irqs = gic_irqs;
+
+ gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+ irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
+ if (IS_ERR_VALUE(irq_base)) {
+ WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
+ irq_start);
+ irq_base = irq_start;
+ }
+ gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+ hwirq_base, &gic_irq_domain_ops, gic);
+ if (WARN_ON(!gic->domain))
+ return;
+
+#ifdef CONFIG_SMP
+ set_smp_cross_call(gic_raise_softirq);
+#endif
+
+ set_handle_irq(gic_handle_irq);
+
+ gic_chip.flags |= gic_arch_extn.flags;
+ gic_dist_init(gic);
+ gic_cpu_init(gic);
+ gic_pm_init(gic);
+}
+
+void __cpuinit gic_secondary_init(unsigned int gic_nr)
+{
+ BUG_ON(gic_nr >= MAX_GIC_NR);
+
+ gic_cpu_init(&gic_data[gic_nr]);
+}
+
+#ifdef CONFIG_OF
+static int gic_cnt __initdata = 0;
+
+int __init gic_of_init(struct device_node *node, struct device_node *parent)
+{
+ void __iomem *cpu_base;
+ void __iomem *dist_base;
+ u32 percpu_offset;
+ int irq;
+
+ if (WARN_ON(!node))
+ return -ENODEV;
+
+ dist_base = of_iomap(node, 0);
+ WARN(!dist_base, "unable to map gic dist registers\n");
+
+ cpu_base = of_iomap(node, 1);
+ WARN(!cpu_base, "unable to map gic cpu registers\n");
+
+ if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
+ percpu_offset = 0;
+
+ gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+
+ if (parent) {
+ irq = irq_of_parse_and_map(node, 0);
+ gic_cascade_irq(gic_cnt, irq);
+ }
+ gic_cnt++;
+ return 0;
+}
+IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
+IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
+
+#endif
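Platforms that have not yet moved to DT call gic_init_bases() (and, for secondary controllers, gic_cascade_irq()) directly from board code; gic_of_init() above is the DT wrapper around the same entry point. A minimal non-DT sketch, where the physical addresses and the Linux IRQ base are placeholders rather than values for any particular SoC:

/* Sketch only: addresses and the Linux IRQ base are placeholders. */
static void __init board_gic_init(void)
{
	void __iomem *dist_base = ioremap(0x10490000, 0x1000);	/* placeholder */
	void __iomem *cpu_base  = ioremap(0x10480000, 0x100);	/* placeholder */

	/* primary GIC, first Linux IRQ 32, banked (common) registers */
	gic_init_bases(0, 32, dist_base, cpu_base, 0, NULL);
}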
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
new file mode 100644
index 000000000000..3cf97aaebe40
--- /dev/null
+++ b/drivers/irqchip/irq-vic.c
@@ -0,0 +1,489 @@
+/*
+ * linux/arch/arm/common/vic.c
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/irqchip/arm-vic.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#include "irqchip.h"
+
+#define VIC_IRQ_STATUS 0x00
+#define VIC_FIQ_STATUS 0x04
+#define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */
+#define VIC_INT_SOFT 0x18
+#define VIC_INT_SOFT_CLEAR 0x1c
+#define VIC_PROTECT 0x20
+#define VIC_PL190_VECT_ADDR 0x30 /* PL190 only */
+#define VIC_PL190_DEF_VECT_ADDR 0x34 /* PL190 only */
+
+#define VIC_VECT_ADDR0 0x100 /* 0 to 15 (0..31 PL192) */
+#define VIC_VECT_CNTL0 0x200 /* 0 to 15 (0..31 PL192) */
+#define VIC_ITCR 0x300 /* VIC test control register */
+
+#define VIC_VECT_CNTL_ENABLE (1 << 5)
+
+#define VIC_PL192_VECT_ADDR 0xF00
+
+/**
+ * struct vic_device - VIC PM device
+ * @irq: The IRQ number for the base of the VIC.
+ * @base: The register base for the VIC.
+ * @valid_sources: A bitmask of valid interrupts
+ * @resume_sources: A bitmask of interrupts for resume.
+ * @resume_irqs: The IRQs enabled for resume.
+ * @int_select: Save for VIC_INT_SELECT.
+ * @int_enable: Save for VIC_INT_ENABLE.
+ * @soft_int: Save for VIC_INT_SOFT.
+ * @protect: Save for VIC_PROTECT.
+ * @domain: The IRQ domain for the VIC.
+ */
+struct vic_device {
+ void __iomem *base;
+ int irq;
+ u32 valid_sources;
+ u32 resume_sources;
+ u32 resume_irqs;
+ u32 int_select;
+ u32 int_enable;
+ u32 soft_int;
+ u32 protect;
+ struct irq_domain *domain;
+};
+
+/* we cannot allocate memory when VICs are initially registered */
+static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
+
+static int vic_id;
+
+static void vic_handle_irq(struct pt_regs *regs);
+
+/**
+ * vic_init2 - common initialisation code
+ * @base: Base of the VIC.
+ *
+ * Common initialisation code for registration
+ * and resume.
+*/
+static void vic_init2(void __iomem *base)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+ writel(VIC_VECT_CNTL_ENABLE | i, reg);
+ }
+
+ writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+}
+
+#ifdef CONFIG_PM
+static void resume_one_vic(struct vic_device *vic)
+{
+ void __iomem *base = vic->base;
+
+ printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);
+
+ /* re-initialise static settings */
+ vic_init2(base);
+
+ writel(vic->int_select, base + VIC_INT_SELECT);
+ writel(vic->protect, base + VIC_PROTECT);
+
+ /* set the enabled ints and then clear the non-enabled */
+ writel(vic->int_enable, base + VIC_INT_ENABLE);
+ writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR);
+
+ /* and the same for the soft-int register */
+
+ writel(vic->soft_int, base + VIC_INT_SOFT);
+ writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_resume(void)
+{
+ int id;
+
+ for (id = vic_id - 1; id >= 0; id--)
+ resume_one_vic(vic_devices + id);
+}
+
+static void suspend_one_vic(struct vic_device *vic)
+{
+ void __iomem *base = vic->base;
+
+ printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);
+
+ vic->int_select = readl(base + VIC_INT_SELECT);
+ vic->int_enable = readl(base + VIC_INT_ENABLE);
+ vic->soft_int = readl(base + VIC_INT_SOFT);
+ vic->protect = readl(base + VIC_PROTECT);
+
+ /* set the interrupts (if any) that are used for
+ * resuming the system */
+
+ writel(vic->resume_irqs, base + VIC_INT_ENABLE);
+ writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static int vic_suspend(void)
+{
+ int id;
+
+ for (id = 0; id < vic_id; id++)
+ suspend_one_vic(vic_devices + id);
+
+ return 0;
+}
+
+struct syscore_ops vic_syscore_ops = {
+ .suspend = vic_suspend,
+ .resume = vic_resume,
+};
+
+/**
+ * vic_pm_init - initcall to register VIC pm
+ *
+ * This is called via late_initcall() to register
+ * the resources for the VICs due to the early
+ * nature of the VIC's registration.
+*/
+static int __init vic_pm_init(void)
+{
+ if (vic_id > 0)
+ register_syscore_ops(&vic_syscore_ops);
+
+ return 0;
+}
+late_initcall(vic_pm_init);
+#endif /* CONFIG_PM */
+
+static struct irq_chip vic_chip;
+
+static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct vic_device *v = d->host_data;
+
+ /* Skip invalid IRQs, only register handlers for the real ones */
+ if (!(v->valid_sources & (1 << hwirq)))
+ return -ENOTSUPP;
+ irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
+ irq_set_chip_data(irq, v->base);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ return 0;
+}
+
+/*
+ * Handle each interrupt in a single VIC. Returns non-zero if we've
+ * handled at least one interrupt. This reads the status register
+ * before handling each interrupt, which is necessary given that
+ * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
+ */
+static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
+{
+ u32 stat, irq;
+ int handled = 0;
+
+ while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
+ irq = ffs(stat) - 1;
+ handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
+ handled = 1;
+ }
+
+ return handled;
+}
+
+/*
+ * Keep iterating over all registered VIC's until there are no pending
+ * interrupts.
+ */
+static asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+{
+ int i, handled;
+
+ do {
+ for (i = 0, handled = 0; i < vic_id; ++i)
+ handled |= handle_one_vic(&vic_devices[i], regs);
+ } while (handled);
+}
+
+static struct irq_domain_ops vic_irqdomain_ops = {
+ .map = vic_irqdomain_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * vic_register() - Register a VIC.
+ * @base: The base address of the VIC.
+ * @irq: The base IRQ for the VIC.
+ * @valid_sources: bitmask of valid interrupts
+ * @resume_sources: bitmask of interrupts allowed for resume sources.
+ * @node: The device tree node associated with the VIC.
+ *
+ * Register the VIC with the system device tree so that it can be notified
+ * of suspend and resume requests and ensure that the correct actions are
+ * taken to re-instate the settings on resume.
+ *
+ * This also configures the IRQ domain for the VIC.
+ */
+static void __init vic_register(void __iomem *base, unsigned int irq,
+ u32 valid_sources, u32 resume_sources,
+ struct device_node *node)
+{
+ struct vic_device *v;
+ int i;
+
+ if (vic_id >= ARRAY_SIZE(vic_devices)) {
+ printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
+ return;
+ }
+
+ v = &vic_devices[vic_id];
+ v->base = base;
+ v->valid_sources = valid_sources;
+ v->resume_sources = resume_sources;
+ v->irq = irq;
+ set_handle_irq(vic_handle_irq);
+ vic_id++;
+ v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
+ &vic_irqdomain_ops, v);
+ /* create an IRQ mapping for each valid IRQ */
+ for (i = 0; i < fls(valid_sources); i++)
+ if (valid_sources & (1 << i))
+ irq_create_mapping(v->domain, i);
+}
+
+static void vic_ack_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+ /* moreover, clear the soft-triggered, in case it was the reason */
+ writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_mask_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static void vic_unmask_irq(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ writel(1 << irq, base + VIC_INT_ENABLE);
+}
+
+#if defined(CONFIG_PM)
+static struct vic_device *vic_from_irq(unsigned int irq)
+{
+ struct vic_device *v = vic_devices;
+ unsigned int base_irq = irq & ~31;
+ int id;
+
+ for (id = 0; id < vic_id; id++, v++) {
+ if (v->irq == base_irq)
+ return v;
+ }
+
+ return NULL;
+}
+
+static int vic_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct vic_device *v = vic_from_irq(d->irq);
+ unsigned int off = d->hwirq;
+ u32 bit = 1 << off;
+
+ if (!v)
+ return -EINVAL;
+
+ if (!(bit & v->resume_sources))
+ return -EINVAL;
+
+ if (on)
+ v->resume_irqs |= bit;
+ else
+ v->resume_irqs &= ~bit;
+
+ return 0;
+}
+#else
+#define vic_set_wake NULL
+#endif /* CONFIG_PM */
+
+static struct irq_chip vic_chip = {
+ .name = "VIC",
+ .irq_ack = vic_ack_irq,
+ .irq_mask = vic_mask_irq,
+ .irq_unmask = vic_unmask_irq,
+ .irq_set_wake = vic_set_wake,
+};
+
+static void __init vic_disable(void __iomem *base)
+{
+ writel(0, base + VIC_INT_SELECT);
+ writel(0, base + VIC_INT_ENABLE);
+ writel(~0, base + VIC_INT_ENABLE_CLEAR);
+ writel(0, base + VIC_ITCR);
+ writel(~0, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void __init vic_clear_interrupts(void __iomem *base)
+{
+ unsigned int i;
+
+ writel(0, base + VIC_PL190_VECT_ADDR);
+ for (i = 0; i < 19; i++) {
+ unsigned int value;
+
+ value = readl(base + VIC_PL190_VECT_ADDR);
+ writel(value, base + VIC_PL190_VECT_ADDR);
+ }
+}
+
+/*
+ * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
+ * The original cell has 32 interrupts, while the modified one has 64,
+ * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case
+ * the probe function is called twice, with base set to offset 000
+ * and 020 within the page. We call this "second block".
+ */
+static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources, struct device_node *node)
+{
+ unsigned int i;
+ int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
+
+ /* Disable all interrupts initially. */
+ vic_disable(base);
+
+ /*
+ * Make sure we clear all existing interrupts. The vector registers
+ * in this cell are after the second block of general registers,
+ * so we can address them using standard offsets, but only from
+ * the second base address, which is 0x20 in the page
+ */
+ if (vic_2nd_block) {
+ vic_clear_interrupts(base);
+
+		/* ST has 16 vectors as well, but we don't enable them for now */
+ for (i = 0; i < 16; i++) {
+ void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+ writel(0, reg);
+ }
+
+ writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+ }
+
+ vic_register(base, irq_start, vic_sources, 0, node);
+}
+
+void __init __vic_init(void __iomem *base, int irq_start,
+ u32 vic_sources, u32 resume_sources,
+ struct device_node *node)
+{
+ unsigned int i;
+ u32 cellid = 0;
+ enum amba_vendor vendor;
+
+ /* Identify which VIC cell this one is, by reading the ID */
+ for (i = 0; i < 4; i++) {
+ void __iomem *addr;
+ addr = (void __iomem *)((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
+ cellid |= (readl(addr) & 0xff) << (8 * i);
+ }
+ vendor = (cellid >> 12) & 0xff;
+ printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n",
+ base, cellid, vendor);
+
+ switch(vendor) {
+ case AMBA_VENDOR_ST:
+ vic_init_st(base, irq_start, vic_sources, node);
+ return;
+ default:
+ printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
+ /* fall through */
+ case AMBA_VENDOR_ARM:
+ break;
+ }
+
+ /* Disable all interrupts initially. */
+ vic_disable(base);
+
+ /* Make sure we clear all existing interrupts */
+ vic_clear_interrupts(base);
+
+ vic_init2(base);
+
+ vic_register(base, irq_start, vic_sources, resume_sources, node);
+}
+
+/**
+ * vic_init() - initialise a vectored interrupt controller
+ * @base: iomem base address
+ * @irq_start: starting interrupt number, must be multiple of 32
+ * @vic_sources: bitmask of interrupt sources to allow
+ * @resume_sources: bitmask of interrupt sources to allow for resume
+ */
+void __init vic_init(void __iomem *base, unsigned int irq_start,
+ u32 vic_sources, u32 resume_sources)
+{
+ __vic_init(base, irq_start, vic_sources, resume_sources, NULL);
+}
+
+#ifdef CONFIG_OF
+int __init vic_of_init(struct device_node *node, struct device_node *parent)
+{
+ void __iomem *regs;
+
+ if (WARN(parent, "non-root VICs are not supported"))
+ return -EINVAL;
+
+ regs = of_iomap(node, 0);
+ if (WARN_ON(!regs))
+ return -EIO;
+
+ /*
+ * Passing 0 as first IRQ makes the simple domain allocate descriptors
+ */
+ __vic_init(regs, 0, ~0, ~0, node);
+
+ return 0;
+}
+IRQCHIP_DECLARE(arm_pl190_vic, "arm,pl190-vic", vic_of_init);
+IRQCHIP_DECLARE(arm_pl192_vic, "arm,pl192-vic", vic_of_init);
+IRQCHIP_DECLARE(arm_versatile_vic, "arm,versatile-vic", vic_of_init);
+#endif /* CONFIG_OF */
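As with the GIC, non-DT users keep calling vic_init() from board code, while DT platforms go through vic_of_init() above. A minimal sketch with placeholder values; note that irq_start must be a multiple of 32:

/* Sketch only: the base address and masks below are placeholders. */
static void __init board_vic_init(void)
{
	void __iomem *vic_base = ioremap(0x10140000, 0x1000);	/* placeholder */

	/* IRQs 32..63, all 32 sources valid, no resume sources */
	vic_init(vic_base, 32, ~0, 0);
}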
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
new file mode 100644
index 000000000000..f496afce29de
--- /dev/null
+++ b/drivers/irqchip/irqchip.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2012 Thomas Petazzoni
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/of_irq.h>
+
+#include "irqchip.h"
+
+/*
+ * This special of_device_id is the sentinel at the end of the
+ * of_device_id[] array of all irqchips. It is automatically placed at
+ * the end of the array by the linker, thanks to being part of a
+ * special section.
+ */
+static const struct of_device_id
+irqchip_of_match_end __used __section(__irqchip_of_end);
+
+extern struct of_device_id __irqchip_begin[];
+
+void __init irqchip_init(void)
+{
+ of_irq_init(__irqchip_begin);
+}
diff --git a/drivers/irqchip/irqchip.h b/drivers/irqchip/irqchip.h
new file mode 100644
index 000000000000..e445ba2d6add
--- /dev/null
+++ b/drivers/irqchip/irqchip.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012 Thomas Petazzoni
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _IRQCHIP_H
+#define _IRQCHIP_H
+
+/*
+ * This macro must be used by the different irqchip drivers to declare
+ * the association between their DT compatible string and their
+ * initialization function.
+ *
+ * @name: name that must be unique across all IRQCHIP_DECLARE() uses in
+ * the same file.
+ * @compstr: compatible string of the irqchip driver
+ * @fn: initialization function
+ */
+#define IRQCHIP_DECLARE(name,compstr,fn) \
+ static const struct of_device_id irqchip_of_match_##name \
+ __used __section(__irqchip_of_table) \
+ = { .compatible = compstr, .data = fn }
+
+#endif
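IRQCHIP_DECLARE() only works because the linker gathers every __irqchip_of_table entry into one contiguous array, bounded by __irqchip_begin and the sentinel placed in __irqchip_of_end by irqchip.c. A sketch of the corresponding linker-script fragment, illustrating the mechanism rather than quoting the generic linker script headers literally:

/* Sketch of the section gathering assumed by irqchip_init(). */
#define IRQCHIP_OF_MATCH_TABLE()					\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__irqchip_begin) = .;				\
	*(__irqchip_of_table)						\
	*(__irqchip_of_end)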
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 80e1d2fd9d4c..8527743b5cef 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -25,6 +25,8 @@
#include <linux/of_irq.h>
#include <linux/spinlock.h>
+#include "irqchip.h"
+
static DEFINE_SPINLOCK(lock);
/* spear300 shared irq registers offsets and masks */
@@ -300,6 +302,7 @@ int __init spear300_shirq_of_init(struct device_node *np,
return shirq_init(spear300_shirq_blocks,
ARRAY_SIZE(spear300_shirq_blocks), np);
}
+IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init);
int __init spear310_shirq_of_init(struct device_node *np,
struct device_node *parent)
@@ -307,6 +310,7 @@ int __init spear310_shirq_of_init(struct device_node *np,
return shirq_init(spear310_shirq_blocks,
ARRAY_SIZE(spear310_shirq_blocks), np);
}
+IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init);
int __init spear320_shirq_of_init(struct device_node *np,
struct device_node *parent)
@@ -314,3 +318,4 @@ int __init spear320_shirq_of_init(struct device_node *np,
return shirq_init(spear320_shirq_blocks,
ARRAY_SIZE(spear320_shirq_blocks), np);
}
+IRQCHIP_DECLARE(spear320_shirq, "st,spear320-shirq", spear320_shirq_of_init);
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 86cd75a0e84d..ef661acdda17 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -22,6 +22,7 @@ if ISDN
menuconfig ISDN_I4L
tristate "Old ISDN4Linux (deprecated)"
+ depends on TTY
---help---
This driver allows you to use an ISDN adapter for networking
connections and as dialin/out device. The isdn-tty's have a built
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index 15c3ffd9d860..f04686580040 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -18,6 +18,7 @@ config CAPI_TRACE
config ISDN_CAPI_MIDDLEWARE
bool "CAPI2.0 Middleware support"
+ depends on TTY
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
diff --git a/drivers/isdn/divert/divert_init.c b/drivers/isdn/divert/divert_init.c
index 5374c25f036c..267dede13bfd 100644
--- a/drivers/isdn/divert/divert_init.c
+++ b/drivers/isdn/divert/divert_init.c
@@ -22,13 +22,13 @@ MODULE_LICENSE("GPL");
/****************************************/
/* structure containing interface to hl */
/****************************************/
-isdn_divert_if divert_if =
-{ DIVERT_IF_MAGIC, /* magic value */
- DIVERT_CMD_REG, /* register cmd */
- ll_callback, /* callback routine from ll */
- NULL, /* command still not specified */
- NULL, /* drv_to_name */
- NULL, /* name_to_drv */
+isdn_divert_if divert_if = {
+ DIVERT_IF_MAGIC, /* magic value */
+ DIVERT_CMD_REG, /* register cmd */
+ ll_callback, /* callback routine from ll */
+ NULL, /* command still not specified */
+ NULL, /* drv_to_name */
+ NULL, /* name_to_drv */
};
/*************************/
@@ -36,14 +36,15 @@ isdn_divert_if divert_if =
/* no cmd line parms */
/*************************/
static int __init divert_init(void)
-{ int i;
+{
+ int i;
- if (divert_dev_init())
- { printk(KERN_WARNING "dss1_divert: cannot install device, not loaded\n");
+ if (divert_dev_init()) {
+ printk(KERN_WARNING "dss1_divert: cannot install device, not loaded\n");
return (-EIO);
}
- if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR)
- { divert_dev_deinit();
+ if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR) {
+ divert_dev_deinit();
printk(KERN_WARNING "dss1_divert: error %d registering module, not loaded\n", i);
return (-EIO);
}
@@ -61,13 +62,13 @@ static void __exit divert_exit(void)
spin_lock_irqsave(&divert_lock, flags);
divert_if.cmd = DIVERT_CMD_REL; /* release */
- if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR)
- { printk(KERN_WARNING "dss1_divert: error %d releasing module\n", i);
+ if ((i = DIVERT_REG_NAME(&divert_if)) != DIVERT_NO_ERR) {
+ printk(KERN_WARNING "dss1_divert: error %d releasing module\n", i);
spin_unlock_irqrestore(&divert_lock, flags);
return;
}
- if (divert_dev_deinit())
- { printk(KERN_WARNING "dss1_divert: device busy, remove cancelled\n");
+ if (divert_dev_deinit()) {
+ printk(KERN_WARNING "dss1_divert: device busy, remove cancelled\n");
spin_unlock_irqrestore(&divert_lock, flags);
return;
}
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index e61e55f1f193..db432e635496 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -19,8 +19,8 @@
/**********************************/
/* structure keeping calling info */
/**********************************/
-struct call_struc
-{ isdn_ctrl ics; /* delivered setup + driver parameters */
+struct call_struc {
+ isdn_ctrl ics; /* delivered setup + driver parameters */
ulong divert_id; /* Id delivered to user */
unsigned char akt_state; /* actual state */
char deflect_dest[35]; /* deflection destination */
@@ -34,8 +34,8 @@ struct call_struc
/********************************************/
/* structure keeping deflection table entry */
/********************************************/
-struct deflect_struc
-{ struct deflect_struc *next, *prev;
+struct deflect_struc {
+ struct deflect_struc *next, *prev;
divert_rule rule; /* used rule */
};
@@ -64,16 +64,16 @@ static void deflect_timer_expire(ulong arg)
del_timer(&cs->timer); /* delete active timer */
spin_unlock_irqrestore(&divert_lock, flags);
- switch (cs->akt_state)
- { case DEFLECT_PROCEED:
- cs->ics.command = ISDN_CMD_HANGUP; /* cancel action */
- divert_if.ll_cmd(&cs->ics);
- spin_lock_irqsave(&divert_lock, flags);
- cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
- cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
- add_timer(&cs->timer);
- spin_unlock_irqrestore(&divert_lock, flags);
- break;
+ switch (cs->akt_state) {
+ case DEFLECT_PROCEED:
+ cs->ics.command = ISDN_CMD_HANGUP; /* cancel action */
+ divert_if.ll_cmd(&cs->ics);
+ spin_lock_irqsave(&divert_lock, flags);
+ cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
+ cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
+ add_timer(&cs->timer);
+ spin_unlock_irqrestore(&divert_lock, flags);
+ break;
case DEFLECT_ALERT:
cs->ics.command = ISDN_CMD_REDIR; /* protocol */
@@ -111,7 +111,8 @@ static void deflect_timer_expire(ulong arg)
int cf_command(int drvid, int mode,
u_char proc, char *msn,
u_char service, char *fwd_nr, ulong *procid)
-{ unsigned long flags;
+{
+ unsigned long flags;
int retval, msnlen;
int fwd_len;
char *p, *ielenp, tmp[60];
@@ -130,8 +131,8 @@ int cf_command(int drvid, int mode,
*p++ = 1; /* length */
*p++ = service; /* service to handle */
- if (mode == 1)
- { if (!*fwd_nr) return (-EINVAL); /* destination missing */
+ if (mode == 1) {
+ if (!*fwd_nr) return (-EINVAL); /* destination missing */
if (strchr(fwd_nr, '.')) return (-EINVAL); /* subaddress not allowed */
fwd_len = strlen(fwd_nr);
*p++ = 0x30; /* number enumeration */
@@ -144,12 +145,12 @@ int cf_command(int drvid, int mode,
msnlen = strlen(msn);
*p++ = 0x80; /* msn number */
- if (msnlen > 1)
- { *p++ = msnlen; /* length */
+ if (msnlen > 1) {
+ *p++ = msnlen; /* length */
strcpy(p, msn);
p += msnlen;
- }
- else *p++ = 0;
+ } else
+ *p++ = 0;
*ielenp = p - ielenp - 1; /* set total IE length */
@@ -186,14 +187,13 @@ int cf_command(int drvid, int mode,
retval = divert_if.ll_cmd(&cs->ics); /* execute command */
- if (!retval)
- { cs->prev = NULL;
+ if (!retval) {
+ cs->prev = NULL;
spin_lock_irqsave(&divert_lock, flags);
cs->next = divert_head;
divert_head = cs;
spin_unlock_irqrestore(&divert_lock, flags);
- }
- else
+ } else
kfree(cs);
return (retval);
} /* cf_command */
@@ -203,15 +203,16 @@ int cf_command(int drvid, int mode,
/* handle a external deflection command */
/****************************************/
int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
-{ struct call_struc *cs;
+{
+ struct call_struc *cs;
isdn_ctrl ic;
unsigned long flags;
int i;
if ((cmd & 0x7F) > 2) return (-EINVAL); /* invalid command */
cs = divert_head; /* start of parameter list */
- while (cs)
- { if (cs->divert_id == callid) break; /* found */
+ while (cs) {
+ if (cs->divert_id == callid) break; /* found */
cs = cs->next;
} /* search entry */
if (!cs) return (-EINVAL); /* invalid callid */
@@ -220,32 +221,30 @@ int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
ic.arg = cs->ics.arg;
i = -EINVAL;
if (cs->akt_state == DEFLECT_AUTODEL) return (i); /* no valid call */
- switch (cmd & 0x7F)
- { case 0: /* hangup */
- del_timer(&cs->timer);
- ic.command = ISDN_CMD_HANGUP;
- i = divert_if.ll_cmd(&ic);
- spin_lock_irqsave(&divert_lock, flags);
- cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
- cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
- add_timer(&cs->timer);
- spin_unlock_irqrestore(&divert_lock, flags);
- break;
+ switch (cmd & 0x7F) {
+ case 0: /* hangup */
+ del_timer(&cs->timer);
+ ic.command = ISDN_CMD_HANGUP;
+ i = divert_if.ll_cmd(&ic);
+ spin_lock_irqsave(&divert_lock, flags);
+ cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
+ cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
+ add_timer(&cs->timer);
+ spin_unlock_irqrestore(&divert_lock, flags);
+ break;
case 1: /* alert */
if (cs->akt_state == DEFLECT_ALERT) return (0);
cmd &= 0x7F; /* never wait */
del_timer(&cs->timer);
ic.command = ISDN_CMD_ALERT;
- if ((i = divert_if.ll_cmd(&ic)))
- {
+ if ((i = divert_if.ll_cmd(&ic))) {
spin_lock_irqsave(&divert_lock, flags);
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
add_timer(&cs->timer);
spin_unlock_irqrestore(&divert_lock, flags);
- }
- else
+ } else
cs->akt_state = DEFLECT_ALERT;
break;
@@ -254,15 +253,13 @@ int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
strlcpy(cs->ics.parm.setup.phone, to_nr, sizeof(cs->ics.parm.setup.phone));
strcpy(cs->ics.parm.setup.eazmsn, "Testtext manual");
ic.command = ISDN_CMD_REDIR;
- if ((i = divert_if.ll_cmd(&ic)))
- {
+ if ((i = divert_if.ll_cmd(&ic))) {
spin_lock_irqsave(&divert_lock, flags);
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
add_timer(&cs->timer);
spin_unlock_irqrestore(&divert_lock, flags);
- }
- else
+ } else
cs->akt_state = DEFLECT_ALERT;
break;
@@ -274,19 +271,19 @@ int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
/* insert a new rule before idx */
/********************************/
int insertrule(int idx, divert_rule *newrule)
-{ struct deflect_struc *ds, *ds1 = NULL;
+{
+ struct deflect_struc *ds, *ds1 = NULL;
unsigned long flags;
- if (!(ds = kmalloc(sizeof(struct deflect_struc),
- GFP_KERNEL)))
+ if (!(ds = kmalloc(sizeof(struct deflect_struc), GFP_KERNEL)))
return (-ENOMEM); /* no memory */
ds->rule = *newrule; /* set rule */
spin_lock_irqsave(&divert_lock, flags);
- if (idx >= 0)
- { ds1 = table_head;
+ if (idx >= 0) {
+ ds1 = table_head;
while ((ds1) && (idx > 0))
{ idx--;
ds1 = ds1->next;
@@ -294,17 +291,16 @@ int insertrule(int idx, divert_rule *newrule)
if (!ds1) idx = -1;
}
- if (idx < 0)
- { ds->prev = table_tail; /* previous entry */
+ if (idx < 0) {
+ ds->prev = table_tail; /* previous entry */
ds->next = NULL; /* end of chain */
if (ds->prev)
ds->prev->next = ds; /* last forward */
else
table_head = ds; /* is first entry */
table_tail = ds; /* end of queue */
- }
- else
- { ds->next = ds1; /* next entry */
+ } else {
+ ds->next = ds1; /* next entry */
ds->prev = ds1->prev; /* prev entry */
ds1->prev = ds; /* backward chain old element */
if (!ds->prev)
@@ -319,17 +315,18 @@ int insertrule(int idx, divert_rule *newrule)
/* delete the rule at position idx */
/***********************************/
int deleterule(int idx)
-{ struct deflect_struc *ds, *ds1;
+{
+ struct deflect_struc *ds, *ds1;
unsigned long flags;
- if (idx < 0)
- { spin_lock_irqsave(&divert_lock, flags);
+ if (idx < 0) {
+ spin_lock_irqsave(&divert_lock, flags);
ds = table_head;
table_head = NULL;
table_tail = NULL;
spin_unlock_irqrestore(&divert_lock, flags);
- while (ds)
- { ds1 = ds;
+ while (ds) {
+ ds1 = ds;
ds = ds->next;
kfree(ds1);
}
@@ -339,13 +336,12 @@ int deleterule(int idx)
spin_lock_irqsave(&divert_lock, flags);
ds = table_head;
- while ((ds) && (idx > 0))
- { idx--;
+ while ((ds) && (idx > 0)) {
+ idx--;
ds = ds->next;
}
- if (!ds)
- {
+ if (!ds) {
spin_unlock_irqrestore(&divert_lock, flags);
return (-EINVAL);
}
@@ -369,12 +365,13 @@ int deleterule(int idx)
/* get a pointer to a specific rule number */
/*******************************************/
divert_rule *getruleptr(int idx)
-{ struct deflect_struc *ds = table_head;
+{
+ struct deflect_struc *ds = table_head;
if (idx < 0) return (NULL);
- while ((ds) && (idx >= 0))
- { if (!(idx--))
- { return (&ds->rule);
+ while ((ds) && (idx >= 0)) {
+ if (!(idx--)) {
+ return (&ds->rule);
break;
}
ds = ds->next;
@@ -386,7 +383,8 @@ divert_rule *getruleptr(int idx)
/* called from common module on an incoming call */
/*************************************************/
static int isdn_divert_icall(isdn_ctrl *ic)
-{ int retval = 0;
+{
+ int retval = 0;
unsigned long flags;
struct call_struc *cs = NULL;
struct deflect_struc *dv;
@@ -394,8 +392,8 @@ static int isdn_divert_icall(isdn_ctrl *ic)
u_char accept;
/* first check the internal deflection table */
- for (dv = table_head; dv; dv = dv->next)
- { /* scan table */
+ for (dv = table_head; dv; dv = dv->next) {
+ /* scan table */
if (((dv->rule.callopt == 1) && (ic->command == ISDN_STAT_ICALLW)) ||
((dv->rule.callopt == 2) && (ic->command == ISDN_STAT_ICALL)))
continue; /* call option check */
@@ -409,10 +407,10 @@ static int isdn_divert_icall(isdn_ctrl *ic)
p = dv->rule.my_msn;
p1 = ic->parm.setup.eazmsn;
accept = 0;
- while (*p)
- { /* complete compare */
- if (*p == '-')
- { accept = 1; /* call accepted */
+ while (*p) {
+ /* complete compare */
+ if (*p == '-') {
+ accept = 1; /* call accepted */
break;
}
if (*p++ != *p1++)
@@ -422,14 +420,15 @@ static int isdn_divert_icall(isdn_ctrl *ic)
} /* complete compare */
if (!accept) continue; /* not accepted */
- if ((strcmp(dv->rule.caller, "0")) || (ic->parm.setup.phone[0]))
- { p = dv->rule.caller;
+ if ((strcmp(dv->rule.caller, "0")) ||
+ (ic->parm.setup.phone[0])) {
+ p = dv->rule.caller;
p1 = ic->parm.setup.phone;
accept = 0;
- while (*p)
- { /* complete compare */
- if (*p == '-')
- { accept = 1; /* call accepted */
+ while (*p) {
+ /* complete compare */
+ if (*p == '-') {
+ accept = 1; /* call accepted */
break;
}
if (*p++ != *p1++)
@@ -440,10 +439,10 @@ static int isdn_divert_icall(isdn_ctrl *ic)
if (!accept) continue; /* not accepted */
}
- switch (dv->rule.action)
- { case DEFLECT_IGNORE:
- return (0);
- break;
+ switch (dv->rule.action) {
+ case DEFLECT_IGNORE:
+ return (0);
+ break;
case DEFLECT_ALERT:
case DEFLECT_PROCEED:
@@ -465,31 +464,29 @@ static int isdn_divert_icall(isdn_ctrl *ic)
cs->ics.parm.setup.screen = dv->rule.screen;
if (dv->rule.waittime)
cs->timer.expires = jiffies + (HZ * dv->rule.waittime);
+ else if (dv->rule.action == DEFLECT_PROCEED)
+ cs->timer.expires = jiffies + (HZ * extern_wait_max);
else
- if (dv->rule.action == DEFLECT_PROCEED)
- cs->timer.expires = jiffies + (HZ * extern_wait_max);
- else
- cs->timer.expires = 0;
+ cs->timer.expires = 0;
cs->akt_state = dv->rule.action;
spin_lock_irqsave(&divert_lock, flags);
cs->divert_id = next_id++; /* new sequence number */
spin_unlock_irqrestore(&divert_lock, flags);
cs->prev = NULL;
- if (cs->akt_state == DEFLECT_ALERT)
- { strcpy(cs->deflect_dest, dv->rule.to_nr);
- if (!cs->timer.expires)
- { strcpy(ic->parm.setup.eazmsn, "Testtext direct");
+ if (cs->akt_state == DEFLECT_ALERT) {
+ strcpy(cs->deflect_dest, dv->rule.to_nr);
+ if (!cs->timer.expires) {
+ strcpy(ic->parm.setup.eazmsn,
+ "Testtext direct");
ic->parm.setup.screen = dv->rule.screen;
strlcpy(ic->parm.setup.phone, dv->rule.to_nr, sizeof(ic->parm.setup.phone));
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
retval = 5;
- }
- else
+ } else
retval = 1; /* alerting */
- }
- else
- { cs->deflect_dest[0] = '\0';
+ } else {
+ cs->deflect_dest[0] = '\0';
retval = 4; /* only proceed */
}
sprintf(cs->info, "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
@@ -505,8 +502,8 @@ static int isdn_divert_icall(isdn_ctrl *ic)
dv->rule.waittime,
cs->deflect_dest);
if ((dv->rule.action == DEFLECT_REPORT) ||
- (dv->rule.action == DEFLECT_REJECT))
- { put_info_buffer(cs->info);
+ (dv->rule.action == DEFLECT_REJECT)) {
+ put_info_buffer(cs->info);
kfree(cs); /* remove */
return ((dv->rule.action == DEFLECT_REPORT) ? 0 : 2); /* nothing to do */
}
@@ -519,8 +516,8 @@ static int isdn_divert_icall(isdn_ctrl *ic)
break;
} /* scan_table */
- if (cs)
- { cs->prev = NULL;
+ if (cs) {
+ cs->prev = NULL;
spin_lock_irqsave(&divert_lock, flags);
cs->next = divert_head;
divert_head = cs;
@@ -529,21 +526,21 @@ static int isdn_divert_icall(isdn_ctrl *ic)
put_info_buffer(cs->info);
return (retval);
- }
- else
+ } else
return (0);
} /* isdn_divert_icall */
void deleteprocs(void)
-{ struct call_struc *cs, *cs1;
+{
+ struct call_struc *cs, *cs1;
unsigned long flags;
spin_lock_irqsave(&divert_lock, flags);
cs = divert_head;
divert_head = NULL;
- while (cs)
- { del_timer(&cs->timer);
+ while (cs) {
+ del_timer(&cs->timer);
cs1 = cs;
cs = cs->next;
kfree(cs1);
@@ -555,12 +552,13 @@ void deleteprocs(void)
/* put a address including address type into buffer */
/****************************************************/
static int put_address(char *st, u_char *p, int len)
-{ u_char retval = 0;
+{
+ u_char retval = 0;
u_char adr_typ = 0; /* network standard */
if (len < 2) return (retval);
- if (*p == 0xA1)
- { retval = *(++p) + 2; /* total length */
+ if (*p == 0xA1) {
+ retval = *(++p) + 2; /* total length */
if (retval > len) return (0); /* too short */
len = retval - 2; /* remaining length */
if (len < 3) return (0);
@@ -572,16 +570,13 @@ static int put_address(char *st, u_char *p, int len)
if (*p++ != 0x12) return (0);
if (*p > len) return (0); /* check number length */
len = *p++;
- }
- else
- if (*p == 0x80)
- { retval = *(++p) + 2; /* total length */
- if (retval > len) return (0);
- len = retval - 2;
- p++;
- }
- else
- return (0); /* invalid address information */
+ } else if (*p == 0x80) {
+ retval = *(++p) + 2; /* total length */
+ if (retval > len) return (0);
+ len = retval - 2;
+ p++;
+ } else
+ return (0); /* invalid address information */
sprintf(st, "%d ", adr_typ);
st += strlen(st);
@@ -598,7 +593,8 @@ static int put_address(char *st, u_char *p, int len)
/* report a successful interrogation */
/*************************************/
static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
-{ char *src = ic->parm.dss1_io.data;
+{
+ char *src = ic->parm.dss1_io.data;
int restlen = ic->parm.dss1_io.datalen;
int cnt = 1;
u_char n, n1;
@@ -608,50 +604,44 @@ static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
if (*src++ != 0x30) return (-101);
if ((n = *src++) > 0x81) return (-102); /* invalid length field */
restlen -= 2; /* remaining bytes */
- if (n == 0x80)
- { if (restlen < 2) return (-103);
+ if (n == 0x80) {
+ if (restlen < 2) return (-103);
if ((*(src + restlen - 1)) || (*(src + restlen - 2))) return (-104);
restlen -= 2;
- }
+ } else if (n == 0x81) {
+ n = *src++;
+ restlen--;
+ if (n > restlen) return (-105);
+ restlen = n;
+ } else if (n > restlen)
+ return (-106);
else
- if (n == 0x81)
- { n = *src++;
- restlen--;
- if (n > restlen) return (-105);
- restlen = n;
- }
- else
- if (n > restlen) return (-106);
- else
- restlen = n; /* standard format */
+ restlen = n; /* standard format */
if (restlen < 3) return (-107); /* no procedure */
if ((*src++ != 2) || (*src++ != 1) || (*src++ != 0x0B)) return (-108);
restlen -= 3;
if (restlen < 2) return (-109); /* list missing */
- if (*src == 0x31)
- { src++;
+ if (*src == 0x31) {
+ src++;
if ((n = *src++) > 0x81) return (-110); /* invalid length field */
restlen -= 2; /* remaining bytes */
- if (n == 0x80)
- { if (restlen < 2) return (-111);
+ if (n == 0x80) {
+ if (restlen < 2) return (-111);
if ((*(src + restlen - 1)) || (*(src + restlen - 2))) return (-112);
restlen -= 2;
- }
+ } else if (n == 0x81) {
+ n = *src++;
+ restlen--;
+ if (n > restlen) return (-113);
+ restlen = n;
+ } else if (n > restlen)
+ return (-114);
else
- if (n == 0x81)
- { n = *src++;
- restlen--;
- if (n > restlen) return (-113);
- restlen = n;
- }
- else
- if (n > restlen) return (-114);
- else
- restlen = n; /* standard format */
+ restlen = n; /* standard format */
} /* result list header */
- while (restlen >= 2)
- { stp = st;
+ while (restlen >= 2) {
+ stp = st;
sprintf(stp, "%d 0x%lx %d %s ", DIVERT_REPORT, ic->parm.dss1_io.ll_id,
cnt++, divert_if.drv_to_name(ic->driver));
stp += strlen(stp);
@@ -674,8 +664,8 @@ static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
sprintf(stp, "%d ", (*p++) & 0xFF);
stp += strlen(stp);
n -= 6;
- if (n > 2)
- { if (*p++ != 0x30) continue;
+ if (n > 2) {
+ if (*p++ != 0x30) continue;
if (*p > (n - 2)) continue;
n = *p++;
if (!(n1 = put_address(stp, p, n & 0xFF))) continue;
@@ -692,58 +682,58 @@ static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
/* callback for protocol specific extensions */
/*********************************************/
static int prot_stat_callback(isdn_ctrl *ic)
-{ struct call_struc *cs, *cs1;
+{
+ struct call_struc *cs, *cs1;
int i;
unsigned long flags;
cs = divert_head; /* start of list */
cs1 = NULL;
- while (cs)
- { if (ic->driver == cs->ics.driver)
- { switch (cs->ics.arg)
- { case DSS1_CMD_INVOKE:
- if ((cs->ics.parm.dss1_io.ll_id == ic->parm.dss1_io.ll_id) &&
- (cs->ics.parm.dss1_io.hl_id == ic->parm.dss1_io.hl_id))
- { switch (ic->arg)
- { case DSS1_STAT_INVOKE_ERR:
- sprintf(cs->info, "128 0x%lx 0x%x\n",
- ic->parm.dss1_io.ll_id,
- ic->parm.dss1_io.timeout);
- put_info_buffer(cs->info);
- break;
-
- case DSS1_STAT_INVOKE_RES:
- switch (cs->ics.parm.dss1_io.proc)
- { case 7:
- case 8:
- put_info_buffer(cs->info);
- break;
-
- case 11:
- i = interrogate_success(ic, cs);
- if (i)
- sprintf(cs->info, "%d 0x%lx %d\n", DIVERT_REPORT,
- ic->parm.dss1_io.ll_id, i);
- put_info_buffer(cs->info);
- break;
-
- default:
- printk(KERN_WARNING "dss1_divert: unknown proc %d\n", cs->ics.parm.dss1_io.proc);
- break;
- }
-
+ while (cs) {
+ if (ic->driver == cs->ics.driver) {
+ switch (cs->ics.arg) {
+ case DSS1_CMD_INVOKE:
+ if ((cs->ics.parm.dss1_io.ll_id == ic->parm.dss1_io.ll_id) &&
+ (cs->ics.parm.dss1_io.hl_id == ic->parm.dss1_io.hl_id)) {
+ switch (ic->arg) {
+ case DSS1_STAT_INVOKE_ERR:
+ sprintf(cs->info, "128 0x%lx 0x%x\n",
+ ic->parm.dss1_io.ll_id,
+ ic->parm.dss1_io.timeout);
+ put_info_buffer(cs->info);
+ break;
+
+ case DSS1_STAT_INVOKE_RES:
+ switch (cs->ics.parm.dss1_io.proc) {
+ case 7:
+ case 8:
+ put_info_buffer(cs->info);
+ break;
+ case 11:
+ i = interrogate_success(ic, cs);
+ if (i)
+ sprintf(cs->info, "%d 0x%lx %d\n", DIVERT_REPORT,
+ ic->parm.dss1_io.ll_id, i);
+ put_info_buffer(cs->info);
break;
default:
- printk(KERN_WARNING "dss1_divert unknown invoke answer %lx\n", ic->arg);
+ printk(KERN_WARNING "dss1_divert: unknown proc %d\n", cs->ics.parm.dss1_io.proc);
break;
}
- cs1 = cs; /* remember structure */
- cs = NULL;
- continue; /* abort search */
- } /* id found */
- break;
+
+ break;
+
+ default:
+ printk(KERN_WARNING "dss1_divert unknown invoke answer %lx\n", ic->arg);
+ break;
+ }
+ cs1 = cs; /* remember structure */
+ cs = NULL;
+ continue; /* abort search */
+ } /* id found */
+ break;
case DSS1_CMD_INVOKE_ABORT:
printk(KERN_WARNING "dss1_divert unhandled invoke abort\n");
@@ -757,13 +747,12 @@ static int prot_stat_callback(isdn_ctrl *ic)
} /* driver ok */
}
- if (!cs1)
- { printk(KERN_WARNING "dss1_divert unhandled process\n");
+ if (!cs1) {
+ printk(KERN_WARNING "dss1_divert unhandled process\n");
return (0);
}
- if (cs1->ics.driver == -1)
- {
+ if (cs1->ics.driver == -1) {
spin_lock_irqsave(&divert_lock, flags);
del_timer(&cs1->timer);
if (cs1->prev)
@@ -784,20 +773,22 @@ static int prot_stat_callback(isdn_ctrl *ic)
/* status callback from HL */
/***************************/
static int isdn_divert_stat_callback(isdn_ctrl *ic)
-{ struct call_struc *cs, *cs1;
+{
+ struct call_struc *cs, *cs1;
unsigned long flags;
int retval;
retval = -1;
cs = divert_head; /* start of list */
- while (cs)
- { if ((ic->driver == cs->ics.driver) && (ic->arg == cs->ics.arg))
- { switch (ic->command)
- { case ISDN_STAT_DHUP:
- sprintf(cs->info, "129 0x%lx\n", cs->divert_id);
- del_timer(&cs->timer);
- cs->ics.driver = -1;
- break;
+ while (cs) {
+ if ((ic->driver == cs->ics.driver) &&
+ (ic->arg == cs->ics.arg)) {
+ switch (ic->command) {
+ case ISDN_STAT_DHUP:
+ sprintf(cs->info, "129 0x%lx\n", cs->divert_id);
+ del_timer(&cs->timer);
+ cs->ics.driver = -1;
+ break;
case ISDN_STAT_CAUSE:
sprintf(cs->info, "130 0x%lx %s\n", cs->divert_id, ic->parm.num);
@@ -818,8 +809,7 @@ static int isdn_divert_stat_callback(isdn_ctrl *ic)
}
cs1 = cs;
cs = cs->next;
- if (cs1->ics.driver == -1)
- {
+ if (cs1->ics.driver == -1) {
spin_lock_irqsave(&divert_lock, flags);
if (cs1->prev)
cs1->prev->next = cs1->next; /* forward link */
@@ -840,20 +830,19 @@ static int isdn_divert_stat_callback(isdn_ctrl *ic)
/********************/
int ll_callback(isdn_ctrl *ic)
{
- switch (ic->command)
- { case ISDN_STAT_ICALL:
+ switch (ic->command) {
+ case ISDN_STAT_ICALL:
case ISDN_STAT_ICALLW:
return (isdn_divert_icall(ic));
break;
case ISDN_STAT_PROT:
- if ((ic->arg & 0xFF) == ISDN_PTYPE_EURO)
- { if (ic->arg != DSS1_STAT_INVOKE_BRD)
+ if ((ic->arg & 0xFF) == ISDN_PTYPE_EURO) {
+ if (ic->arg != DSS1_STAT_INVOKE_BRD)
return (prot_stat_callback(ic));
else
return (0); /* DSS1 invoke broadcast */
- }
- else
+ } else
return (-1); /* protocol not euro */
default:
diff --git a/drivers/isdn/divert/isdn_divert.h b/drivers/isdn/divert/isdn_divert.h
index 42f289320d2d..55033dd872c0 100644
--- a/drivers/isdn/divert/isdn_divert.h
+++ b/drivers/isdn/divert/isdn_divert.h
@@ -43,8 +43,8 @@
#define DEFLECT_ALL_IDS 0xFFFFFFFF /* all drivers selected */
-typedef struct
-{ ulong drvid; /* driver ids, bit mapped */
+typedef struct {
+ ulong drvid; /* driver ids, bit mapped */
char my_msn[35]; /* desired msn, subaddr allowed */
char caller[35]; /* caller id, partial string with * + subaddr allowed */
char to_nr[35]; /* deflected to number incl. subaddress */
@@ -65,18 +65,18 @@ typedef struct
u_char waittime; /* maximum wait time for proceeding */
} divert_rule;
-typedef union
-{ int drv_version; /* return of driver version */
- struct
- { int drvid; /* id of driver */
+typedef union {
+ int drv_version; /* return of driver version */
+ struct {
+ int drvid; /* id of driver */
char drvnam[30]; /* name of driver */
} getid;
- struct
- { int ruleidx; /* index of rule */
+ struct {
+ int ruleidx; /* index of rule */
divert_rule rule; /* rule parms */
} getsetrule;
- struct
- { u_char subcmd; /* 0 = hangup/reject,
+ struct {
+ u_char subcmd; /* 0 = hangup/reject,
1 = alert,
2 = deflect */
ulong callid; /* id of call delivered by ascii output */
@@ -84,8 +84,8 @@ typedef union
else uus1 string (maxlen 31),
data from rule used if empty */
} fwd_ctrl;
- struct
- { int drvid; /* id of driver */
+ struct {
+ int drvid; /* id of driver */
u_char cfproc; /* cfu = 0, cfb = 1, cfnr = 2 */
ulong procid; /* process id returned when no error */
u_char service; /* basically coded service, 0 = all */
@@ -104,8 +104,8 @@ typedef union
/**************************************************/
/* structure keeping ascii info for device output */
/**************************************************/
-struct divert_info
-{ struct divert_info *next;
+struct divert_info {
+ struct divert_info *next;
ulong usage_cnt; /* number of files still to work */
char info_start[2]; /* info string start */
};
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
index b18a92c32184..dde5e09e6267 100644
--- a/drivers/isdn/gigaset/Kconfig
+++ b/drivers/isdn/gigaset/Kconfig
@@ -1,5 +1,6 @@
menuconfig ISDN_DRV_GIGASET
tristate "Siemens Gigaset support"
+ depends on TTY
select CRC_CCITT
select BITREVERSE
help
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 68452b768da2..03a0a01a4054 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -248,6 +248,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
CAPIMSG_CONTROL(data));
l -= 12;
+ if (l <= 0)
+ return;
dbgline = kmalloc(3 * l, GFP_ATOMIC);
if (!dbgline)
return;
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 6849a11a1b24..7c7814497e3e 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -467,11 +467,6 @@ void gigaset_freecs(struct cardstate *cs)
mutex_lock(&cs->mutex);
- if (!cs->bcs)
- goto f_cs;
- if (!cs->inbuf)
- goto f_bcs;
-
spin_lock_irqsave(&cs->lock, flags);
cs->running = 0;
spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are
@@ -507,17 +502,16 @@ void gigaset_freecs(struct cardstate *cs)
gig_dbg(DEBUG_INIT, "clearing at_state");
clear_at_state(&cs->at_state);
dealloc_temp_at_states(cs);
+ clear_events(cs);
tty_port_destroy(&cs->port);
/* fall through */
case 0: /* error in basic setup */
- clear_events(cs);
gig_dbg(DEBUG_INIT, "freeing inbuf");
kfree(cs->inbuf);
+ kfree(cs->bcs);
}
-f_bcs: gig_dbg(DEBUG_INIT, "freeing bcs[]");
- kfree(cs->bcs);
-f_cs: gig_dbg(DEBUG_INIT, "freeing cs");
+
mutex_unlock(&cs->mutex);
free_cs(cs);
}
@@ -687,19 +681,6 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
return NULL;
}
- gig_dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1);
- cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL);
- if (!cs->bcs) {
- pr_err("out of memory\n");
- goto error;
- }
- gig_dbg(DEBUG_INIT, "allocating inbuf");
- cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
- if (!cs->inbuf) {
- pr_err("out of memory\n");
- goto error;
- }
-
cs->cs_init = 0;
cs->channels = channels;
cs->onechannel = onechannel;
@@ -729,6 +710,12 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
cs->mode = M_UNKNOWN;
cs->mstate = MS_UNINITIALIZED;
+ cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL);
+ cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
+ if (!cs->bcs || !cs->inbuf) {
+ pr_err("out of memory\n");
+ goto error;
+ }
++cs->cs_init;
gig_dbg(DEBUG_INIT, "setting up at_state");
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 2e6963dc740e..7459b127ddd5 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -351,10 +351,11 @@ struct reply_t gigaset_tab_cid[] =
static const struct resp_type_t {
- unsigned char *response;
- int resp_code;
- int type;
-} resp_type[] =
+ char *response;
+ int resp_code;
+ int type;
+}
+resp_type[] =
{
{"OK", RSP_OK, RT_NOTHING},
{"ERROR", RSP_ERROR, RT_NOTHING},
@@ -374,11 +375,12 @@ static const struct resp_type_t {
};
static const struct zsau_resp_t {
- unsigned char *str;
- int code;
-} zsau_resp[] =
+ char *str;
+ int code;
+}
+zsau_resp[] =
{
- {"OUTGOING_CALL_PROCEEDING", ZSAU_OUTGOING_CALL_PROCEEDING},
+ {"OUTGOING_CALL_PROCEEDING", ZSAU_PROCEEDING},
{"CALL_DELIVERED", ZSAU_CALL_DELIVERED},
{"ACTIVE", ZSAU_ACTIVE},
{"DISCONNECT_IND", ZSAU_DISCONNECT_IND},
@@ -434,7 +436,7 @@ void gigaset_handle_modem_response(struct cardstate *cs)
len = cs->cbytes;
if (!len) {
/* ignore additional LFs/CRs (M10x config mode or cx100) */
- gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[len]);
+ gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]);
return;
}
cs->respdata[len] = 0;
@@ -707,27 +709,29 @@ static void schedule_init(struct cardstate *cs, int state)
cs->commands_pending = 1;
}
-/* Add "AT" to a command, add the cid, dle encode it, send the result to the
- hardware. */
-static void send_command(struct cardstate *cs, const char *cmd, int cid,
- int dle, gfp_t kmallocflags)
+/* send an AT command
+ * adding the "AT" prefix, cid and DLE encapsulation as appropriate
+ */
+static void send_command(struct cardstate *cs, const char *cmd,
+ struct at_state_t *at_state)
{
+ int cid = at_state->cid;
struct cmdbuf_t *cb;
size_t buflen;
buflen = strlen(cmd) + 12; /* DLE ( A T 1 2 3 4 5 <cmd> DLE ) \0 */
- cb = kmalloc(sizeof(struct cmdbuf_t) + buflen, kmallocflags);
+ cb = kmalloc(sizeof(struct cmdbuf_t) + buflen, GFP_ATOMIC);
if (!cb) {
dev_err(cs->dev, "%s: out of memory\n", __func__);
return;
}
if (cid > 0 && cid <= 65535)
cb->len = snprintf(cb->buf, buflen,
- dle ? "\020(AT%d%s\020)" : "AT%d%s",
+ cs->dle ? "\020(AT%d%s\020)" : "AT%d%s",
cid, cmd);
else
cb->len = snprintf(cb->buf, buflen,
- dle ? "\020(AT%s\020)" : "AT%s",
+ cs->dle ? "\020(AT%s\020)" : "AT%s",
cmd);
cb->offset = 0;
cb->next = NULL;
@@ -886,7 +890,7 @@ static void finish_shutdown(struct cardstate *cs)
gigaset_isdn_stop(cs);
}
- /* The rest is done by cleanup_cs () in user mode. */
+ /* The rest is done by cleanup_cs() in process context. */
cs->cmd_result = -ENODEV;
cs->waiting = 0;
@@ -976,10 +980,9 @@ exit:
}
static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
- struct at_state_t **p_at_state)
+ struct at_state_t *at_state)
{
int retval;
- struct at_state_t *at_state = *p_at_state;
retval = gigaset_isdn_icall(at_state);
switch (retval) {
@@ -1176,7 +1179,7 @@ static void do_action(int action, struct cardstate *cs,
spin_unlock_irqrestore(&cs->lock, flags);
break;
case ACT_ICALL:
- handle_icall(cs, bcs, p_at_state);
+ handle_icall(cs, bcs, at_state);
break;
case ACT_FAILSDOWN:
dev_warn(cs->dev, "Could not shut down the device.\n");
@@ -1264,7 +1267,7 @@ static void do_action(int action, struct cardstate *cs,
cs->commands_pending = 1;
break;
}
- /* fall through */
+ /* bad cid: fall through */
case ACT_FAILCID:
cs->cur_at_seq = SEQ_NONE;
channel = cs->curchannel;
@@ -1339,7 +1342,6 @@ static void do_action(int action, struct cardstate *cs,
*p_resp_code = RSP_ERROR;
break;
}
- /*at_state->getstring = 1;*/
cs->gotfwver = 0;
break;
case ACT_GOTVER:
@@ -1471,7 +1473,6 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
int rcode;
int genresp = 0;
int resp_code = RSP_ERROR;
- int sendcid;
struct at_state_t *at_state;
int index;
int curact;
@@ -1499,7 +1500,6 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
at_state->ConState, ev->type);
bcs = at_state->bcs;
- sendcid = at_state->cid;
/* Setting the pointer to the dial array */
rep = at_state->replystruct;
@@ -1510,10 +1510,12 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
|| !at_state->timer_active) {
ev->type = RSP_NONE; /* old timeout */
gig_dbg(DEBUG_EVENT, "old timeout");
- } else if (!at_state->waiting)
- gig_dbg(DEBUG_EVENT, "timeout occurred");
- else
- gig_dbg(DEBUG_EVENT, "stopped waiting");
+ } else {
+ if (at_state->waiting)
+ gig_dbg(DEBUG_EVENT, "stopped waiting");
+ else
+ gig_dbg(DEBUG_EVENT, "timeout occurred");
+ }
}
spin_unlock_irqrestore(&cs->lock, flags);
@@ -1561,45 +1563,40 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
&genresp, &resp_code, ev);
if (!at_state)
- break; /* may be freed after disconnect */
+ /* at_state destroyed by disconnect */
+ return;
}
- if (at_state) {
- /* Jump to the next con-state regarding the array */
- if (rep->new_ConState >= 0)
- at_state->ConState = rep->new_ConState;
+ /* Jump to the next con-state regarding the array */
+ if (rep->new_ConState >= 0)
+ at_state->ConState = rep->new_ConState;
- if (genresp) {
- spin_lock_irqsave(&cs->lock, flags);
+ if (genresp) {
+ spin_lock_irqsave(&cs->lock, flags);
+ at_state->timer_expires = 0;
+ at_state->timer_active = 0;
+ spin_unlock_irqrestore(&cs->lock, flags);
+ gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
+ } else {
+ /* Send command to modem if not NULL... */
+ if (p_command) {
+ if (cs->connected)
+ send_command(cs, p_command, at_state);
+ else
+ gigaset_add_event(cs, at_state, RSP_NODEV,
+ NULL, 0, NULL);
+ }
+
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!rep->timeout) {
at_state->timer_expires = 0;
at_state->timer_active = 0;
- spin_unlock_irqrestore(&cs->lock, flags);
- gigaset_add_event(cs, at_state, resp_code,
- NULL, 0, NULL);
- } else {
- /* Send command to modem if not NULL... */
- if (p_command) {
- if (cs->connected)
- send_command(cs, p_command,
- sendcid, cs->dle,
- GFP_ATOMIC);
- else
- gigaset_add_event(cs, at_state,
- RSP_NODEV,
- NULL, 0, NULL);
- }
-
- spin_lock_irqsave(&cs->lock, flags);
- if (!rep->timeout) {
- at_state->timer_expires = 0;
- at_state->timer_active = 0;
- } else if (rep->timeout > 0) { /* new timeout */
- at_state->timer_expires = rep->timeout * 10;
- at_state->timer_active = 1;
- ++at_state->timer_index;
- }
- spin_unlock_irqrestore(&cs->lock, flags);
+ } else if (rep->timeout > 0) { /* new timeout */
+ at_state->timer_expires = rep->timeout * 10;
+ at_state->timer_active = 1;
+ ++at_state->timer_index;
}
+ spin_unlock_irqrestore(&cs->lock, flags);
}
}
@@ -1693,6 +1690,11 @@ static void process_command_flags(struct cardstate *cs)
for (i = 0; i < cs->channels; ++i) {
bcs = cs->bcs + i;
if (bcs->at_state.pending_commands & PC_HUP) {
+ if (cs->dle) {
+ cs->curchannel = bcs->channel;
+ schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
+ return;
+ }
bcs->at_state.pending_commands &= ~PC_HUP;
if (bcs->at_state.pending_commands & PC_CID) {
/* not yet dialing: PC_NOCID is sufficient */
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 8e2fc8f31d16..eb63a0f7a02a 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -111,11 +111,10 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
/* connection state */
#define ZSAU_NONE 0
-#define ZSAU_DISCONNECT_IND 4
-#define ZSAU_OUTGOING_CALL_PROCEEDING 1
#define ZSAU_PROCEEDING 1
#define ZSAU_CALL_DELIVERED 2
#define ZSAU_ACTIVE 3
+#define ZSAU_DISCONNECT_IND 4
#define ZSAU_NULL 5
#define ZSAU_DISCONNECT_REQ 6
#define ZSAU_UNKNOWN -1
@@ -183,18 +182,22 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
#define AT_NUM 7
/* variables in struct at_state_t */
+/* - numeric */
#define VAR_ZSAU 0
#define VAR_ZDLE 1
#define VAR_ZCTP 2
+/* total number */
#define VAR_NUM 3
-
+/* - string */
#define STR_NMBR 0
#define STR_ZCPN 1
#define STR_ZCON 2
#define STR_ZBC 3
#define STR_ZHLC 4
+/* total number */
#define STR_NUM 5
+/* event types */
#define EV_TIMEOUT -105
#define EV_IF_VER -106
#define EV_PROC_CIDMODE -107
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 67abf3ff45e8..e2b539675b66 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -112,36 +112,6 @@ static int if_config(struct cardstate *cs, int *arg)
}
/*** the terminal driver ***/
-/* stolen from usbserial and some other tty drivers */
-
-static int if_open(struct tty_struct *tty, struct file *filp);
-static void if_close(struct tty_struct *tty, struct file *filp);
-static int if_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
-static int if_write_room(struct tty_struct *tty);
-static int if_chars_in_buffer(struct tty_struct *tty);
-static void if_throttle(struct tty_struct *tty);
-static void if_unthrottle(struct tty_struct *tty);
-static void if_set_termios(struct tty_struct *tty, struct ktermios *old);
-static int if_tiocmget(struct tty_struct *tty);
-static int if_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear);
-static int if_write(struct tty_struct *tty,
- const unsigned char *buf, int count);
-
-static const struct tty_operations if_ops = {
- .open = if_open,
- .close = if_close,
- .ioctl = if_ioctl,
- .write = if_write,
- .write_room = if_write_room,
- .chars_in_buffer = if_chars_in_buffer,
- .set_termios = if_set_termios,
- .throttle = if_throttle,
- .unthrottle = if_unthrottle,
- .tiocmget = if_tiocmget,
- .tiocmset = if_tiocmset,
-};
static int if_open(struct tty_struct *tty, struct file *filp)
{
@@ -164,7 +134,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
if (cs->port.count == 1) {
tty_port_tty_set(&cs->port, tty);
- tty->low_latency = 1;
+ cs->port.low_latency = 1;
}
mutex_unlock(&cs->mutex);
@@ -355,7 +325,7 @@ done:
static int if_write_room(struct tty_struct *tty)
{
struct cardstate *cs = tty->driver_data;
- int retval = -ENODEV;
+ int retval;
gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
@@ -498,6 +468,20 @@ out:
mutex_unlock(&cs->mutex);
}
+static const struct tty_operations if_ops = {
+ .open = if_open,
+ .close = if_close,
+ .ioctl = if_ioctl,
+ .write = if_write,
+ .write_room = if_write_room,
+ .chars_in_buffer = if_chars_in_buffer,
+ .set_termios = if_set_termios,
+ .throttle = if_throttle,
+ .unthrottle = if_unthrottle,
+ .tiocmget = if_tiocmget,
+ .tiocmset = if_tiocmset,
+};
+
/* wakeup tasklet for the write operation */
static void if_wake(unsigned long data)
@@ -562,16 +546,8 @@ void gigaset_if_free(struct cardstate *cs)
void gigaset_if_receive(struct cardstate *cs,
unsigned char *buffer, size_t len)
{
- struct tty_struct *tty = tty_port_tty_get(&cs->port);
-
- if (tty == NULL) {
- gig_dbg(DEBUG_IF, "receive on closed device");
- return;
- }
-
- tty_insert_flip_string(tty, buffer, len);
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
+ tty_insert_flip_string(&cs->port, buffer, len);
+ tty_flip_buffer_push(&cs->port);
}
EXPORT_SYMBOL_GPL(gigaset_if_receive);
diff --git a/drivers/isdn/hardware/eicon/divacapi.h b/drivers/isdn/hardware/eicon/divacapi.h
index 3942efbbfb58..a315a2914d70 100644
--- a/drivers/isdn/hardware/eicon/divacapi.h
+++ b/drivers/isdn/hardware/eicon/divacapi.h
@@ -422,11 +422,11 @@ struct _DIVA_CAPI_ADAPTER {
#define LAPD 6 /* lapd (Q.921) */
#define X25_L2 7 /* x.25 layer-2 */
#define V120_L2 8 /* V.120 layer-2 protocol */
-#define V42_IN 9 /* V.42 layer-2 protocol, incomming */
+#define V42_IN 9 /* V.42 layer-2 protocol, incoming */
#define V42 10 /* V.42 layer-2 protocol */
#define MDM_ATP 11 /* AT Parser built in the L2 */
#define X75_V42BIS 12 /* ISO7776 (X.75 SLP) modified to support V.42 bis compression */
-#define RTPL2_IN 13 /* RTP layer-2 protocol, incomming */
+#define RTPL2_IN 13 /* RTP layer-2 protocol, incoming */
#define RTPL2 14 /* RTP layer-2 protocol */
#define V120_V42BIS 15 /* V.120 layer-2 protocol supporting V.42 bis compression */
@@ -1125,7 +1125,7 @@ extern word li_total_channels;
| Direction | word | Enable compression/decompression for |
| | | 0: All direction |
| | | 1: disable outgoing data |
- | | | 2: disable incomming data |
+ | | | 2: disable incoming data |
| | | 3: disable both direction (default) |
+---------------------+------+-----------------------------------------+
| Number of code | word | Parameter P1 of V.42bis in accordance |
diff --git a/drivers/isdn/hardware/eicon/pc.h b/drivers/isdn/hardware/eicon/pc.h
index 889dc984bbca..329c0c26abfb 100644
--- a/drivers/isdn/hardware/eicon/pc.h
+++ b/drivers/isdn/hardware/eicon/pc.h
@@ -419,11 +419,11 @@ struct dual
#define LAPD 6 /* lapd (Q.921) */
#define X25_L2 7 /* x.25 layer-2 */
#define V120_L2 8 /* V.120 layer-2 protocol */
-#define V42_IN 9 /* V.42 layer-2 protocol, incomming */
+#define V42_IN 9 /* V.42 layer-2 protocol, incoming */
#define V42 10 /* V.42 layer-2 protocol */
#define MDM_ATP 11 /* AT Parser built in the L2 */
#define X75_V42BIS 12 /* x.75 with V.42bis */
-#define RTPL2_IN 13 /* RTP layer-2 protocol, incomming */
+#define RTPL2_IN 13 /* RTP layer-2 protocol, incoming */
#define RTPL2 14 /* RTP layer-2 protocol */
#define V120_V42BIS 15 /* V.120 asynchronous mode supporting V.42bis compression */
#define LISTENER 27 /* Layer 2 to listen line */
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
index eadc1cd34a20..b8611e3e5e74 100644
--- a/drivers/isdn/hardware/mISDN/Kconfig
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -76,6 +76,7 @@ config MISDN_NETJET
tristate "Support for NETJet cards"
depends on MISDN
depends on PCI
+ depends on TTY
select MISDN_IPAC
select ISDN_HDLC
select ISDN_I4L
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index 70ecd0c19500..5313c9ea44dc 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -389,8 +389,8 @@ config HISAX_TELES_CS
comment "HiSax sub driver modules"
config HISAX_ST5481
- tristate "ST5481 USB ISDN modem (EXPERIMENTAL)"
- depends on USB && EXPERIMENTAL
+ tristate "ST5481 USB ISDN modem"
+ depends on USB
select ISDN_HDLC
select CRC_CCITT
select BITREVERSE
@@ -399,20 +399,19 @@ config HISAX_ST5481
e.g. the BeWan Gazel 128 USB
config HISAX_HFCUSB
- tristate "HFC USB based ISDN modems (EXPERIMENTAL)"
- depends on USB && EXPERIMENTAL
+ tristate "HFC USB based ISDN modems"
+ depends on USB
help
This enables the driver for HFC USB based ISDN modems.
config HISAX_HFC4S8S
- tristate "HFC-4S/8S based ISDN cards (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "HFC-4S/8S based ISDN cards"
help
This enables the driver for HFC-4S/8S based ISDN cards.
config HISAX_FRITZ_PCIPNP
- tristate "AVM Fritz!Card PCI/PCIv2/PnP support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "AVM Fritz!Card PCI/PCIv2/PnP support"
+ depends on PCI
help
This enables the driver for the AVM Fritz!Card PCI,
Fritz!Card PCI v2 and Fritz!Card PnP.
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index e2a945ee9f05..b87d9e577be2 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -876,7 +876,7 @@ isdn_readbchan(int di, int channel, u_char *buf, u_char *fp, int len, wait_queue
* of the mapping (di,ch)<->minor, happen during the sleep? --he
*/
int
-isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
+isdn_readbchan_tty(int di, int channel, struct tty_port *port, int cisco_hack)
{
int count;
int count_pull;
@@ -891,7 +891,7 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
if (skb_queue_empty(&dev->drv[di]->rpqueue[channel]))
return 0;
- len = tty_buffer_request_room(tty, dev->drv[di]->rcvcount[channel]);
+ len = tty_buffer_request_room(port, dev->drv[di]->rcvcount[channel]);
if (len == 0)
return len;
@@ -912,7 +912,7 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
while ((count_pull < skb->len) && (len > 0)) {
/* push every character but the last to the tty buffer directly */
if (count_put)
- tty_insert_flip_char(tty, last, TTY_NORMAL);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
len--;
if (dev->drv[di]->DLEflag & DLEmask) {
last = DLE;
@@ -940,7 +940,7 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
}
count_put = count_pull;
if (count_put > 1)
- tty_insert_flip_string(tty, skb->data, count_put - 1);
+ tty_insert_flip_string(port, skb->data, count_put - 1);
last = skb->data[count_put - 1];
len -= count_put;
#ifdef CONFIG_ISDN_AUDIO
@@ -952,16 +952,16 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
* Now we can dequeue it.
*/
if (cisco_hack)
- tty_insert_flip_char(tty, last, 0xFF);
+ tty_insert_flip_char(port, last, 0xFF);
else
- tty_insert_flip_char(tty, last, TTY_NORMAL);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
#ifdef CONFIG_ISDN_AUDIO
ISDN_AUDIO_SKB_LOCK(skb) = 0;
#endif
skb = skb_dequeue(&dev->drv[di]->rpqueue[channel]);
dev_kfree_skb(skb);
} else {
- tty_insert_flip_char(tty, last, TTY_NORMAL);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
/* Not yet emptied this buff, so it
* must stay in the queue, for further calls
* but we pull off the data we got until now.
diff --git a/drivers/isdn/i4l/isdn_common.h b/drivers/isdn/i4l/isdn_common.h
index 9a471f62e1d4..2260ef07ab9c 100644
--- a/drivers/isdn/i4l/isdn_common.h
+++ b/drivers/isdn/i4l/isdn_common.h
@@ -37,7 +37,7 @@ extern void isdn_timer_ctrl(int tf, int onoff);
extern void isdn_unexclusive_channel(int di, int ch);
extern int isdn_getnum(char **);
extern int isdn_readbchan(int, int, u_char *, u_char *, int, wait_queue_head_t *);
-extern int isdn_readbchan_tty(int, int, struct tty_struct *, int);
+extern int isdn_readbchan_tty(int, int, struct tty_port *, int);
extern int isdn_get_free_channel(int, int, int, int, int, char *);
extern int isdn_writebuf_skb_stub(int, int, int, struct sk_buff *);
extern int register_isdn(isdn_if *i);
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index e09dc8a5e743..d8a7d8323414 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -60,18 +60,14 @@ static int si2bit[8] =
static int
isdn_tty_try_read(modem_info *info, struct sk_buff *skb)
{
+ struct tty_port *port = &info->port;
int c;
int len;
- struct tty_struct *tty;
char last;
if (!info->online)
return 0;
- tty = info->port.tty;
- if (!tty)
- return 0;
-
if (!(info->mcr & UART_MCR_RTS))
return 0;
@@ -81,7 +77,7 @@ isdn_tty_try_read(modem_info *info, struct sk_buff *skb)
#endif
;
- c = tty_buffer_request_room(tty, len);
+ c = tty_buffer_request_room(port, len);
if (c < len)
return 0;
@@ -91,25 +87,25 @@ isdn_tty_try_read(modem_info *info, struct sk_buff *skb)
unsigned char *dp = skb->data;
while (--l) {
if (*dp == DLE)
- tty_insert_flip_char(tty, DLE, 0);
- tty_insert_flip_char(tty, *dp++, 0);
+ tty_insert_flip_char(port, DLE, 0);
+ tty_insert_flip_char(port, *dp++, 0);
}
if (*dp == DLE)
- tty_insert_flip_char(tty, DLE, 0);
+ tty_insert_flip_char(port, DLE, 0);
last = *dp;
} else {
#endif
if (len > 1)
- tty_insert_flip_string(tty, skb->data, len - 1);
+ tty_insert_flip_string(port, skb->data, len - 1);
last = skb->data[len - 1];
#ifdef CONFIG_ISDN_AUDIO
}
#endif
if (info->emu.mdmreg[REG_CPPP] & BIT_CPPP)
- tty_insert_flip_char(tty, last, 0xFF);
+ tty_insert_flip_char(port, last, 0xFF);
else
- tty_insert_flip_char(tty, last, TTY_NORMAL);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_char(port, last, TTY_NORMAL);
+ tty_flip_buffer_push(port);
kfree_skb(skb);
return 1;
@@ -126,7 +122,6 @@ isdn_tty_readmodem(void)
int midx;
int i;
int r;
- struct tty_struct *tty;
modem_info *info;
for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
@@ -144,20 +139,21 @@ isdn_tty_readmodem(void)
if ((info->vonline & 1) && (info->emu.vpar[1]))
isdn_audio_eval_silence(info);
#endif
- tty = info->port.tty;
- if (tty) {
- if (info->mcr & UART_MCR_RTS) {
- /* CISCO AsyncPPP Hack */
- if (!(info->emu.mdmreg[REG_CPPP] & BIT_CPPP))
- r = isdn_readbchan_tty(info->isdn_driver, info->isdn_channel, tty, 0);
- else
- r = isdn_readbchan_tty(info->isdn_driver, info->isdn_channel, tty, 1);
- if (r)
- tty_flip_buffer_push(tty);
- } else
- r = 1;
+ if (info->mcr & UART_MCR_RTS) {
+ /* CISCO AsyncPPP Hack */
+ if (!(info->emu.mdmreg[REG_CPPP] & BIT_CPPP))
+ r = isdn_readbchan_tty(info->isdn_driver,
+ info->isdn_channel,
+ &info->port, 0);
+ else
+ r = isdn_readbchan_tty(info->isdn_driver,
+ info->isdn_channel,
+ &info->port, 1);
+ if (r)
+ tty_flip_buffer_push(&info->port);
} else
r = 1;
+
if (r) {
info->rcvsched = 0;
resched = 1;
@@ -2229,7 +2225,7 @@ isdn_tty_stat_callback(int i, isdn_ctrl *c)
void
isdn_tty_at_cout(char *msg, modem_info *info)
{
- struct tty_struct *tty;
+ struct tty_port *port = &info->port;
atemu *m = &info->emu;
char *p;
char c;
@@ -2246,15 +2242,14 @@ isdn_tty_at_cout(char *msg, modem_info *info)
l = strlen(msg);
spin_lock_irqsave(&info->readlock, flags);
- tty = info->port.tty;
- if ((info->port.flags & ASYNC_CLOSING) || (!tty)) {
+ if (port->flags & ASYNC_CLOSING) {
spin_unlock_irqrestore(&info->readlock, flags);
return;
}
/* use queue instead of direct, if online and */
/* data is in queue or buffer is full */
- if (info->online && ((tty_buffer_request_room(tty, l) < l) ||
+ if (info->online && ((tty_buffer_request_room(port, l) < l) ||
!skb_queue_empty(&dev->drv[info->isdn_driver]->rpqueue[info->isdn_channel]))) {
skb = alloc_skb(l, GFP_ATOMIC);
if (!skb) {
@@ -2285,7 +2280,7 @@ isdn_tty_at_cout(char *msg, modem_info *info)
if (skb) {
*sp++ = c;
} else {
- if (tty_insert_flip_char(tty, c, TTY_NORMAL) == 0)
+ if (tty_insert_flip_char(port, c, TTY_NORMAL) == 0)
break;
}
}
@@ -2299,7 +2294,7 @@ isdn_tty_at_cout(char *msg, modem_info *info)
} else {
spin_unlock_irqrestore(&info->readlock, flags);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
}
diff --git a/drivers/isdn/i4l/isdn_x25iface.h b/drivers/isdn/i4l/isdn_x25iface.h
index 0b26e3b336e7..ca08e082cf7c 100644
--- a/drivers/isdn/i4l/isdn_x25iface.h
+++ b/drivers/isdn/i4l/isdn_x25iface.h
@@ -19,7 +19,6 @@
#endif
#include <linux/skbuff.h>
-#include <linux/wanrouter.h>
#include <linux/isdn.h>
#include <linux/concap.h>
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index 3e245712bba7..da30c5cb9609 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -168,13 +168,13 @@ static struct class mISDN_class = {
};
static int
-_get_mdevice(struct device *dev, void *id)
+_get_mdevice(struct device *dev, const void *id)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
if (!mdev)
return 0;
- if (mdev->id != *(u_int *)id)
+ if (mdev->id != *(const u_int *)id)
return 0;
return 1;
}
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index f8e405c383a0..2c0d2c2bf946 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -689,7 +689,7 @@ l1oip_socket_thread(void *data)
hc->sin_remote.sin_addr.s_addr = htonl(hc->remoteip);
hc->sin_remote.sin_port = htons((unsigned short)hc->remoteport);
- /* bind to incomming port */
+ /* bind to incoming port */
if (socket->ops->bind(socket, (struct sockaddr *)&hc->sin_local,
sizeof(hc->sin_local))) {
printk(KERN_ERR "%s: Failed to bind socket to port %d.\n",
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 5f21f629b7ae..deda591f70b9 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/mISDNif.h>
#include <linux/kthread.h>
+#include <linux/sched.h>
#include "core.h"
static u_int *debug;
@@ -202,6 +203,9 @@ static int
mISDNStackd(void *data)
{
struct mISDNstack *st = data;
+#ifdef MISDN_MSG_STATS
+ cputime_t utime, stime;
+#endif
int err = 0;
sigfillset(&current->blocked);
@@ -303,9 +307,10 @@ mISDNStackd(void *data)
"msg %d sleep %d stopped\n",
dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
st->stopped_cnt);
+ task_cputime(st->thread, &utime, &stime);
printk(KERN_DEBUG
"mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
- dev_name(&st->dev->dev), st->thread->utime, st->thread->stime);
+ dev_name(&st->dev->dev), utime, stime);
printk(KERN_DEBUG
"mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b58bc8a14b9c..4469b441b785 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -154,7 +154,7 @@ config LEDS_HP6XX
config LEDS_PCA9532
tristate "LED driver for PCA9532 dimmer"
depends on LEDS_CLASS
- depends on I2C && INPUT && EXPERIMENTAL
+ depends on I2C && INPUT
help
This option enables support for NXP pca9532
LED controller. It is generally only useful
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index 34ae49dc557c..89875ea19ade 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
config LGUEST
tristate "Linux hypervisor example code"
- depends on X86_32 && EXPERIMENTAL && EVENTFD
+ depends on X86_32 && EVENTFD && TTY
select HVC_DRIVER
---help---
This is a very simple module which allows you to run
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index a555da64224e..696238b9f0f7 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -278,7 +278,7 @@ config PMAC_RACKMETER
config SENSORS_AMS
tristate "Apple Motion Sensor driver"
- depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C) && EXPERIMENTAL
+ depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C)
select INPUT_POLLDEV
help
Support for the motion sensor included in PowerBooks. Includes
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index 35ef6e2582b8..3024685e4cca 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -681,7 +681,7 @@ static int __init wf_pm112_init(void)
/* Count the number of CPU cores */
nr_cores = 0;
- for (cpu = NULL; (cpu = of_find_node_by_type(cpu, "cpu")) != NULL; )
+ for_each_node_by_type(cpu, "cpu")
++nr_cores;
printk(KERN_INFO "windfarm: initializing for dual-core desktop G5\n");
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
index 6e5585357cd3..2f506b9d5a52 100644
--- a/drivers/macintosh/windfarm_pm72.c
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -804,7 +804,7 @@ static int __init wf_pm72_init(void)
/* Count the number of CPU cores */
nr_chips = 0;
- for (cpu = NULL; (cpu = of_find_node_by_type(cpu, "cpu")) != NULL; )
+ for_each_node_by_type(cpu, "cpu")
++nr_chips;
if (nr_chips > NR_CHIPS)
nr_chips = NR_CHIPS;
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 844003fb4ef0..0b9a79b2f48a 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -696,7 +696,7 @@ static int __init wf_rm31_init(void)
/* Count the number of CPU cores */
nr_chips = 0;
- for (cpu = NULL; (cpu = of_find_node_by_type(cpu, "cpu")) != NULL; )
+ for_each_node_by_type(cpu, "cpu")
++nr_chips;
if (nr_chips > NR_CHIPS)
nr_chips = NR_CHIPS;
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
new file mode 100644
index 000000000000..9545c9f03809
--- /dev/null
+++ b/drivers/mailbox/Kconfig
@@ -0,0 +1,19 @@
+menuconfig MAILBOX
+ bool "Mailbox Hardware Support"
+ help
+ Mailbox is a framework to control hardware communication between
+ on-chip processors through queued messages and interrupt driven
+ signals. Say Y if your platform supports hardware mailboxes.
+
+if MAILBOX
+config PL320_MBOX
+ bool "ARM PL320 Mailbox"
+ depends on ARM_AMBA
+ help
+ An implementation of the ARM PL320 Interprocessor Communication
+ Mailbox (IPCM), tailored for the Calxeda Highbank. It is used to
+ send short messages between Highbank's A9 cores and the EnergyCore
+ Management Engine, primarily for cpufreq. Say Y here if you want
+ to use the PL320 IPCM support.
+
+endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
new file mode 100644
index 000000000000..543ad6a79505
--- /dev/null
+++ b/drivers/mailbox/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
new file mode 100644
index 000000000000..c45b3aedafba
--- /dev/null
+++ b/drivers/mailbox/pl320-ipc.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+
+#include <linux/mailbox.h>
+
+#define IPCMxSOURCE(m) ((m) * 0x40)
+#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
+#define IPCMxDCLEAR(m) (((m) * 0x40) + 0x008)
+#define IPCMxDSTATUS(m) (((m) * 0x40) + 0x00C)
+#define IPCMxMODE(m) (((m) * 0x40) + 0x010)
+#define IPCMxMSET(m) (((m) * 0x40) + 0x014)
+#define IPCMxMCLEAR(m) (((m) * 0x40) + 0x018)
+#define IPCMxMSTATUS(m) (((m) * 0x40) + 0x01C)
+#define IPCMxSEND(m) (((m) * 0x40) + 0x020)
+#define IPCMxDR(m, dr) (((m) * 0x40) + ((dr) * 4) + 0x024)
+
+#define IPCMMIS(irq) (((irq) * 8) + 0x800)
+#define IPCMRIS(irq) (((irq) * 8) + 0x804)
+
+#define MBOX_MASK(n) (1 << (n))
+#define IPC_TX_MBOX 1
+#define IPC_RX_MBOX 2
+
+#define CHAN_MASK(n) (1 << (n))
+#define A9_SOURCE 1
+#define M3_SOURCE 0
+
+static void __iomem *ipc_base;
+static int ipc_irq;
+static DEFINE_MUTEX(ipc_m1_lock);
+static DECLARE_COMPLETION(ipc_completion);
+static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
+
+static inline void set_destination(int source, int mbox)
+{
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
+}
+
+static inline void clear_destination(int source, int mbox)
+{
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
+ __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
+}
+
+static void __ipc_send(int mbox, u32 *data)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ __raw_writel(data[i], ipc_base + IPCMxDR(mbox, i));
+ __raw_writel(0x1, ipc_base + IPCMxSEND(mbox));
+}
+
+static u32 __ipc_rcv(int mbox, u32 *data)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ data[i] = __raw_readl(ipc_base + IPCMxDR(mbox, i));
+ return data[1];
+}
+
+/* blocking implementation from the A9 side, not usable in interrupts! */
+int pl320_ipc_transmit(u32 *data)
+{
+ int ret;
+
+ mutex_lock(&ipc_m1_lock);
+
+ init_completion(&ipc_completion);
+ __ipc_send(IPC_TX_MBOX, data);
+ ret = wait_for_completion_timeout(&ipc_completion,
+ msecs_to_jiffies(1000));
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ ret = __ipc_rcv(IPC_TX_MBOX, data);
+out:
+ mutex_unlock(&ipc_m1_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_transmit);
+
+static irqreturn_t ipc_handler(int irq, void *dev)
+{
+ u32 irq_stat;
+ u32 data[7];
+
+ irq_stat = __raw_readl(ipc_base + IPCMMIS(1));
+ if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
+ __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+ complete(&ipc_completion);
+ }
+ if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
+ __ipc_rcv(IPC_RX_MBOX, data);
+ atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
+ __raw_writel(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
+ }
+
+ return IRQ_HANDLED;
+}
+
+int pl320_ipc_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ipc_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_register_notifier);
+
+int pl320_ipc_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ipc_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
+
+static int __init pl320_probe(struct amba_device *adev,
+ const struct amba_id *id)
+{
+ int ret;
+
+ ipc_base = ioremap(adev->res.start, resource_size(&adev->res));
+ if (ipc_base == NULL)
+ return -ENOMEM;
+
+ __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+
+ ipc_irq = adev->irq[0];
+ ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
+ if (ret < 0)
+ goto err;
+
+ /* Init slow mailbox */
+ __raw_writel(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
+ __raw_writel(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxDSET(IPC_TX_MBOX));
+ __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_TX_MBOX));
+
+ /* Init receive mailbox */
+ __raw_writel(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
+ __raw_writel(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxDSET(IPC_RX_MBOX));
+ __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_RX_MBOX));
+
+ return 0;
+err:
+ iounmap(ipc_base);
+ return ret;
+}
+
+static struct amba_id pl320_ids[] = {
+ {
+ .id = 0x00041320,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl320_driver = {
+ .drv = {
+ .name = "pl320",
+ },
+ .id_table = pl320_ids,
+ .probe = pl320_probe,
+};
+
+static int __init ipc_init(void)
+{
+ return amba_driver_register(&pl320_driver);
+}
+module_init(ipc_init);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 3d8984edeff7..9e58dbd8d8cb 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -340,24 +340,22 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
}
/*
- * validate_rebuild_devices
+ * validate_raid_redundancy
* @rs
*
- * Determine if the devices specified for rebuild can result in a valid
- * usable array that is capable of rebuilding the given devices.
+ * Determine if there are enough devices in the array that haven't
+ * failed (or are being rebuilt) to form a usable array.
*
* Returns: 0 on success, -EINVAL on failure.
*/
-static int validate_rebuild_devices(struct raid_set *rs)
+static int validate_raid_redundancy(struct raid_set *rs)
{
unsigned i, rebuild_cnt = 0;
unsigned rebuilds_per_group, copies, d;
- if (!(rs->print_flags & DMPF_REBUILD))
- return 0;
-
for (i = 0; i < rs->md.raid_disks; i++)
- if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+ if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+ !rs->dev[i].rdev.sb_page)
rebuild_cnt++;
switch (rs->raid_type->level) {
@@ -393,27 +391,24 @@ static int validate_rebuild_devices(struct raid_set *rs)
* A A B B C
* C D D E E
*/
- rebuilds_per_group = 0;
for (i = 0; i < rs->md.raid_disks * copies; i++) {
+ if (!(i % copies))
+ rebuilds_per_group = 0;
d = i % rs->md.raid_disks;
- if (!test_bit(In_sync, &rs->dev[d].rdev.flags) &&
+ if ((!rs->dev[d].rdev.sb_page ||
+ !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
(++rebuilds_per_group >= copies))
goto too_many;
- if (!((i + 1) % copies))
- rebuilds_per_group = 0;
}
break;
default:
- DMERR("The rebuild parameter is not supported for %s",
- rs->raid_type->name);
- rs->ti->error = "Rebuild not supported for this RAID type";
- return -EINVAL;
+ if (rebuild_cnt)
+ return -EINVAL;
}
return 0;
too_many:
- rs->ti->error = "Too many rebuild devices specified";
return -EINVAL;
}
@@ -664,9 +659,6 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
rs->md.dev_sectors = sectors_per_dev;
- if (validate_rebuild_devices(rs))
- return -EINVAL;
-
/* Assume there are no metadata devices until the drives are parsed */
rs->md.persistent = 0;
rs->md.external = 1;
@@ -995,28 +987,10 @@ static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
int ret;
- unsigned redundancy = 0;
struct raid_dev *dev;
struct md_rdev *rdev, *tmp, *freshest;
struct mddev *mddev = &rs->md;
- switch (rs->raid_type->level) {
- case 1:
- redundancy = rs->md.raid_disks - 1;
- break;
- case 4:
- case 5:
- case 6:
- redundancy = rs->raid_type->parity_devs;
- break;
- case 10:
- redundancy = raid10_md_layout_to_copies(mddev->layout) - 1;
- break;
- default:
- ti->error = "Unknown RAID type";
- return -EINVAL;
- }
-
freshest = NULL;
rdev_for_each_safe(rdev, tmp, mddev) {
/*
@@ -1045,44 +1019,43 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
break;
default:
dev = container_of(rdev, struct raid_dev, rdev);
- if (redundancy--) {
- if (dev->meta_dev)
- dm_put_device(ti, dev->meta_dev);
-
- dev->meta_dev = NULL;
- rdev->meta_bdev = NULL;
+ if (dev->meta_dev)
+ dm_put_device(ti, dev->meta_dev);
- if (rdev->sb_page)
- put_page(rdev->sb_page);
+ dev->meta_dev = NULL;
+ rdev->meta_bdev = NULL;
- rdev->sb_page = NULL;
+ if (rdev->sb_page)
+ put_page(rdev->sb_page);
- rdev->sb_loaded = 0;
+ rdev->sb_page = NULL;
- /*
- * We might be able to salvage the data device
- * even though the meta device has failed. For
- * now, we behave as though '- -' had been
- * set for this device in the table.
- */
- if (dev->data_dev)
- dm_put_device(ti, dev->data_dev);
+ rdev->sb_loaded = 0;
- dev->data_dev = NULL;
- rdev->bdev = NULL;
+ /*
+ * We might be able to salvage the data device
+ * even though the meta device has failed. For
+ * now, we behave as though '- -' had been
+ * set for this device in the table.
+ */
+ if (dev->data_dev)
+ dm_put_device(ti, dev->data_dev);
- list_del(&rdev->same_set);
+ dev->data_dev = NULL;
+ rdev->bdev = NULL;
- continue;
- }
- ti->error = "Failed to load superblock";
- return ret;
+ list_del(&rdev->same_set);
}
}
if (!freshest)
return 0;
+ if (validate_raid_redundancy(rs)) {
+ rs->ti->error = "Insufficient redundancy to activate array";
+ return -EINVAL;
+ }
+
/*
* Validation of the freshest device provides the source of
* validation for the remaining devices.
@@ -1432,7 +1405,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 4, 0},
+ .version = {1, 4, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 675ae5274016..5409607d4875 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2746,19 +2746,9 @@ static int thin_iterate_devices(struct dm_target *ti,
return 0;
}
-/*
- * A thin device always inherits its queue limits from its pool.
- */
-static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
-{
- struct thin_c *tc = ti->private;
-
- *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
-}
-
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 6, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
@@ -2767,7 +2757,6 @@ static struct target_type thin_target = {
.postsuspend = thin_postsuspend,
.status = thin_status,
.iterate_devices = thin_iterate_devices,
- .io_hints = thin_io_hints,
};
/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c72e4d5a9617..314a0e2faf79 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1188,6 +1188,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
{
struct dm_target *ti;
sector_t len;
+ unsigned num_requests;
do {
ti = dm_table_find_target(ci->map, ci->sector);
@@ -1200,7 +1201,8 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
* reconfiguration might also have changed that since the
* check was performed.
*/
- if (!get_num_requests || !get_num_requests(ti))
+ num_requests = get_num_requests ? get_num_requests(ti) : 0;
+ if (!num_requests)
return -EOPNOTSUPP;
if (is_split_required && !is_split_required(ti))
@@ -1208,7 +1210,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
else
len = min(ci->sector_count, max_io_len(ci->sector, ti));
- __issue_target_requests(ci, ti, ti->num_discard_requests, len);
+ __issue_target_requests(ci, ti, num_requests, len);
ci->sector += len;
} while (ci->sector_count -= len);
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index d247a35da3c6..7b17a1fdeaf9 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -25,8 +25,8 @@ struct shadow_info {
/*
* It would be nice if we scaled with the size of transaction.
*/
-#define HASH_SIZE 256
-#define HASH_MASK (HASH_SIZE - 1)
+#define DM_HASH_SIZE 256
+#define DM_HASH_MASK (DM_HASH_SIZE - 1)
struct dm_transaction_manager {
int is_clone;
@@ -36,7 +36,7 @@ struct dm_transaction_manager {
struct dm_space_map *sm;
spinlock_t lock;
- struct hlist_head buckets[HASH_SIZE];
+ struct hlist_head buckets[DM_HASH_SIZE];
};
/*----------------------------------------------------------------*/
@@ -44,7 +44,7 @@ struct dm_transaction_manager {
static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
int r = 0;
- unsigned bucket = dm_hash_block(b, HASH_MASK);
+ unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
struct shadow_info *si;
struct hlist_node *n;
@@ -71,7 +71,7 @@ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
si = kmalloc(sizeof(*si), GFP_NOIO);
if (si) {
si->where = b;
- bucket = dm_hash_block(b, HASH_MASK);
+ bucket = dm_hash_block(b, DM_HASH_MASK);
spin_lock(&tm->lock);
hlist_add_head(&si->hlist, tm->buckets + bucket);
spin_unlock(&tm->lock);
@@ -86,7 +86,7 @@ static void wipe_shadow_table(struct dm_transaction_manager *tm)
int i;
spin_lock(&tm->lock);
- for (i = 0; i < HASH_SIZE; i++) {
+ for (i = 0; i < DM_HASH_SIZE; i++) {
bucket = tm->buckets + i;
hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
kfree(si);
@@ -115,7 +115,7 @@ static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
tm->sm = sm;
spin_lock_init(&tm->lock);
- for (i = 0; i < HASH_SIZE; i++)
+ for (i = 0; i < DM_HASH_SIZE; i++)
INIT_HLIST_HEAD(tm->buckets + i);
return tm;
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 84d85b921d6d..7f5a7cac6dc7 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -79,8 +79,7 @@ config MEDIA_RC_SUPPORT
#
config MEDIA_CONTROLLER
- bool "Media Controller API (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "Media Controller API"
depends on MEDIA_CAMERA_SUPPORT
---help---
Enable the media controller API used to query media devices internal
@@ -100,8 +99,8 @@ config VIDEO_DEV
default y
config VIDEO_V4L2_SUBDEV_API
- bool "V4L2 sub-device userspace API (EXPERIMENTAL)"
- depends on VIDEO_DEV && MEDIA_CONTROLLER && EXPERIMENTAL
+ bool "V4L2 sub-device userspace API"
+ depends on VIDEO_DEV && MEDIA_CONTROLLER
---help---
Enables the V4L2 sub-device pad-level userspace API used to configure
video format, size and frame rate between hardware blocks.
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index b059abf572d6..6e50a7581568 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -1863,7 +1863,7 @@ static int dvb_frontend_ioctl(struct file *file,
struct dvb_frontend *fe = dvbdev->priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- int err = -ENOTTY;
+ int err = -EOPNOTSUPP;
dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd));
if (down_interruptible(&fepriv->sem))
@@ -1985,7 +1985,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
}
} else
- err = -ENOTTY;
+ err = -EOPNOTSUPP;
out:
kfree(tvp);
@@ -2118,7 +2118,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
struct dvb_frontend *fe = dvbdev->priv;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int err = -ENOTTY;
+ int err = -EOPNOTSUPP;
switch (cmd) {
case FE_GET_INFO: {
diff --git a/drivers/media/pci/cx25821/Kconfig b/drivers/media/pci/cx25821/Kconfig
index 5f6b54213713..4017c9420348 100644
--- a/drivers/media/pci/cx25821/Kconfig
+++ b/drivers/media/pci/cx25821/Kconfig
@@ -18,7 +18,7 @@ config VIDEO_CX25821
config VIDEO_CX25821_ALSA
tristate "Conexant 25821 DMA audio support"
- depends on VIDEO_CX25821 && SND && EXPERIMENTAL
+ depends on VIDEO_CX25821 && SND
select SND_PCM
---help---
This is a video4linux driver for direct (DMA) audio on
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 2433e2bbeee4..05d7b6333461 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -98,8 +98,8 @@ config VIDEO_OMAP2
This is a v4l2 driver for the TI OMAP2 camera capture interface
config VIDEO_OMAP3
- tristate "OMAP 3 Camera support (EXPERIMENTAL)"
- depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL
+ tristate "OMAP 3 Camera support"
+ depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
---help---
Driver for an OMAP 3 camera controller.
@@ -169,8 +169,8 @@ config VIDEO_SAMSUNG_S5P_G2D
2d graphics accelerator.
config VIDEO_SAMSUNG_S5P_JPEG
- tristate "Samsung S5P/Exynos4 JPEG codec driver (EXPERIMENTAL)"
- depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P && EXPERIMENTAL
+ tristate "Samsung S5P/Exynos4 JPEG codec driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
---help---
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 8d20b644bf2c..20827ba168fc 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -23,8 +23,8 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/of.h>
+#include <linux/platform_data/imx-iram.h>
-#include <mach/iram.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 494d32231fb5..a19c552232d1 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -25,7 +25,6 @@
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <linux/io.h>
-#include <mach/hardware.h>
#include <media/davinci/vpss.h>
MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/s5p-fimc/Kconfig b/drivers/media/platform/s5p-fimc/Kconfig
index c16b20d86ed2..f997a5203b7c 100644
--- a/drivers/media/platform/s5p-fimc/Kconfig
+++ b/drivers/media/platform/s5p-fimc/Kconfig
@@ -2,7 +2,6 @@
config VIDEO_SAMSUNG_S5P_FIMC
bool "Samsung S5P/EXYNOS SoC camera interface driver (experimental)"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && PLAT_S5P && PM_RUNTIME
- depends on EXPERIMENTAL
help
Say Y here to enable camera host interface devices for
Samsung S5P and EXYNOS SoC series.
diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
index ea11a513033f..7b659bd09bfd 100644
--- a/drivers/media/platform/s5p-tv/Kconfig
+++ b/drivers/media/platform/s5p-tv/Kconfig
@@ -7,9 +7,8 @@
# Licensed under GPL
config VIDEO_SAMSUNG_S5P_TV
- bool "Samsung TV driver for S5P platform (experimental)"
+ bool "Samsung TV driver for S5P platform"
depends on PLAT_S5P && PM_RUNTIME
- depends on EXPERIMENTAL
default n
---help---
Say Y here to enable selecting the TV output devices for
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index ea56b7589211..ffba7d91f413 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -289,7 +289,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
/*
* This is a generic configuration which is valid for most
* prp input-output format combinations.
- * We set the incomming and outgoing pixelformat to a
+ * We set the incoming and outgoing pixelformat to a
* 16 Bit wide format and adjust the bytesperline
* accordingly. With this configuration the inputdata
* will not be changed by the emma and could be any type
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index ead992853730..24e64a09884c 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -192,7 +192,7 @@ config RADIO_TIMBERDALE
config RADIO_WL1273
tristate "Texas Instruments WL1273 I2C FM Radio"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_V4L2 && GENERIC_HARDIRQS
select MFD_CORE
select MFD_WL1273_CORE
select FW_LOADER
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index e10e525f33e5..296941a9ae25 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf,
radio->vdev.ioctl_ops = &usb_keene_ioctl_ops;
radio->vdev.lock = &radio->lock;
radio->vdev.release = video_device_release_empty;
+ radio->vdev.vfl_dir = VFL_DIR_TX;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c
index a082e400ed0f..1507c9d508d7 100644
--- a/drivers/media/radio/radio-si4713.c
+++ b/drivers/media/radio/radio-si4713.c
@@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = {
.name = "radio-si4713",
.release = video_device_release,
.ioctl_ops = &radio_si4713_ioctl_ops,
+ .vfl_dir = VFL_DIR_TX,
};
/* Platform driver interface */
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 681d4aeee530..02151e0e6e63 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = {
.ioctl_ops = &wl1273_ioctl_ops,
.name = WL1273_FM_DRIVER_NAME,
.release = wl1273_vdev_release,
+ .vfl_dir = VFL_DIR_TX,
};
static int wl1273_fm_radio_remove(struct platform_device *pdev)
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
index ea1e6545df36..f359be7e9dd9 100644
--- a/drivers/media/radio/wl128x/Kconfig
+++ b/drivers/media/radio/wl128x/Kconfig
@@ -4,7 +4,7 @@
menu "Texas Instruments WL128x FM driver (ST based)"
config RADIO_WL128X
tristate "Texas Instruments WL128x FM Radio"
- depends on VIDEO_V4L2 && RFKILL && GPIOLIB
+ depends on VIDEO_V4L2 && RFKILL && GPIOLIB && TTY
select TI_ST if NET
help
Choose Y here if you have this FM radio chip.
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index 048de4536036..0a8ee8fab924 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = {
.ioctl_ops = &fm_drv_ioctl_ops,
.name = FM_DRV_NAME,
.release = video_device_release,
+ /*
+ * To ensure both the tuner and modulator ioctls are accessible we
+ * set the vfl_dir to M2M to indicate this.
+ *
+ * It is not really a mem2mem device of course, but it can both receive
+ * and transmit using the same radio device. It's the only radio driver
+ * that does this and it should really be split in two radio devices,
+ * but that would affect applications using this driver.
+ */
+ .vfl_dir = VFL_DIR_M2M,
};
int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 79ba242fe263..19f3563c61da 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -291,7 +291,7 @@ config IR_TTUSBIR
config IR_RX51
tristate "Nokia N900 IR transmitter diode"
- depends on OMAP_DM_TIMER && LIRC
+ depends on OMAP_DM_TIMER && LIRC && !ARCH_MULTIPLATFORM
---help---
Say Y or M here if you want to enable support for the IR
transmitter diode built in the Nokia N900 (RX51) device.
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 7b5773fe6367..692224d97d06 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -136,7 +136,7 @@ config DVB_USB_MXL111SF
config DVB_USB_RTL28XXU
tristate "Realtek RTL28xxU DVB USB support"
- depends on DVB_USB_V2 && EXPERIMENTAL
+ depends on DVB_USB_V2
select DVB_RTL2830
select DVB_RTL2832
select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig
index 32b11c15bb1a..60a2604e4cb3 100644
--- a/drivers/media/usb/pvrusb2/Kconfig
+++ b/drivers/media/usb/pvrusb2/Kconfig
@@ -17,9 +17,9 @@ config VIDEO_PVRUSB2
module will be called pvrusb2
config VIDEO_PVRUSB2_SYSFS
- bool "pvrusb2 sysfs support (EXPERIMENTAL)"
+ bool "pvrusb2 sysfs support"
default y
- depends on VIDEO_PVRUSB2 && SYSFS && EXPERIMENTAL
+ depends on VIDEO_PVRUSB2 && SYSFS
---help---
This option enables the operation of a sysfs based
interface for query and control of the pvrusb2 driver.
@@ -33,9 +33,9 @@ config VIDEO_PVRUSB2_SYSFS
Note: This feature is experimental and subject to change.
config VIDEO_PVRUSB2_DVB
- bool "pvrusb2 ATSC/DVB support (EXPERIMENTAL)"
+ bool "pvrusb2 ATSC/DVB support"
default y
- depends on VIDEO_PVRUSB2 && DVB_CORE && EXPERIMENTAL
+ depends on VIDEO_PVRUSB2 && DVB_CORE
select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 06d31c99e6ac..df0873694858 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -10,6 +10,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/platform_data/emif_plat.h>
@@ -1468,12 +1469,9 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
goto error;
}
- emif->base = devm_request_and_ioremap(emif->dev, res);
- if (!emif->base) {
- dev_err(emif->dev, "%s: devm_request_and_ioremap() failed\n",
- __func__);
+ emif->base = devm_ioremap_resource(emif->dev, res);
+ if (IS_ERR(emif->base))
goto error;
- }
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
diff --git a/drivers/memory/tegra20-mc.c b/drivers/memory/tegra20-mc.c
index 186f27d9e5f1..2ca5f2814f4a 100644
--- a/drivers/memory/tegra20-mc.c
+++ b/drivers/memory/tegra20-mc.c
@@ -17,6 +17,7 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
@@ -216,9 +217,9 @@ static int tegra20_mc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
return -ENODEV;
- mc->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
- if (!mc->regs[i])
- return -EBUSY;
+ mc->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mc->regs[i]))
+ return PTR_ERR(mc->regs[i]);
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/memory/tegra30-mc.c b/drivers/memory/tegra30-mc.c
index 0b7ab9332a18..0b975986777d 100644
--- a/drivers/memory/tegra30-mc.c
+++ b/drivers/memory/tegra30-mc.c
@@ -17,6 +17,7 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
@@ -336,9 +337,9 @@ static int tegra30_mc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
return -ENODEV;
- mc->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
- if (!mc->regs[i])
- return -EBUSY;
+ mc->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mc->regs[i]))
+ return PTR_ERR(mc->regs[i]);
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/memstick/Kconfig b/drivers/memstick/Kconfig
index f0ca41c20323..1314605d791f 100644
--- a/drivers/memstick/Kconfig
+++ b/drivers/memstick/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig MEMSTICK
- tristate "Sony MemoryStick card support (EXPERIMENTAL)"
+ tristate "Sony MemoryStick card support"
help
Sony MemoryStick is a proprietary storage/extension card protocol.
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 4f7a17fd1aa7..1b37cf8cd204 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -5,8 +5,8 @@
comment "MemoryStick Host Controller Drivers"
config MEMSTICK_TIFM_MS
- tristate "TI Flash Media MemoryStick Interface support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI
+ tristate "TI Flash Media MemoryStick Interface support "
+ depends on PCI
select TIFM_CORE
help
Say Y here if you want to be able to access MemoryStick cards with
@@ -21,8 +21,8 @@ config MEMSTICK_TIFM_MS
module will be called tifm_ms.
config MEMSTICK_JMICRON_38X
- tristate "JMicron JMB38X MemoryStick interface support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI
+ tristate "JMicron JMB38X MemoryStick interface support"
+ depends on PCI
help
Say Y here if you want to be able to access MemoryStick cards with
@@ -32,8 +32,8 @@ config MEMSTICK_JMICRON_38X
module will be called jmb38x_ms.
config MEMSTICK_R592
- tristate "Ricoh R5C592 MemoryStick interface support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI
+ tristate "Ricoh R5C592 MemoryStick interface support"
+ depends on PCI
help
Say Y here if you want to be able to access MemoryStick cards with
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1c0abd4dfc43..ff553babf455 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -237,6 +237,7 @@ config MFD_TPS65910
depends on I2C=y && GPIOLIB
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
select IRQ_DOMAIN
help
if you say yes here you get support for the TPS65910 series of
@@ -292,6 +293,7 @@ config TWL4030_CORE
bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
depends on I2C=y && GENERIC_HARDIRQS
select IRQ_DOMAIN
+ select REGMAP_I2C
help
Say yes here if you have TWL4030 / TWL6030 family chip on your board.
This core driver provides register access and IRQ handling
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index e1650badd106..8b5d685ab980 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -19,6 +19,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/regulator/ab8500.h>
#include <linux/of.h>
@@ -749,6 +750,12 @@ static struct resource ab8500_charger_resources[] = {
.end = AB8500_INT_CH_WD_EXP,
.flags = IORESOURCE_IRQ,
},
+ {
+ .name = "VBUS_CH_DROP_END",
+ .start = AB8500_INT_VBUS_CH_DROP_END,
+ .end = AB8500_INT_VBUS_CH_DROP_END,
+ .flags = IORESOURCE_IRQ,
+ },
};
static struct resource ab8500_btemp_resources[] = {
@@ -1011,40 +1018,32 @@ static struct mfd_cell ab8500_bm_devs[] = {
.of_compatible = "stericsson,ab8500-charger",
.num_resources = ARRAY_SIZE(ab8500_charger_resources),
.resources = ab8500_charger_resources,
-#ifndef CONFIG_OF
.platform_data = &ab8500_bm_data,
.pdata_size = sizeof(ab8500_bm_data),
-#endif
},
{
.name = "ab8500-btemp",
.of_compatible = "stericsson,ab8500-btemp",
.num_resources = ARRAY_SIZE(ab8500_btemp_resources),
.resources = ab8500_btemp_resources,
-#ifndef CONFIG_OF
.platform_data = &ab8500_bm_data,
.pdata_size = sizeof(ab8500_bm_data),
-#endif
},
{
.name = "ab8500-fg",
.of_compatible = "stericsson,ab8500-fg",
.num_resources = ARRAY_SIZE(ab8500_fg_resources),
.resources = ab8500_fg_resources,
-#ifndef CONFIG_OF
.platform_data = &ab8500_bm_data,
.pdata_size = sizeof(ab8500_bm_data),
-#endif
},
{
.name = "ab8500-chargalg",
.of_compatible = "stericsson,ab8500-chargalg",
.num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
.resources = ab8500_chargalg_resources,
-#ifndef CONFIG_OF
.platform_data = &ab8500_bm_data,
.pdata_size = sizeof(ab8500_bm_data),
-#endif
},
};
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bc8a3edb6bbf..222c03a5ddc0 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -239,7 +239,12 @@ static int arizona_runtime_resume(struct device *dev)
return ret;
}
- regcache_sync(arizona->regmap);
+ ret = regcache_sync(arizona->regmap);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to restore register cache\n");
+ regulator_disable(arizona->dcvdd);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 74713bf5371f..2bec5f0db3ee 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -176,14 +176,7 @@ int arizona_irq_init(struct arizona *arizona)
aod = &wm5102_aod;
irq = &wm5102_irq;
- switch (arizona->rev) {
- case 0:
- case 1:
- ctrlif_error = false;
- break;
- default:
- break;
- }
+ ctrlif_error = false;
break;
#endif
#ifdef CONFIG_MFD_WM5110
@@ -191,14 +184,7 @@ int arizona_irq_init(struct arizona *arizona)
aod = &wm5110_aod;
irq = &wm5110_irq;
- switch (arizona->rev) {
- case 0:
- case 1:
- ctrlif_error = false;
- break;
- default:
- break;
- }
+ ctrlif_error = false;
break;
#endif
default:
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index ac74a4d1daea..885e56780358 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -27,6 +27,66 @@
#include <linux/of_device.h>
#endif
+/* I2C safe register check */
+static inline bool i2c_safe_reg(unsigned char reg)
+{
+ switch (reg) {
+ case DA9052_STATUS_A_REG:
+ case DA9052_STATUS_B_REG:
+ case DA9052_STATUS_C_REG:
+ case DA9052_STATUS_D_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_ICHG_AV_REG:
+ case DA9052_TBAT_RES_REG:
+ case DA9052_ADCIN4_RES_REG:
+ case DA9052_ADCIN5_RES_REG:
+ case DA9052_ADCIN6_RES_REG:
+ case DA9052_TJUNC_RES_REG:
+ case DA9052_TSI_X_MSB_REG:
+ case DA9052_TSI_Y_MSB_REG:
+ case DA9052_TSI_LSB_REG:
+ case DA9052_TSI_Z_MSB_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * There is an issue with DA9052 and DA9053_AA/BA/BB PMIC where the PMIC
+ * gets locked up or fails to respond following a system reset.
+ * This fix is to follow any read or write with a dummy read to a safe
+ * register.
+ */
+int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
+{
+ int val;
+
+ switch (da9052->chip_id) {
+ case DA9052:
+ case DA9053_AA:
+ case DA9053_BA:
+ case DA9053_BB:
+ /* A dummy read to a safe register address. */
+ if (!i2c_safe_reg(reg))
+ return regmap_read(da9052->regmap,
+ DA9052_PARK_REGISTER,
+ &val);
+ break;
+ default:
+ /*
+ * For other chips parking of I2C register
+ * to a safe place is not required.
+ */
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(da9052_i2c_fix);
+
static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
{
int reg_val, ret;
@@ -83,6 +143,7 @@ static int da9052_i2c_probe(struct i2c_client *client,
da9052->dev = &client->dev;
da9052->chip_irq = client->irq;
+ da9052->fix_io = da9052_i2c_fix;
i2c_set_clientdata(client, da9052);
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index dc8826d8d69d..a2bacf95b59e 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -26,22 +26,18 @@
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
+#include <linux/irqchip/arm-gic.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/machine.h>
#include <linux/cpufreq.h>
-#include <asm/hardware/gic.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/db8500-regs.h>
-#include <mach/id.h>
#include "dbx500-prcmu-regs.h"
-/* Offset for the firmware version within the TCPM */
-#define PRCMU_FW_VERSION_OFFSET 0xA4
-
/* Index of different voltages to be used when accessing AVSData */
#define PRCM_AVS_BASE 0x2FC
#define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0)
@@ -216,10 +212,8 @@
#define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1)
#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2)
#define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3)
-#define PRCMU_I2C_WRITE(slave) \
- (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
-#define PRCMU_I2C_READ(slave) \
- (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0))
+#define PRCMU_I2C_WRITE(slave) (((slave) << 1) | BIT(6))
+#define PRCMU_I2C_READ(slave) (((slave) << 1) | BIT(0) | BIT(6))
#define PRCMU_I2C_STOP_EN BIT(3)
/* Mailbox 5 ACKs */
@@ -1049,12 +1043,13 @@ int db8500_prcmu_get_ddr_opp(void)
*
* This function sets the operating point of the DDR.
*/
+static bool enable_set_ddr_opp;
int db8500_prcmu_set_ddr_opp(u8 opp)
{
if (opp < DDR_100_OPP || opp > DDR_25_OPP)
return -EINVAL;
/* Changing the DDR OPP can hang the hardware pre-v21 */
- if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
+ if (enable_set_ddr_opp)
writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW);
return 0;
@@ -2524,7 +2519,7 @@ static bool read_mailbox_0(void)
for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
if (ev & prcmu_irq_bit[n])
- generic_handle_irq(IRQ_PRCMU_BASE + n);
+ generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
}
r = true;
break;
@@ -2706,21 +2701,43 @@ static struct irq_chip prcmu_irq_chip = {
.irq_unmask = prcmu_irq_unmask,
};
-static char *fw_project_name(u8 project)
+static __init char *fw_project_name(u32 project)
{
switch (project) {
case PRCMU_FW_PROJECT_U8500:
return "U8500";
- case PRCMU_FW_PROJECT_U8500_C2:
- return "U8500 C2";
+ case PRCMU_FW_PROJECT_U8400:
+ return "U8400";
case PRCMU_FW_PROJECT_U9500:
return "U9500";
- case PRCMU_FW_PROJECT_U9500_C2:
- return "U9500 C2";
+ case PRCMU_FW_PROJECT_U8500_MBB:
+ return "U8500 MBB";
+ case PRCMU_FW_PROJECT_U8500_C1:
+ return "U8500 C1";
+ case PRCMU_FW_PROJECT_U8500_C2:
+ return "U8500 C2";
+ case PRCMU_FW_PROJECT_U8500_C3:
+ return "U8500 C3";
+ case PRCMU_FW_PROJECT_U8500_C4:
+ return "U8500 C4";
+ case PRCMU_FW_PROJECT_U9500_MBL:
+ return "U9500 MBL";
+ case PRCMU_FW_PROJECT_U8500_MBL:
+ return "U8500 MBL";
+ case PRCMU_FW_PROJECT_U8500_MBL2:
+ return "U8500 MBL2";
case PRCMU_FW_PROJECT_U8520:
- return "U8520";
+ return "U8520 MBL";
case PRCMU_FW_PROJECT_U8420:
return "U8420";
+ case PRCMU_FW_PROJECT_U9540:
+ return "U9540";
+ case PRCMU_FW_PROJECT_A9420:
+ return "A9420";
+ case PRCMU_FW_PROJECT_L8540:
+ return "L8540";
+ case PRCMU_FW_PROJECT_L8580:
+ return "L8580";
default:
return "Unknown";
}
@@ -2737,13 +2754,14 @@ static int db8500_irq_map(struct irq_domain *d, unsigned int virq,
}
static struct irq_domain_ops db8500_irq_ops = {
- .map = db8500_irq_map,
- .xlate = irq_domain_xlate_twocell,
+ .map = db8500_irq_map,
+ .xlate = irq_domain_xlate_twocell,
};
static int db8500_irq_init(struct device_node *np)
{
- int irq_base = -1;
+ int irq_base = 0;
+ int i;
/* In the device tree case, just take some IRQs */
if (!np)
@@ -2758,39 +2776,51 @@ static int db8500_irq_init(struct device_node *np)
return -ENOSYS;
}
+ /* All wakeups will be used, so create mappings for all */
+ for (i = 0; i < NUM_PRCMU_WAKEUPS; i++)
+ irq_create_mapping(db8500_irq_domain, i);
+
return 0;
}
-void __init db8500_prcmu_early_init(void)
+static void dbx500_fw_version_init(struct platform_device *pdev,
+ u32 version_offset)
{
- if (cpu_is_u8500v2() || cpu_is_u9540()) {
- void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
-
- if (tcpm_base != NULL) {
- u32 version;
- version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
- fw_info.version.project = version & 0xFF;
- fw_info.version.api_version = (version >> 8) & 0xFF;
- fw_info.version.func_version = (version >> 16) & 0xFF;
- fw_info.version.errata = (version >> 24) & 0xFF;
- fw_info.valid = true;
- pr_info("PRCMU firmware: %s, version %d.%d.%d\n",
- fw_project_name(fw_info.version.project),
- (version >> 8) & 0xFF, (version >> 16) & 0xFF,
- (version >> 24) & 0xFF);
- iounmap(tcpm_base);
- }
+ struct resource *res;
+ void __iomem *tcpm_base;
- if (cpu_is_u9540())
- tcdm_base = ioremap_nocache(U8500_PRCMU_TCDM_BASE,
- SZ_4K + SZ_8K) + SZ_8K;
- else
- tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
- } else {
- pr_err("prcmu: Unsupported chip version\n");
- BUG();
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "prcmu-tcpm");
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Error: no prcmu tcpm memory region provided\n");
+ return;
+ }
+ tcpm_base = ioremap(res->start, resource_size(res));
+ if (tcpm_base != NULL) {
+ u32 version;
+
+ version = readl(tcpm_base + version_offset);
+ fw_info.version.project = (version & 0xFF);
+ fw_info.version.api_version = (version >> 8) & 0xFF;
+ fw_info.version.func_version = (version >> 16) & 0xFF;
+ fw_info.version.errata = (version >> 24) & 0xFF;
+ strncpy(fw_info.version.project_name,
+ fw_project_name(fw_info.version.project),
+ PRCMU_FW_PROJECT_NAME_LEN);
+ fw_info.valid = true;
+ pr_info("PRCMU firmware: %s(%d), version %d.%d.%d\n",
+ fw_info.version.project_name,
+ fw_info.version.project,
+ fw_info.version.api_version,
+ fw_info.version.func_version,
+ fw_info.version.errata);
+ iounmap(tcpm_base);
}
+}
+void __init db8500_prcmu_early_init(void)
+{
spin_lock_init(&mb0_transfer.lock);
spin_lock_init(&mb0_transfer.dbb_irqs_lock);
mutex_init(&mb0_transfer.ac_wake_lock);
@@ -3072,8 +3102,8 @@ static struct mfd_cell db8500_prcmu_devs[] = {
.pdata_size = sizeof(db8500_regulators),
},
{
- .name = "cpufreq-u8500",
- .of_compatible = "stericsson,cpufreq-u8500",
+ .name = "cpufreq-ux500",
+ .of_compatible = "stericsson,cpufreq-ux500",
.platform_data = &db8500_cpufreq_table,
.pdata_size = sizeof(db8500_cpufreq_table),
},
@@ -3100,23 +3130,30 @@ static void db8500_prcmu_update_cpufreq(void)
*/
static int db8500_prcmu_probe(struct platform_device *pdev)
{
- struct ab8500_platform_data *ab8500_platdata = pdev->dev.platform_data;
struct device_node *np = pdev->dev.of_node;
+ struct prcmu_pdata *pdata = dev_get_platdata(&pdev->dev);
int irq = 0, err = 0, i;
-
- if (ux500_is_svp())
- return -ENODEV;
+ struct resource *res;
init_prcm_registers();
+ dbx500_fw_version_init(pdev, pdata->version_offset);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm");
+ if (!res) {
+ dev_err(&pdev->dev, "no prcmu tcdm region provided\n");
+ return -ENOENT;
+ }
+ tcdm_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+
/* Clean up the mailbox interrupts after pre-kernel code. */
writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
- if (np)
- irq = platform_get_irq(pdev, 0);
-
- if (!np || irq <= 0)
- irq = IRQ_DB8500_PRCMU1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "no prcmu irq provided\n");
+ return -ENOENT;
+ }
err = request_threaded_irq(irq, prcmu_irq_handler,
prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
@@ -3130,13 +3167,12 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(db8500_prcmu_devs); i++) {
if (!strcmp(db8500_prcmu_devs[i].name, "ab8500-core")) {
- db8500_prcmu_devs[i].platform_data = ab8500_platdata;
+ db8500_prcmu_devs[i].platform_data = pdata->ab_platdata;
db8500_prcmu_devs[i].pdata_size = sizeof(struct ab8500_platform_data);
}
}
- if (cpu_is_u8500v20_or_later())
- prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+ prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
db8500_prcmu_update_cpufreq();
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index ab8d0b2739b2..1804331bd52c 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -424,11 +425,9 @@ static int intel_msic_probe(struct platform_device *pdev)
return -ENODEV;
}
- msic->irq_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!msic->irq_base) {
- dev_err(&pdev->dev, "failed to map SRAM memory\n");
- return -ENOMEM;
- }
+ msic->irq_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(msic->irq_base))
+ return PTR_ERR(msic->irq_base);
platform_set_drvdata(pdev, msic);
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index f6878f8db57d..4d73963cd8f0 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -93,15 +93,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
if (max77686 == NULL)
return -ENOMEM;
- max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
- if (IS_ERR(max77686->regmap)) {
- ret = PTR_ERR(max77686->regmap);
- dev_err(max77686->dev, "Failed to allocate register map: %d\n",
- ret);
- kfree(max77686);
- return ret;
- }
-
i2c_set_clientdata(i2c, max77686);
max77686->dev = &i2c->dev;
max77686->i2c = i2c;
@@ -111,6 +102,15 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
max77686->irq_gpio = pdata->irq_gpio;
max77686->irq = i2c->irq;
+ max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
+ if (IS_ERR(max77686->regmap)) {
+ ret = PTR_ERR(max77686->regmap);
+ dev_err(max77686->dev, "Failed to allocate register map: %d\n",
+ ret);
+ kfree(max77686);
+ return ret;
+ }
+
if (regmap_read(max77686->regmap,
MAX77686_REG_DEVICE_ID, &data) < 0) {
dev_err(max77686->dev,
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index cc5155e20494..9e60fed5ff82 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -114,35 +114,37 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
u8 reg_data;
int ret = 0;
+ if (!pdata) {
+ dev_err(&i2c->dev, "No platform data found.\n");
+ return -EINVAL;
+ }
+
max77693 = devm_kzalloc(&i2c->dev,
sizeof(struct max77693_dev), GFP_KERNEL);
if (max77693 == NULL)
return -ENOMEM;
- max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
- if (IS_ERR(max77693->regmap)) {
- ret = PTR_ERR(max77693->regmap);
- dev_err(max77693->dev,"failed to allocate register map: %d\n",
- ret);
- goto err_regmap;
- }
-
i2c_set_clientdata(i2c, max77693);
max77693->dev = &i2c->dev;
max77693->i2c = i2c;
max77693->irq = i2c->irq;
max77693->type = id->driver_data;
- if (!pdata)
- goto err_regmap;
+ max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
+ if (IS_ERR(max77693->regmap)) {
+ ret = PTR_ERR(max77693->regmap);
+ dev_err(max77693->dev, "failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
max77693->wakeup = pdata->wakeup;
- if (max77693_read_reg(max77693->regmap,
- MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
+ ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2,
+ &reg_data);
+ if (ret < 0) {
dev_err(max77693->dev, "device not found on this channel\n");
- ret = -ENODEV;
- goto err_regmap;
+ return ret;
} else
dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
@@ -163,7 +165,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
ret = PTR_ERR(max77693->regmap_muic);
dev_err(max77693->dev,
"failed to allocate register map: %d\n", ret);
- goto err_regmap;
+ goto err_regmap_muic;
}
ret = max77693_irq_init(max77693);
@@ -184,9 +186,9 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
err_mfd:
max77693_irq_exit(max77693);
err_irq:
+err_regmap_muic:
i2c_unregister_device(max77693->muic);
i2c_unregister_device(max77693->haptic);
-err_regmap:
return ret;
}
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 64803f13bcec..d11567307fbe 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -208,6 +208,8 @@ static int pcf50633_probe(struct i2c_client *client,
if (!pcf)
return -ENOMEM;
+ i2c_set_clientdata(client, pcf);
+ pcf->dev = &client->dev;
pcf->pdata = pdata;
mutex_init(&pcf->lock);
@@ -219,9 +221,6 @@ static int pcf50633_probe(struct i2c_client *client,
return ret;
}
- i2c_set_clientdata(client, pcf);
- pcf->dev = &client->dev;
-
version = pcf50633_reg_read(pcf, 0);
variant = pcf50633_reg_read(pcf, 1);
if (version < 0 || variant < 0) {
diff --git a/drivers/mfd/rtl8411.c b/drivers/mfd/rtl8411.c
index 89f046ca9e41..3d3b4addf81a 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/mfd/rtl8411.c
@@ -112,6 +112,21 @@ static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)
BPP_LDO_POWB, BPP_LDO_SUSPEND);
}
+static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 mask, val;
+
+ mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK;
+ if (voltage == OUTPUT_3V3)
+ val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3;
+ else if (voltage == OUTPUT_1V8)
+ val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8;
+ else
+ return -EINVAL;
+
+ return rtsx_pci_write_register(pcr, LDO_CTL, mask, val);
+}
+
static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
{
unsigned int card_exist;
@@ -163,6 +178,18 @@ static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
return card_exist;
}
+static int rtl8411_conv_clk_and_div_n(int input, int dir)
+{
+ int output;
+
+ if (dir == CLK_TO_DIV_N)
+ output = input * 4 / 5 - 2;
+ else
+ output = (input + 2) * 5 / 4;
+
+ return output;
+}
+
static const struct pcr_ops rtl8411_pcr_ops = {
.extra_init_hw = rtl8411_extra_init_hw,
.optimize_phy = NULL,
@@ -172,7 +199,9 @@ static const struct pcr_ops rtl8411_pcr_ops = {
.disable_auto_blink = rtl8411_disable_auto_blink,
.card_power_on = rtl8411_card_power_on,
.card_power_off = rtl8411_card_power_off,
+ .switch_output_voltage = rtl8411_switch_output_voltage,
.cd_deglitch = rtl8411_cd_deglitch,
+ .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
};
/* SD Pull Control Enable:
diff --git a/drivers/mfd/rts5209.c b/drivers/mfd/rts5209.c
index 283a4f148084..98fe0f39463e 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/mfd/rts5209.c
@@ -144,6 +144,25 @@ static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
return rtsx_pci_send_cmd(pcr, 100);
}
+static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+ if (err < 0)
+ return err;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pcr_ops rts5209_pcr_ops = {
.extra_init_hw = rts5209_extra_init_hw,
.optimize_phy = rts5209_optimize_phy,
@@ -153,7 +172,9 @@ static const struct pcr_ops rts5209_pcr_ops = {
.disable_auto_blink = rts5209_disable_auto_blink,
.card_power_on = rts5209_card_power_on,
.card_power_off = rts5209_card_power_off,
+ .switch_output_voltage = rts5209_switch_output_voltage,
.cd_deglitch = NULL,
+ .conv_clk_and_div_n = NULL,
};
/* SD Pull Control Enable:
diff --git a/drivers/mfd/rts5229.c b/drivers/mfd/rts5229.c
index b9dbab266fda..29d889cbb9c5 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/mfd/rts5229.c
@@ -114,6 +114,25 @@ static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card)
return rtsx_pci_send_cmd(pcr, 100);
}
+static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+
+ if (voltage == OUTPUT_3V3) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+ if (err < 0)
+ return err;
+ } else if (voltage == OUTPUT_1V8) {
+ err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
+ if (err < 0)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pcr_ops rts5229_pcr_ops = {
.extra_init_hw = rts5229_extra_init_hw,
.optimize_phy = rts5229_optimize_phy,
@@ -123,7 +142,9 @@ static const struct pcr_ops rts5229_pcr_ops = {
.disable_auto_blink = rts5229_disable_auto_blink,
.card_power_on = rts5229_card_power_on,
.card_power_off = rts5229_card_power_off,
+ .switch_output_voltage = rts5229_switch_output_voltage,
.cd_deglitch = NULL,
+ .conv_clk_and_div_n = NULL,
};
/* SD Pull Control Enable:
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 7a7b0bda4618..9fc57009e228 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -630,7 +630,10 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (clk == pcr->cur_clock)
return 0;
- N = (u8)(clk - 2);
+ if (pcr->ops->conv_clk_and_div_n)
+ N = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ N = (u8)(clk - 2);
if ((clk <= 2) || (N > max_N))
return -EINVAL;
@@ -641,7 +644,14 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
/* Make sure that the SSC clock div_n is equal or greater than min_N */
div = CLK_DIV_1;
while ((N < min_N) && (div < max_div)) {
- N = (N + 2) * 2 - 2;
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(N,
+ DIV_N_TO_CLK) * 2;
+ N = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ N = (N + 2) * 2 - 2;
+ }
div++;
}
dev_dbg(&(pcr->pci->dev), "N = %d, div = %d\n", N, div);
@@ -703,6 +713,15 @@ int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
+int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ if (pcr->ops->switch_output_voltage)
+ return pcr->ops->switch_output_voltage(pcr, voltage);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
+
unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
{
unsigned int val;
@@ -767,10 +786,10 @@ static void rtsx_pci_card_detect(struct work_struct *work)
spin_unlock_irqrestore(&pcr->lock, flags);
- if (card_detect & SD_EXIST)
+ if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
pcr->slots[RTSX_SD_CARD].card_event(
pcr->slots[RTSX_SD_CARD].p_dev);
- if (card_detect & MS_EXIST)
+ if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
pcr->slots[RTSX_MS_CARD].card_event(
pcr->slots[RTSX_MS_CARD].p_dev);
}
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 49d361a618d0..77ee26ef5941 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
@@ -60,6 +61,15 @@ static struct mfd_cell s2mps11_devs[] = {
},
};
+#ifdef CONFIG_OF
+static struct of_device_id sec_dt_match[] = {
+ { .compatible = "samsung,s5m8767-pmic",
+ .data = (void *)S5M8767X,
+ },
+ {},
+};
+#endif
+
int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest)
{
return regmap_read(sec_pmic->regmap, reg, dest);
@@ -95,6 +105,57 @@ static struct regmap_config sec_regmap_config = {
.val_bits = 8,
};
+
+#ifdef CONFIG_OF
+/*
+ * Only the common platform data elements for s5m8767 are parsed here from the
+ * device tree. Other sub-modules of s5m8767 such as pmic, rtc, charger and
+ * others have to parse their own platform data elements from device tree.
+ *
+ * The s5m8767 platform data structure is instantiated here and the drivers for
+ * the sub-modules need not instantiate another instance while parsing their
+ * platform data.
+ */
+static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
+ struct device *dev)
+{
+ struct sec_platform_data *pd;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ dev_err(dev, "could not allocate memory for pdata\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * ToDo: the 'wakeup' member in the platform data is more of a Linux-
+ * specific detail. Hence, there is no binding for it yet and it is
+ * not parsed here.
+ */
+
+ return pd;
+}
+#else
+static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
+ struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static inline int sec_i2c_get_driver_data(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+#ifdef CONFIG_OF
+ if (i2c->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(sec_dt_match, i2c->dev.of_node);
+ return (int)match->data;
+ }
+#endif
+ return (int)id->driver_data;
+}
+
static int sec_pmic_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -111,13 +172,22 @@ static int sec_pmic_probe(struct i2c_client *i2c,
sec_pmic->dev = &i2c->dev;
sec_pmic->i2c = i2c;
sec_pmic->irq = i2c->irq;
- sec_pmic->type = id->driver_data;
-
+ sec_pmic->type = sec_i2c_get_driver_data(i2c, id);
+
+ if (sec_pmic->dev->of_node) {
+ pdata = sec_pmic_i2c_parse_dt_pdata(sec_pmic->dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ return ret;
+ }
+ pdata->device_type = sec_pmic->type;
+ }
if (pdata) {
sec_pmic->device_type = pdata->device_type;
sec_pmic->ono = pdata->ono;
sec_pmic->irq_base = pdata->irq_base;
sec_pmic->wakeup = pdata->wakeup;
+ sec_pmic->pdata = pdata;
}
sec_pmic->regmap = devm_regmap_init_i2c(i2c, &sec_regmap_config);
@@ -192,6 +262,7 @@ static struct i2c_driver sec_pmic_driver = {
.driver = {
.name = "sec_pmic",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sec_dt_match),
},
.probe = sec_pmic_probe,
.remove = sec_pmic_remove,
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index a06d66b929b1..ecc092c7f745 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -219,25 +219,18 @@ static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq)
}
static struct irq_domain_ops tc3589x_irq_ops = {
- .map = tc3589x_irq_map,
+ .map = tc3589x_irq_map,
.unmap = tc3589x_irq_unmap,
- .xlate = irq_domain_xlate_twocell,
+ .xlate = irq_domain_xlate_twocell,
};
static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np)
{
int base = tc3589x->irq_base;
- if (base) {
- tc3589x->domain = irq_domain_add_legacy(
- NULL, TC3589x_NR_INTERNAL_IRQS, base,
- 0, &tc3589x_irq_ops, tc3589x);
- }
- else {
- tc3589x->domain = irq_domain_add_linear(
- np, TC3589x_NR_INTERNAL_IRQS,
- &tc3589x_irq_ops, tc3589x);
- }
+ tc3589x->domain = irq_domain_add_simple(
+ np, TC3589x_NR_INTERNAL_IRQS, base,
+ &tc3589x_irq_ops, tc3589x);
if (!tc3589x->domain) {
dev_err(tc3589x->dev, "Failed to create irqdomain\n");
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 4dae241e5017..dd362c1078e1 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -159,7 +159,7 @@ out:
static int twl4030_write_script(u8 address, struct twl4030_ins *script,
int len)
{
- int err;
+ int err = -EINVAL;
for (; len; len--, address++, script++) {
if (len == 1) {
diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c
index fae15d880758..3c1723aa6225 100644
--- a/drivers/mfd/vexpress-config.c
+++ b/drivers/mfd/vexpress-config.c
@@ -67,6 +67,7 @@ struct vexpress_config_bridge *vexpress_config_bridge_register(
return bridge;
}
+EXPORT_SYMBOL(vexpress_config_bridge_register);
void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)
{
@@ -83,6 +84,7 @@ void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)
while (!list_empty(&__bridge.transactions))
cpu_relax();
}
+EXPORT_SYMBOL(vexpress_config_bridge_unregister);
struct vexpress_config_func {
@@ -142,6 +144,7 @@ struct vexpress_config_func *__vexpress_config_func_get(struct device *dev,
return func;
}
+EXPORT_SYMBOL(__vexpress_config_func_get);
void vexpress_config_func_put(struct vexpress_config_func *func)
{
@@ -149,7 +152,7 @@ void vexpress_config_func_put(struct vexpress_config_func *func)
of_node_put(func->bridge->node);
kfree(func);
}
-
+EXPORT_SYMBOL(vexpress_config_func_put);
struct vexpress_config_trans {
struct vexpress_config_func *func;
@@ -229,6 +232,7 @@ void vexpress_config_complete(struct vexpress_config_bridge *bridge,
complete(&trans->completion);
}
+EXPORT_SYMBOL(vexpress_config_complete);
int vexpress_config_wait(struct vexpress_config_trans *trans)
{
@@ -236,7 +240,7 @@ int vexpress_config_wait(struct vexpress_config_trans *trans)
return trans->status;
}
-
+EXPORT_SYMBOL(vexpress_config_wait);
int vexpress_config_read(struct vexpress_config_func *func, int offset,
u32 *data)
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index e5d8f63b252a..558c2928f261 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -313,19 +313,11 @@ static void vexpress_sysreg_config_complete(unsigned long data)
}
-void __init vexpress_sysreg_early_init(void __iomem *base)
+void vexpress_sysreg_setup(struct device_node *node)
{
- struct device_node *node = of_find_compatible_node(NULL, NULL,
- "arm,vexpress-sysreg");
-
- if (node)
- base = of_iomap(node, 0);
-
- if (WARN_ON(!base))
+ if (WARN_ON(!vexpress_sysreg_base))
return;
- vexpress_sysreg_base = base;
-
if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE)
vexpress_master_site = VEXPRESS_SITE_DB2;
else
@@ -336,9 +328,23 @@ void __init vexpress_sysreg_early_init(void __iomem *base)
WARN_ON(!vexpress_sysreg_config_bridge);
}
+void __init vexpress_sysreg_early_init(void __iomem *base)
+{
+ vexpress_sysreg_base = base;
+ vexpress_sysreg_setup(NULL);
+}
+
void __init vexpress_sysreg_of_early_init(void)
{
- vexpress_sysreg_early_init(NULL);
+ struct device_node *node = of_find_compatible_node(NULL, NULL,
+ "arm,vexpress-sysreg");
+
+ if (node) {
+ vexpress_sysreg_base = of_iomap(node, 0);
+ vexpress_sysreg_setup(node);
+ } else {
+ pr_info("vexpress-sysreg: No Device Tree node found.");
+ }
}
@@ -426,9 +432,11 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
return -EBUSY;
}
- if (!vexpress_sysreg_base)
+ if (!vexpress_sysreg_base) {
vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
+ vexpress_sysreg_setup(pdev->dev.of_node);
+ }
if (!vexpress_sysreg_base) {
dev_err(&pdev->dev, "Failed to obtain base address!\n");
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 088872ab6338..a9d9d41d95d3 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -84,6 +84,12 @@ int wm5102_patch(struct arizona *arizona)
}
static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
+ [ARIZONA_IRQ_MICD_CLAMP_FALL] = {
+ .mask = ARIZONA_MICD_CLAMP_FALL_EINT1
+ },
+ [ARIZONA_IRQ_MICD_CLAMP_RISE] = {
+ .mask = ARIZONA_MICD_CLAMP_RISE_EINT1
+ },
[ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
[ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
[ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
@@ -96,6 +102,7 @@ const struct regmap_irq_chip wm5102_aod = {
.mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
.ack_base = ARIZONA_AOD_IRQ1,
.wake_base = ARIZONA_WAKE_CONTROL,
+ .wake_invert = 1,
.num_regs = 1,
.irqs = wm5102_aod_irqs,
.num_irqs = ARRAY_SIZE(wm5102_aod_irqs),
@@ -312,6 +319,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
{ 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
{ 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
+ { 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
@@ -1106,6 +1114,8 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ACCESSORY_DETECT_MODE_1:
case ARIZONA_HEADPHONE_DETECT_1:
case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_HP_DACVAL:
+ case ARIZONA_MICD_CLAMP_CONTROL:
case ARIZONA_MIC_DETECT_1:
case ARIZONA_MIC_DETECT_2:
case ARIZONA_MIC_DETECT_3:
@@ -1875,6 +1885,7 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_HP_DACVAL:
case ARIZONA_MIC_DETECT_3:
return true;
default:
@@ -1882,7 +1893,7 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
}
}
-#define WM5102_MAX_REGISTER 0x1a8fff
+#define WM5102_MAX_REGISTER 0x1a9800
const struct regmap_config wm5102_spi_regmap = {
.reg_bits = 32,
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index adda6b10b90d..c41599815299 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -255,6 +255,7 @@ const struct regmap_irq_chip wm5110_aod = {
.mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
.ack_base = ARIZONA_AOD_IRQ1,
.wake_base = ARIZONA_WAKE_CONTROL,
+ .wake_invert = 1,
.num_regs = 1,
.irqs = wm5110_aod_irqs,
.num_irqs = ARRAY_SIZE(wm5110_aod_irqs),
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index b151b7c1bd59..e83fdfe0c8ca 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -127,7 +127,7 @@ config PHANTOM
config INTEL_MID_PTI
tristate "Parallel Trace Interface for MIPI P1149.7 cJTAG standard"
- depends on PCI
+ depends on PCI && TTY
default n
help
The PTI (Parallel Trace Interface) driver directs
@@ -192,7 +192,7 @@ config ICS932S401
config ATMEL_SSC
tristate "Device driver for Atmel SSC peripheral"
- depends on AVR32 || ARCH_AT91
+ depends on HAS_IOMEM
---help---
This option enables device driver support for Atmel Synchronized
Serial Communication peripheral (SSC).
@@ -499,6 +499,17 @@ config USB_SWITCH_FSA9480
stereo and mono audio, video, microphone and UART data to use
a common connector port.
+config LATTICE_ECP3_CONFIG
+ tristate "Lattice ECP3 FPGA bitstream configuration via SPI"
+ depends on SPI && SYSFS
+ select FW_LOADER
+ default n
+ help
+ This option enables support for bitstream configuration (programming
+ or loading) of the Lattice ECP3 FPGA family via SPI.
+
+ If unsure, say N.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -507,4 +518,5 @@ source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
+source "drivers/misc/vmw_vmci/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 2129377c0de6..35a1463c72d9 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,3 +49,6 @@ obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
+obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
+obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
+obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 158da5a81a66..c09c28f92055 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
/* Serialize access to ssc_list and user count */
static DEFINE_SPINLOCK(user_lock);
@@ -131,6 +132,13 @@ static int ssc_probe(struct platform_device *pdev)
struct resource *regs;
struct ssc_device *ssc;
const struct atmel_ssc_platform_data *plat_dat;
+ struct pinctrl *pinctrl;
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ dev_err(&pdev->dev, "Failed to request pinctrl\n");
+ return PTR_ERR(pinctrl);
+ }
ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
if (!ssc) {
@@ -151,11 +159,9 @@ static int ssc_probe(struct platform_device *pdev)
return -ENXIO;
}
- ssc->regs = devm_request_and_ioremap(&pdev->dev, regs);
- if (!ssc->regs) {
- dev_dbg(&pdev->dev, "ioremap failed\n");
- return -EINVAL;
- }
+ ssc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(ssc->regs))
+ return PTR_ERR(ssc->regs);
ssc->phybase = regs->start;
@@ -167,7 +173,7 @@ static int ssc_probe(struct platform_device *pdev)
/* disable all interrupts */
clk_enable(ssc->clk);
- ssc_writel(ssc->regs, IDR, ~0UL);
+ ssc_writel(ssc->regs, IDR, -1);
ssc_readl(ssc->regs, SR);
clk_disable(ssc->clk);
diff --git a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig
index 22429b8b1068..5acb9c5b49c4 100644
--- a/drivers/misc/cb710/Kconfig
+++ b/drivers/misc/cb710/Kconfig
@@ -1,6 +1,6 @@
config CB710_CORE
tristate "ENE CB710/720 Flash memory card reader support"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
help
This option enables support for PCI ENE CB710/720 Flash memory card
reader found in some laptops (ie. some versions of HP Compaq nx9500).
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
new file mode 100644
index 000000000000..155700bfd2b6
--- /dev/null
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2012 Stefan Roese <sr@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define FIRMWARE_NAME "lattice-ecp3.bit"
+
+/*
+ * The JTAG IDs of the supported FPGAs. The ID is 32 bits wide,
+ * reversed as noted in the manual.
+ */
+#define ID_ECP3_17 0xc2088080
+#define ID_ECP3_35 0xc2048080
+
+/* FPGA commands */
+#define FPGA_CMD_READ_ID 0x07 /* plus 24 bits */
+#define FPGA_CMD_READ_STATUS 0x09 /* plus 24 bits */
+#define FPGA_CMD_CLEAR 0x70
+#define FPGA_CMD_REFRESH 0x71
+#define FPGA_CMD_WRITE_EN 0x4a /* plus 2 bits */
+#define FPGA_CMD_WRITE_DIS 0x4f /* plus 8 bits */
+#define FPGA_CMD_WRITE_INC 0x41 /* plus 0 bits */
+
+/*
+ * The status register is 32 bits, reversed; DONE is bit 17 from the TN1222.pdf
+ * (LatticeECP3 Slave SPI Port User's Guide)
+ */
+#define FPGA_STATUS_DONE 0x00004000
+#define FPGA_STATUS_CLEARED 0x00010000
+
+#define FPGA_CLEAR_TIMEOUT 5000 /* max. 5000ms for FPGA clear */
+#define FPGA_CLEAR_MSLEEP 10
+#define FPGA_CLEAR_LOOP_COUNT (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP)
+
+struct fpga_data {
+ struct completion fw_loaded;
+};
+
+struct ecp3_dev {
+ u32 jedec_id;
+ char *name;
+};
+
+static const struct ecp3_dev ecp3_dev[] = {
+ {
+ .jedec_id = ID_ECP3_17,
+ .name = "Lattice ECP3-17",
+ },
+ {
+ .jedec_id = ID_ECP3_35,
+ .name = "Lattice ECP3-35",
+ },
+};
+
+static void firmware_load(const struct firmware *fw, void *context)
+{
+ struct spi_device *spi = (struct spi_device *)context;
+ struct fpga_data *data = dev_get_drvdata(&spi->dev);
+ u8 *buffer;
+ int ret;
+ u8 txbuf[8];
+ u8 rxbuf[8];
+ int rx_len = 8;
+ int i;
+ u32 jedec_id;
+ u32 status;
+
+ if (fw->size == 0) {
+ dev_err(&spi->dev, "Error: Firmware size is 0!\n");
+ return;
+ }
+
+ /* Fill dummy data (24 stuffing bits for commands) */
+ txbuf[1] = 0x00;
+ txbuf[2] = 0x00;
+ txbuf[3] = 0x00;
+
+ /* Trying to speak with the FPGA via SPI... */
+ txbuf[0] = FPGA_CMD_READ_ID;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]);
+ jedec_id = *(u32 *)&rxbuf[4];
+
+ for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
+ if (jedec_id == ecp3_dev[i].jedec_id)
+ break;
+ }
+ if (i == ARRAY_SIZE(ecp3_dev)) {
+ dev_err(&spi->dev,
+ "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
+ jedec_id);
+ return;
+ }
+
+ dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
+
+ txbuf[0] = FPGA_CMD_READ_STATUS;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
+
+ buffer = kzalloc(fw->size + 8, GFP_KERNEL);
+ if (!buffer) {
+ dev_err(&spi->dev, "Error: Can't allocate memory!\n");
+ return;
+ }
+
+ /*
+ * Insert WRITE_INC command into stream (one SPI frame)
+ */
+ buffer[0] = FPGA_CMD_WRITE_INC;
+ buffer[1] = 0xff;
+ buffer[2] = 0xff;
+ buffer[3] = 0xff;
+ memcpy(buffer + 4, fw->data, fw->size);
+
+ txbuf[0] = FPGA_CMD_REFRESH;
+ ret = spi_write(spi, txbuf, 4);
+
+ txbuf[0] = FPGA_CMD_WRITE_EN;
+ ret = spi_write(spi, txbuf, 4);
+
+ txbuf[0] = FPGA_CMD_CLEAR;
+ ret = spi_write(spi, txbuf, 4);
+
+ /*
+ * Wait for FPGA memory to become cleared
+ */
+ for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
+ txbuf[0] = FPGA_CMD_READ_STATUS;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ status = *(u32 *)&rxbuf[4];
+ if (status == FPGA_STATUS_CLEARED)
+ break;
+
+ msleep(FPGA_CLEAR_MSLEEP);
+ }
+
+ if (i == FPGA_CLEAR_LOOP_COUNT) {
+ dev_err(&spi->dev,
+ "Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
+ status);
+ kfree(buffer);
+ return;
+ }
+
+ dev_info(&spi->dev, "Configuring the FPGA...\n");
+ ret = spi_write(spi, buffer, fw->size + 8);
+
+ txbuf[0] = FPGA_CMD_WRITE_DIS;
+ ret = spi_write(spi, txbuf, 4);
+
+ txbuf[0] = FPGA_CMD_READ_STATUS;
+ ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+ dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
+ status = *(u32 *)&rxbuf[4];
+
+ /* Check result */
+ if (status & FPGA_STATUS_DONE)
+ dev_info(&spi->dev, "FPGA succesfully configured!\n");
+ else
+ dev_info(&spi->dev, "FPGA not configured (DONE not set)\n");
+
+ /*
+ * Don't forget to release the firmware again
+ */
+ release_firmware(fw);
+
+ kfree(buffer);
+
+ complete(&data->fw_loaded);
+}
+
+static int lattice_ecp3_probe(struct spi_device *spi)
+{
+ struct fpga_data *data;
+ int err;
+
+ data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&spi->dev, "Memory allocation for fpga_data failed\n");
+ return -ENOMEM;
+ }
+ spi_set_drvdata(spi, data);
+
+ init_completion(&data->fw_loaded);
+ err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
+ FIRMWARE_NAME, &spi->dev,
+ GFP_KERNEL, spi, firmware_load);
+ if (err) {
+ dev_err(&spi->dev, "Firmware loading failed with %d!\n", err);
+ return err;
+ }
+
+ dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n");
+
+ return 0;
+}
+
+static int lattice_ecp3_remove(struct spi_device *spi)
+{
+ struct fpga_data *data = spi_get_drvdata(spi);
+
+ wait_for_completion(&data->fw_loaded);
+
+ return 0;
+}
+
+static const struct spi_device_id lattice_ecp3_id[] = {
+ { "ecp3-17", 0 },
+ { "ecp3-35", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, lattice_ecp3_id);
+
+static struct spi_driver lattice_ecp3_driver = {
+ .driver = {
+ .name = "lattice-ecp3",
+ .owner = THIS_MODULE,
+ },
+ .probe = lattice_ecp3_probe,
+ .remove = lattice_ecp3_remove,
+ .id_table = lattice_ecp3_id,
+};
+
+module_spi_driver(lattice_ecp3_driver);
+
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
+MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 5a79ccde2fdf..d21b4d006a55 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -1,11 +1,22 @@
config INTEL_MEI
- tristate "Intel Management Engine Interface (Intel MEI)"
+ tristate "Intel Management Engine Interface"
depends on X86 && PCI && WATCHDOG_CORE
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for system containing Intel chipsets.
if selected /dev/mei misc device will be created.
+ For more information see
+ <http://software.intel.com/en-us/manageability/>
+
+config INTEL_MEI_ME
+ bool "ME Enabled Intel Chipsets"
+ depends on INTEL_MEI
+ depends on X86 && PCI && WATCHDOG_CORE
+ default y
+ help
+ MEI support for ME Enabled Intel chipsets.
+
Supported Chipsets are:
7 Series Chipset Family
6 Series Chipset Family
@@ -24,5 +35,3 @@ config INTEL_MEI
82Q33 Express
82X38/X48 Express
- For more information see
- <http://software.intel.com/en-us/manageability/>
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 0017842e166c..040af6c7b147 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -4,9 +4,11 @@
#
obj-$(CONFIG_INTEL_MEI) += mei.o
mei-objs := init.o
+mei-objs += hbm.o
mei-objs += interrupt.o
-mei-objs += interface.o
-mei-objs += iorw.o
+mei-objs += client.o
mei-objs += main.o
mei-objs += amthif.o
mei-objs += wd.o
+mei-$(CONFIG_INTEL_MEI_ME) += pci-me.o
+mei-$(CONFIG_INTEL_MEI_ME) += hw-me.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 18794aea6062..c86d7e3839a4 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -31,15 +31,16 @@
#include <linux/jiffies.h>
#include <linux/uaccess.h>
+#include <linux/mei.h>
#include "mei_dev.h"
-#include "hw.h"
-#include <linux/mei.h>
-#include "interface.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
-const uuid_le mei_amthi_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 0xac,
- 0xa8, 0x46, 0xe0, 0xff, 0x65,
- 0x81, 0x4c);
+const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
+ 0xac, 0xa8, 0x46, 0xe0,
+ 0xff, 0x65, 0x81, 0x4c);
/**
* mei_amthif_reset_params - initializes mei device iamthif
@@ -64,22 +65,24 @@ void mei_amthif_reset_params(struct mei_device *dev)
* @dev: the device structure
*
*/
-void mei_amthif_host_init(struct mei_device *dev)
+int mei_amthif_host_init(struct mei_device *dev)
{
- int i;
+ struct mei_cl *cl = &dev->iamthif_cl;
unsigned char *msg_buf;
+ int ret, i;
- mei_cl_init(&dev->iamthif_cl, dev);
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+
+ mei_cl_init(cl, dev);
- /* find ME amthi client */
- i = mei_me_cl_link(dev, &dev->iamthif_cl,
- &mei_amthi_guid, MEI_IAMTHIF_HOST_CLIENT_ID);
+ i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
if (i < 0) {
- dev_info(&dev->pdev->dev, "failed to find iamthif client.\n");
- return;
+ dev_info(&dev->pdev->dev, "amthif: failed to find the client\n");
+ return -ENOENT;
}
+ cl->me_client_id = dev->me_clients[i].client_id;
+
/* Assign iamthif_mtu to the value received from ME */
dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
@@ -93,19 +96,29 @@ void mei_amthif_host_init(struct mei_device *dev)
msg_buf = kcalloc(dev->iamthif_mtu,
sizeof(unsigned char), GFP_KERNEL);
if (!msg_buf) {
- dev_dbg(&dev->pdev->dev, "memory allocation for ME message buffer failed.\n");
- return;
+ dev_err(&dev->pdev->dev, "amthif: memory allocation for ME message buffer failed.\n");
+ return -ENOMEM;
}
dev->iamthif_msg_buf = msg_buf;
- if (mei_connect(dev, &dev->iamthif_cl)) {
- dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n");
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
- dev->iamthif_cl.host_client_id = 0;
+ ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
+
+ if (ret < 0) {
+ dev_err(&dev->pdev->dev, "amthif: failed to link the client\n");
+ return -ENOENT;
+ }
+
+ cl->state = MEI_FILE_CONNECTING;
+
+ if (mei_hbm_cl_connect_req(dev, cl)) {
+ dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n");
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->host_client_id = 0;
} else {
- dev->iamthif_cl.timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
}
+ return 0;
}
/**
@@ -168,10 +181,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
if (i < 0) {
- dev_dbg(&dev->pdev->dev, "amthi client not found.\n");
+ dev_dbg(&dev->pdev->dev, "amthif client not found.\n");
return -ENODEV;
}
- dev_dbg(&dev->pdev->dev, "checking amthi data\n");
+ dev_dbg(&dev->pdev->dev, "checking amthif data\n");
cb = mei_amthif_find_read_list_entry(dev, file);
/* Check for if we can block or not*/
@@ -179,7 +192,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
return -EAGAIN;
- dev_dbg(&dev->pdev->dev, "waiting for amthi data\n");
+ dev_dbg(&dev->pdev->dev, "waiting for amthif data\n");
while (cb == NULL) {
/* unlock the Mutex */
mutex_unlock(&dev->device_lock);
@@ -187,27 +200,27 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
(cb = mei_amthif_find_read_list_entry(dev, file)));
+ /* Locking again the Mutex */
+ mutex_lock(&dev->device_lock);
+
if (wait_ret)
return -ERESTARTSYS;
dev_dbg(&dev->pdev->dev, "woke up from sleep\n");
-
- /* Locking again the Mutex */
- mutex_lock(&dev->device_lock);
}
- dev_dbg(&dev->pdev->dev, "Got amthi data\n");
+ dev_dbg(&dev->pdev->dev, "Got amthif data\n");
dev->iamthif_timer = 0;
if (cb) {
timeout = cb->read_time +
mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
- dev_dbg(&dev->pdev->dev, "amthi timeout = %lud\n",
+ dev_dbg(&dev->pdev->dev, "amthif timeout = %lud\n",
timeout);
if (time_after(jiffies, timeout)) {
- dev_dbg(&dev->pdev->dev, "amthi Time out\n");
+ dev_dbg(&dev->pdev->dev, "amthif Time out\n");
/* 15 sec for the message has expired */
list_del(&cb->list);
rets = -ETIMEDOUT;
@@ -227,9 +240,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
* remove message from deletion list
*/
- dev_dbg(&dev->pdev->dev, "amthi cb->response_buffer size - %d\n",
+ dev_dbg(&dev->pdev->dev, "amthif cb->response_buffer size - %d\n",
cb->response_buffer.size);
- dev_dbg(&dev->pdev->dev, "amthi cb->buf_idx - %lu\n", cb->buf_idx);
+ dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
/* length is being turncated to PAGE_SIZE, however,
* the buf_idx may point beyond */
@@ -245,7 +258,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
}
}
free:
- dev_dbg(&dev->pdev->dev, "free amthi cb memory.\n");
+ dev_dbg(&dev->pdev->dev, "free amthif cb memory.\n");
*offset = 0;
mei_io_cb_free(cb);
out:
@@ -269,7 +282,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
if (!dev || !cb)
return -ENODEV;
- dev_dbg(&dev->pdev->dev, "write data to amthi client.\n");
+ dev_dbg(&dev->pdev->dev, "write data to amthif client.\n");
dev->iamthif_state = MEI_IAMTHIF_WRITING;
dev->iamthif_current_cb = cb;
@@ -280,15 +293,15 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
cb->request_buffer.size);
- ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
+ ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl);
if (ret < 0)
return ret;
- if (ret && dev->mei_host_buffer_is_empty) {
+ if (ret && dev->hbuf_is_ready) {
ret = 0;
- dev->mei_host_buffer_is_empty = false;
- if (cb->request_buffer.size > mei_hbuf_max_data(dev)) {
- mei_hdr.length = mei_hbuf_max_data(dev);
+ dev->hbuf_is_ready = false;
+ if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {
+ mei_hdr.length = mei_hbuf_max_len(dev);
mei_hdr.msg_complete = 0;
} else {
mei_hdr.length = cb->request_buffer.size;
@@ -300,25 +313,24 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
mei_hdr.reserved = 0;
dev->iamthif_msg_buf_index += mei_hdr.length;
if (mei_write_message(dev, &mei_hdr,
- (unsigned char *)(dev->iamthif_msg_buf),
- mei_hdr.length))
+ (unsigned char *)dev->iamthif_msg_buf))
return -ENODEV;
if (mei_hdr.msg_complete) {
- if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl))
+ if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl))
return -ENODEV;
dev->iamthif_flow_control_pending = true;
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
- dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n");
+ dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n");
dev->iamthif_current_cb = cb;
dev->iamthif_file_object = cb->file_object;
list_add_tail(&cb->list, &dev->write_waiting_list.list);
} else {
- dev_dbg(&dev->pdev->dev, "message does not complete, so add amthi cb to write list.\n");
+ dev_dbg(&dev->pdev->dev, "message does not complete, so add amthif cb to write list.\n");
list_add_tail(&cb->list, &dev->write_list.list);
}
} else {
- if (!(dev->mei_host_buffer_is_empty))
+ if (!dev->hbuf_is_ready)
dev_dbg(&dev->pdev->dev, "host buffer is not empty");
dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");
@@ -383,7 +395,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
dev->iamthif_timer = 0;
dev->iamthif_file_object = NULL;
- dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
+ dev_dbg(&dev->pdev->dev, "complete amthif cmd_list cb.\n");
list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) {
list_del(&pos->list);
@@ -392,7 +404,7 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
status = mei_amthif_send_cmd(dev, pos);
if (status) {
dev_dbg(&dev->pdev->dev,
- "amthi write failed status = %d\n",
+ "amthif write failed status = %d\n",
status);
return;
}
@@ -412,7 +424,7 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
dev->iamthif_file_object == file) {
mask |= (POLLIN | POLLRDNORM);
- dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
+ dev_dbg(&dev->pdev->dev, "run next amthif cb\n");
mei_amthif_run_next_cmd(dev);
}
return mask;
@@ -434,54 +446,51 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
- struct mei_msg_hdr *mei_hdr;
+ struct mei_msg_hdr mei_hdr;
struct mei_cl *cl = cb->cl;
size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
size_t msg_slots = mei_data2slots(len);
- mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
- mei_hdr->host_addr = cl->host_client_id;
- mei_hdr->me_addr = cl->me_client_id;
- mei_hdr->reserved = 0;
+ mei_hdr.host_addr = cl->host_client_id;
+ mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.reserved = 0;
if (*slots >= msg_slots) {
- mei_hdr->length = len;
- mei_hdr->msg_complete = 1;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
} else if (*slots == dev->hbuf_depth) {
msg_slots = *slots;
len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
- mei_hdr->length = len;
- mei_hdr->msg_complete = 0;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 0;
} else {
/* wait for next time the host buffer is empty */
return 0;
}
- dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
- mei_hdr->length, mei_hdr->msg_complete);
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
*slots -= msg_slots;
- if (mei_write_message(dev, mei_hdr,
- dev->iamthif_msg_buf + dev->iamthif_msg_buf_index,
- mei_hdr->length)) {
+ if (mei_write_message(dev, &mei_hdr,
+ dev->iamthif_msg_buf + dev->iamthif_msg_buf_index)) {
dev->iamthif_state = MEI_IAMTHIF_IDLE;
cl->status = -ENODEV;
list_del(&cb->list);
return -ENODEV;
}
- if (mei_flow_ctrl_reduce(dev, cl))
+ if (mei_cl_flow_ctrl_reduce(cl))
return -ENODEV;
- dev->iamthif_msg_buf_index += mei_hdr->length;
+ dev->iamthif_msg_buf_index += mei_hdr.length;
cl->status = 0;
- if (mei_hdr->msg_complete) {
+ if (mei_hdr.msg_complete) {
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
dev->iamthif_flow_control_pending = true;
- /* save iamthif cb sent to amthi client */
+ /* save iamthif cb sent to amthif client */
cb->buf_idx = dev->iamthif_msg_buf_index;
dev->iamthif_current_cb = cb;
@@ -494,11 +503,11 @@ int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
/**
* mei_amthif_irq_read_message - read routine after ISR to
- * handle the read amthi message
+ * handle the read amthif message
*
* @complete_list: An instance of our list structure
* @dev: the device structure
- * @mei_hdr: header of amthi message
+ * @mei_hdr: header of amthif message
*
* returns 0 on success, <0 on failure.
*/
@@ -522,10 +531,10 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
return 0;
dev_dbg(&dev->pdev->dev,
- "amthi_message_buffer_index =%d\n",
+ "amthif_message_buffer_index =%d\n",
mei_hdr->length);
- dev_dbg(&dev->pdev->dev, "completed amthi read.\n ");
+ dev_dbg(&dev->pdev->dev, "completed amthif read.\n ");
if (!dev->iamthif_current_cb)
return -ENODEV;
@@ -540,8 +549,8 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
cb->read_time = jiffies;
if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) {
/* found the iamthif cb */
- dev_dbg(&dev->pdev->dev, "complete the amthi read cb.\n ");
- dev_dbg(&dev->pdev->dev, "add the amthi read cb to complete.\n ");
+ dev_dbg(&dev->pdev->dev, "complete the amthif read cb.\n ");
+ dev_dbg(&dev->pdev->dev, "add the amthif read cb to complete.\n ");
list_add_tail(&cb->list, &complete_list->list);
}
return 0;
@@ -563,7 +572,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
return -EMSGSIZE;
}
*slots -= mei_data2slots(sizeof(struct hbm_flow_control));
- if (mei_send_flow_control(dev, &dev->iamthif_cl)) {
+ if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) {
dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
return -EIO;
}
@@ -574,7 +583,7 @@ int mei_amthif_irq_read(struct mei_device *dev, s32 *slots)
dev->iamthif_msg_buf_index = 0;
dev->iamthif_msg_buf_size = 0;
dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
- dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
return 0;
}
@@ -593,7 +602,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
dev->iamthif_msg_buf,
dev->iamthif_msg_buf_index);
list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
- dev_dbg(&dev->pdev->dev, "amthi read completed\n");
+ dev_dbg(&dev->pdev->dev, "amthif read completed\n");
dev->iamthif_timer = jiffies;
dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
dev->iamthif_timer);
@@ -601,7 +610,7 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
mei_amthif_run_next_cmd(dev);
}
- dev_dbg(&dev->pdev->dev, "completing amthi call back.\n");
+ dev_dbg(&dev->pdev->dev, "completing amthif call back.\n");
wake_up_interruptible(&dev->iamthif_cl.wait);
}
@@ -635,7 +644,8 @@ static bool mei_clear_list(struct mei_device *dev,
if (dev->iamthif_current_cb == cb_pos) {
dev->iamthif_current_cb = NULL;
/* send flow control to iamthif client */
- mei_send_flow_control(dev, &dev->iamthif_cl);
+ mei_hbm_cl_flow_control_req(dev,
+ &dev->iamthif_cl);
}
/* free all allocated buffers */
mei_io_cb_free(cb_pos);
@@ -706,11 +716,11 @@ int mei_amthif_release(struct mei_device *dev, struct file *file)
if (dev->iamthif_file_object == file &&
dev->iamthif_state != MEI_IAMTHIF_IDLE) {
- dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
+ dev_dbg(&dev->pdev->dev, "amthif canceled iamthif state %d\n",
dev->iamthif_state);
dev->iamthif_canceled = true;
if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
- dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
+ dev_dbg(&dev->pdev->dev, "run next amthif iamthif cb\n");
mei_amthif_run_next_cmd(dev);
}
}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
new file mode 100644
index 000000000000..1569afe935de
--- /dev/null
+++ b/drivers/misc/mei/client.c
@@ -0,0 +1,729 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * mei_me_cl_by_uuid - locate index of me client
+ *
+ * @dev: mei device
+ * @uuid: me client uuid
+ *
+ * returns me client index or -ENOENT if not found
+ */
+int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
+{
+ int i, res = -ENOENT;
+
+ for (i = 0; i < dev->me_clients_num; ++i)
+ if (uuid_le_cmp(*uuid,
+ dev->me_clients[i].props.protocol_name) == 0) {
+ res = i;
+ break;
+ }
+
+ return res;
+}
+
+
+/**
+ * mei_me_cl_by_id - return index to me_clients for client_id
+ *
+ * @dev: the device structure
+ * @client_id: me client id
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns index on success, -ENOENT on failure.
+ */
+
+int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
+{
+ int i;
+ for (i = 0; i < dev->me_clients_num; i++)
+ if (dev->me_clients[i].client_id == client_id)
+ break;
+
+ /* bounds check first: the loop may have run off the end of the table */
+ if (i == dev->me_clients_num)
+ return -ENOENT;
+
+ if (WARN_ON(dev->me_clients[i].client_id != client_id))
+ return -ENOENT;
+
+ return i;
+}
+
+
+/**
+ * mei_io_list_flush - removes list entry belonging to cl.
+ *
+ * @list: An instance of our list structure
+ * @cl: host client
+ */
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+{
+ struct mei_cl_cb *cb;
+ struct mei_cl_cb *next;
+
+ list_for_each_entry_safe(cb, next, &list->list, list) {
+ if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
+ list_del(&cb->list);
+ }
+}
+
+/**
+ * mei_io_cb_free - free mei_cb_private related memory
+ *
+ * @cb: mei callback struct
+ */
+void mei_io_cb_free(struct mei_cl_cb *cb)
+{
+ if (cb == NULL)
+ return;
+
+ kfree(cb->request_buffer.data);
+ kfree(cb->response_buffer.data);
+ kfree(cb);
+}
+
+/**
+ * mei_io_cb_init - allocate and initialize io callback
+ *
+ * @cl - mei client
+ * @file: pointer to file structure
+ *
+ * returns mei_cl_cb pointer or NULL;
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
+{
+ struct mei_cl_cb *cb;
+
+ cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+ if (!cb)
+ return NULL;
+
+ mei_io_list_init(cb);
+
+ cb->file_object = fp;
+ cb->cl = cl;
+ cb->buf_idx = 0;
+ return cb;
+}
+
+/**
+ * mei_io_cb_alloc_req_buf - allocate request buffer
+ *
+ * @cb: io callback structure
+ * @length: size of the buffer
+ *
+ * returns 0 on success
+ * -EINVAL if cb is NULL
+ * -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
+{
+ if (!cb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!cb->request_buffer.data)
+ return -ENOMEM;
+ cb->request_buffer.size = length;
+ return 0;
+}
+/**
+ * mei_io_cb_alloc_resp_buf - allocate response buffer
+ *
+ * @cb: io callback structure
+ * @length: size of the buffer
+ *
+ * returns 0 on success
+ * -EINVAL if cb is NULL
+ * -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
+{
+ if (!cb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!cb->response_buffer.data)
+ return -ENOMEM;
+ cb->response_buffer.size = length;
+ return 0;
+}
+
+
+
+/**
+ * mei_cl_flush_queues - flushes queue lists belonging to cl.
+ *
+ * @cl: host client
+ */
+int mei_cl_flush_queues(struct mei_cl *cl)
+{
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
+ mei_io_list_flush(&cl->dev->read_list, cl);
+ mei_io_list_flush(&cl->dev->write_list, cl);
+ mei_io_list_flush(&cl->dev->write_waiting_list, cl);
+ mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
+ mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
+ mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
+ return 0;
+}
+
+
+/**
+ * mei_cl_init - initializes cl.
+ *
+ * @cl: host client to be initialized
+ * @dev: mei device
+ */
+void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
+{
+ memset(cl, 0, sizeof(struct mei_cl));
+ init_waitqueue_head(&cl->wait);
+ init_waitqueue_head(&cl->rx_wait);
+ init_waitqueue_head(&cl->tx_wait);
+ INIT_LIST_HEAD(&cl->link);
+ cl->reading_state = MEI_IDLE;
+ cl->writing_state = MEI_IDLE;
+ cl->dev = dev;
+}
+
+/**
+ * mei_cl_allocate - allocates cl structure and sets it up.
+ *
+ * @dev: mei device
+ * returns the allocated host client structure or NULL on failure
+ */
+struct mei_cl *mei_cl_allocate(struct mei_device *dev)
+{
+ struct mei_cl *cl;
+
+ cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
+ if (!cl)
+ return NULL;
+
+ mei_cl_init(cl, dev);
+
+ return cl;
+}
+
+/**
+ * mei_cl_find_read_cb - find this cl's callback in the read list
+ *
+ * @cl: host client
+ * returns cb on success, NULL on error
+ */
+struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+ struct mei_cl_cb *cb = NULL;
+ struct mei_cl_cb *next = NULL;
+
+ list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
+ if (mei_cl_cmp_id(cl, cb->cl))
+ return cb;
+ return NULL;
+}
+
+/** mei_cl_link - allocate host id in the host map
+ *
+ * @cl: host client
+ * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY for generating one
+ *
+ * returns 0 on success
+ * -EINVAL on incorrect values
+ * -ENOENT if client not found
+ */
+int mei_cl_link(struct mei_cl *cl, int id)
+{
+ struct mei_device *dev;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ /* If the id is not assigned, get one */
+ if (id == MEI_HOST_CLIENT_ID_ANY)
+ id = find_first_zero_bit(dev->host_clients_map,
+ MEI_CLIENTS_MAX);
+
+ if (id >= MEI_CLIENTS_MAX) {
+ dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
+ return -ENOENT;
+ }
+
+ dev->open_handle_count++;
+
+ cl->host_client_id = id;
+ list_add_tail(&cl->link, &dev->file_list);
+
+ set_bit(id, dev->host_clients_map);
+
+ cl->state = MEI_FILE_INITIALIZING;
+
+ dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
+ return 0;
+}
+
+/**
+ * mei_cl_unlink - remove host client from the device file list
+ *
+ * @cl: host client
+ */
+int mei_cl_unlink(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl *pos, *next;
+
+ /* don't shout on error exit path */
+ if (!cl)
+ return 0;
+
+ /* wd and amthif might not be initialized */
+ if (!cl->dev)
+ return 0;
+
+ dev = cl->dev;
+
+ list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+ if (cl->host_client_id == pos->host_client_id) {
+ dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
+ pos->host_client_id, pos->me_client_id);
+ list_del_init(&pos->link);
+ break;
+ }
+ }
+ return 0;
+}
+
+
+void mei_host_client_init(struct work_struct *work)
+{
+ struct mei_device *dev = container_of(work,
+ struct mei_device, init_work);
+ struct mei_client_properties *client_props;
+ int i;
+
+ mutex_lock(&dev->device_lock);
+
+ bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving the first three client IDs
+ * 0: Reserved for MEI Bus Message communications
+ * 1: Reserved for Watchdog
+ * 2: Reserved for AMTHI
+ */
+ bitmap_set(dev->host_clients_map, 0, 3);
+
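+ /* start the fixed clients (AMTHIF and watchdog) found among the enumerated ME clients */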
+ for (i = 0; i < dev->me_clients_num; i++) {
+ client_props = &dev->me_clients[i].props;
+
+ if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
+ mei_amthif_host_init(dev);
+ else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
+ mei_wd_host_init(dev);
+ }
+
+ dev->dev_state = MEI_DEV_ENABLED;
+
+ mutex_unlock(&dev->device_lock);
+}
+
+
+/**
+ * mei_cl_disconnect - disconnect host client from the me one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_disconnect(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ int rets, err;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != MEI_FILE_DISCONNECTING)
+ return 0;
+
+ cb = mei_io_cb_init(cl, NULL);
+ if (!cb)
+ return -ENOMEM;
+
+ cb->fop_type = MEI_FOP_CLOSE;
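+ /* send the disconnect request now if the host buffer is free, otherwise queue it on the control write list */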
+ if (dev->hbuf_is_ready) {
+ dev->hbuf_is_ready = false;
+ if (mei_hbm_cl_disconnect_req(dev, cl)) {
+ rets = -ENODEV;
+ dev_err(&dev->pdev->dev, "failed to disconnect.\n");
+ goto free;
+ }
+ mdelay(10); /* Wait for hardware disconnection ready */
+ list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+ } else {
+ dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+
+ }
+ mutex_unlock(&dev->device_lock);
+
+ err = wait_event_timeout(dev->wait_recvd_msg,
+ MEI_FILE_DISCONNECTED == cl->state,
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+
+ mutex_lock(&dev->device_lock);
+ if (MEI_FILE_DISCONNECTED == cl->state) {
+ rets = 0;
+ dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
+ } else {
+ rets = -ENODEV;
+ if (MEI_FILE_DISCONNECTED != cl->state)
+ dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
+
+ if (err)
+ dev_dbg(&dev->pdev->dev,
+ "wait failed disconnect err=%08x\n",
+ err);
+
+ dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
+ }
+
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
+free:
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+
+/**
+ * mei_cl_is_other_connecting - checks if other
+ * client with the same me client id is connecting
+ *
+ * @cl: private data of the file object
+ *
+ * returns true if another client is connecting to the same me client, false otherwise.
+ */
+bool mei_cl_is_other_connecting(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl *pos;
+ struct mei_cl *next;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return false;
+
+ dev = cl->dev;
+
+ list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+ if ((pos->state == MEI_FILE_CONNECTING) &&
+ (pos != cl) && cl->me_client_id == pos->me_client_id)
+ return true;
+
+ }
+
+ return false;
+}
+
+/**
+ * mei_cl_connect - connect host client to the me one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_connect(struct mei_cl *cl, struct file *file)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ cb = mei_io_cb_init(cl, file);
+ if (!cb) {
+ rets = -ENOMEM;
+ goto out;
+ }
+
+ cb->fop_type = MEI_FOP_IOCTL;
+
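+ /* issue the connect request only if the host buffer is free and no other client is connecting to the same ME client */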
+ if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
+ dev->hbuf_is_ready = false;
+
+ if (mei_hbm_cl_connect_req(dev, cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
+ list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+ } else {
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ }
+
+ mutex_unlock(&dev->device_lock);
+ rets = wait_event_timeout(dev->wait_recvd_msg,
+ (cl->state == MEI_FILE_CONNECTED ||
+ cl->state == MEI_FILE_DISCONNECTED),
+ timeout); /* timeout is already in jiffies */
+ mutex_lock(&dev->device_lock);
+
+ if (cl->state != MEI_FILE_CONNECTED) {
+ rets = -EFAULT;
+
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
+ goto out;
+ }
+
+ rets = cl->status;
+
+out:
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+/**
+ * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
+ *
+ * @cl: private data of the file object
+ *
+ * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
+ * -ENOENT if mei_cl is not present
+ * -EINVAL if single_recv_buf == 0
+ */
+int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ int i;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ if (!dev->me_clients_num)
+ return 0;
+
+ if (cl->mei_flow_ctrl_creds > 0)
+ return 1;
+
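+ /* for ME clients with a single receive buffer the credit is tracked on the ME client itself */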
+ for (i = 0; i < dev->me_clients_num; i++) {
+ struct mei_me_client *me_cl = &dev->me_clients[i];
+ if (me_cl->client_id == cl->me_client_id) {
+ if (me_cl->mei_flow_ctrl_creds) {
+ if (WARN_ON(me_cl->props.single_recv_buf == 0))
+ return -EINVAL;
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+ }
+ return -ENOENT;
+}
+
+/**
+ * mei_cl_flow_ctrl_reduce - reduces flow_control.
+ *
+ * @cl: private data of the file object
+ * @returns
+ * 0 on success
+ * -ENOENT when me client is not found
+ * -EINVAL when ctrl credits are <= 0
+ */
+int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ int i;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ if (!dev->me_clients_num)
+ return -ENOENT;
+
+ for (i = 0; i < dev->me_clients_num; i++) {
+ struct mei_me_client *me_cl = &dev->me_clients[i];
+ if (me_cl->client_id == cl->me_client_id) {
+ if (me_cl->props.single_recv_buf != 0) {
+ if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ dev->me_clients[i].mei_flow_ctrl_creds--;
+ } else {
+ if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
+ return -EINVAL;
+ cl->mei_flow_ctrl_creds--;
+ }
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/**
+ * mei_cl_read_start - the start read client message function.
+ *
+ * @cl: host client
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_read_start(struct mei_cl *cl)
+{
+ struct mei_device *dev;
+ struct mei_cl_cb *cb;
+ int rets;
+ int i;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != MEI_FILE_CONNECTED)
+ return -ENODEV;
+
+ if (dev->dev_state != MEI_DEV_ENABLED)
+ return -ENODEV;
+
+ if (cl->read_cb) {
+ dev_dbg(&dev->pdev->dev, "read is pending.\n");
+ return -EBUSY;
+ }
+ i = mei_me_cl_by_id(dev, cl->me_client_id);
+ if (i < 0) {
+ dev_err(&dev->pdev->dev, "no such me client %d\n",
+ cl->me_client_id);
+ return -ENODEV;
+ }
+
+ cb = mei_io_cb_init(cl, NULL);
+ if (!cb)
+ return -ENOMEM;
+
+ rets = mei_io_cb_alloc_resp_buf(cb,
+ dev->me_clients[i].props.max_msg_length);
+ if (rets)
+ goto err;
+
+ cb->fop_type = MEI_FOP_READ;
+ cl->read_cb = cb;
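+ /* grant the ME client a flow control credit so it may send, or queue the request until the host buffer is ready */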
+ if (dev->hbuf_is_ready) {
+ dev->hbuf_is_ready = false;
+ if (mei_hbm_cl_flow_control_req(dev, cl)) {
+ rets = -ENODEV;
+ goto err;
+ }
+ list_add_tail(&cb->list, &dev->read_list.list);
+ } else {
+ list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ }
+ return rets;
+err:
+ mei_io_cb_free(cb);
+ return rets;
+}
+
+/**
+ * mei_cl_all_disconnect - disconnect forcefully all connected clients
+ *
+ * @dev - mei device
+ */
+
+void mei_cl_all_disconnect(struct mei_device *dev)
+{
+ struct mei_cl *cl, *next;
+
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->mei_flow_ctrl_creds = 0;
+ cl->read_cb = NULL;
+ cl->timer_count = 0;
+ }
+}
+
+
+/**
+ * mei_cl_all_read_wakeup - wake up all blocked readers so they can be interrupted
+ *
+ * @dev - mei device
+ */
+void mei_cl_all_read_wakeup(struct mei_device *dev)
+{
+ struct mei_cl *cl, *next;
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ if (waitqueue_active(&cl->rx_wait)) {
+ dev_dbg(&dev->pdev->dev, "Waking up client!\n");
+ wake_up_interruptible(&cl->rx_wait);
+ }
+ }
+}
+
+/**
+ * mei_cl_all_write_clear - clear all pending writes
+ *
+ * @dev - mei device
+ */
+void mei_cl_all_write_clear(struct mei_device *dev)
+{
+ struct mei_cl_cb *cb, *next;
+
+ list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+ list_del(&cb->list);
+ mei_io_cb_free(cb);
+ }
+}
+
+
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
new file mode 100644
index 000000000000..214b2397ec3e
--- /dev/null
+++ b/drivers/misc/mei/client.h
@@ -0,0 +1,102 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_CLIENT_H_
+#define _MEI_CLIENT_H_
+
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/poll.h>
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+
+int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
+int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
+
+/*
+ * MEI IO Functions
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
+void mei_io_cb_free(struct mei_cl_cb *priv_cb);
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
+
+
+/**
+ * mei_io_list_init - Sets up a queue list.
+ *
+ * @list: An instance cl callback structure
+ */
+static inline void mei_io_list_init(struct mei_cl_cb *list)
+{
+ INIT_LIST_HEAD(&list->list);
+}
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
+
+/*
+ * MEI Host Client Functions
+ */
+
+struct mei_cl *mei_cl_allocate(struct mei_device *dev);
+void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
+
+
+int mei_cl_link(struct mei_cl *cl, int id);
+int mei_cl_unlink(struct mei_cl *cl);
+
+int mei_cl_flush_queues(struct mei_cl *cl);
+struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
+
+/**
+ * mei_cl_cmp_id - tells if two host clients have the same id
+ *
+ * @cl1: first host client
+ * @cl2: second host client
+ *
+ * returns true - if ids are the same and not NULL
+ */
+static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
+ const struct mei_cl *cl2)
+{
+ return cl1 && cl2 &&
+ (cl1->host_client_id == cl2->host_client_id) &&
+ (cl1->me_client_id == cl2->me_client_id);
+}
+
+
+int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
+
+int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
+/*
+ * MEI input output function prototype
+ */
+bool mei_cl_is_other_connecting(struct mei_cl *cl);
+int mei_cl_disconnect(struct mei_cl *cl);
+
+int mei_cl_read_start(struct mei_cl *cl);
+
+int mei_cl_connect(struct mei_cl *cl, struct file *file);
+
+void mei_host_client_init(struct work_struct *work);
+
+
+void mei_cl_all_disconnect(struct mei_device *dev);
+void mei_cl_all_read_wakeup(struct mei_device *dev);
+void mei_cl_all_write_clear(struct mei_device *dev);
+
+
+#endif /* _MEI_CLIENT_H_ */
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
new file mode 100644
index 000000000000..fb9e63ba3bb1
--- /dev/null
+++ b/drivers/misc/mei/hbm.c
@@ -0,0 +1,669 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+
+/**
+ * mei_hbm_me_cl_allocate - allocates storage for me clients
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+static void mei_hbm_me_cl_allocate(struct mei_device *dev)
+{
+ struct mei_me_client *clients;
+ int b;
+
+ /* count how many ME clients we have */
+ for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
+ dev->me_clients_num++;
+
+ if (dev->me_clients_num <= 0)
+ return;
+
+ kfree(dev->me_clients);
+ dev->me_clients = NULL;
+
+ dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
+ dev->me_clients_num * sizeof(struct mei_me_client));
+ /* allocate storage for ME clients representation */
+ clients = kcalloc(dev->me_clients_num,
+ sizeof(struct mei_me_client), GFP_KERNEL);
+ if (!clients) {
+ dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
+ dev->dev_state = MEI_DEV_RESETING;
+ mei_reset(dev, 1);
+ return;
+ }
+ dev->me_clients = clients;
+ return;
+}
+
+/**
+ * mei_hbm_cl_hdr - construct client hbm header
+ * @cl: - client
+ * @hbm_cmd: host bus message command
+ * @buf: buffer for cl header
+ * @len: buffer length
+ */
+static inline
+void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
+{
+ struct mei_hbm_cl_cmd *cmd = buf;
+
+ memset(cmd, 0, len);
+
+ cmd->hbm_cmd = hbm_cmd;
+ cmd->host_addr = cl->host_client_id;
+ cmd->me_addr = cl->me_client_id;
+}
+
+/**
+ * mei_hbm_cl_addr_equal - tells if the client matches the hbm command addresses
+ *
+ * @cl: host client
+ * @buf: hbm command body
+ *
+ * returns true if the addresses are the same
+ */
+static inline
+bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
+{
+ struct mei_hbm_cl_cmd *cmd = buf;
+ return cl->host_client_id == cmd->host_addr &&
+ cl->me_client_id == cmd->me_addr;
+}
+
+
+/**
+ * is_treat_specially_client - checks if the message belongs
+ * to the file private data.
+ *
+ * @cl: private data of the file object
+ * @rs: connect response bus message
+ *
+ */
+static bool is_treat_specially_client(struct mei_cl *cl,
+ struct hbm_client_connect_response *rs)
+{
+ if (mei_hbm_cl_addr_equal(cl, rs)) {
+ if (!rs->status) {
+ cl->state = MEI_FILE_CONNECTED;
+ cl->status = 0;
+
+ } else {
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->status = -ENODEV;
+ }
+ cl->timer_count = 0;
+
+ return true;
+ }
+ return false;
+}
+
+/**
+ * mei_hbm_start_req - sends start request message.
+ *
+ * @dev: the device structure
+ */
+void mei_hbm_start_req(struct mei_device *dev)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_host_version_request *start_req;
+ const size_t len = sizeof(struct hbm_host_version_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+
+ /* host start message */
+ start_req = (struct hbm_host_version_request *)dev->wr_msg.data;
+ memset(start_req, 0, len);
+ start_req->hbm_cmd = HOST_START_REQ_CMD;
+ start_req->host_version.major_version = HBM_MAJOR_VERSION;
+ start_req->host_version.minor_version = HBM_MINOR_VERSION;
+
+ dev->recvd_msg = false;
+ if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+ dev_dbg(&dev->pdev->dev, "sending the host start message to FW failed.\n");
+ dev->dev_state = MEI_DEV_RESETING;
+ mei_reset(dev, 1);
+ }
+ dev->init_clients_state = MEI_START_MESSAGE;
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ return;
+}
+
+/**
+ * mei_hbm_enum_clients_req - sends enumeration client request message.
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+static void mei_hbm_enum_clients_req(struct mei_device *dev)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_host_enum_request *enum_req;
+ const size_t len = sizeof(struct hbm_host_enum_request);
+ /* enumerate clients */
+ mei_hbm_hdr(mei_hdr, len);
+
+ enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
+ memset(enum_req, 0, len);
+ enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+
+ if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+ dev->dev_state = MEI_DEV_RESETING;
+ dev_dbg(&dev->pdev->dev, "sending the client enumeration request to FW failed.\n");
+ mei_reset(dev, 1);
+ }
+ dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ return;
+}
+
+/**
+ * mei_hbm_prop_req - request the properties of a single client
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success, -EIO on write failure
+ */
+
+static int mei_hbm_prop_req(struct mei_device *dev)
+{
+
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct hbm_props_request *prop_req;
+ const size_t len = sizeof(struct hbm_props_request);
+ unsigned long next_client_index;
+ u8 client_num;
+
+
+ client_num = dev->me_client_presentation_num;
+
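+ /* find the next enumerated client in the valid-address bitmap */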
+ next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
+ dev->me_client_index);
+
+ /* We got all client properties */
+ if (next_client_index == MEI_CLIENTS_MAX) {
+ schedule_work(&dev->init_work);
+
+ return 0;
+ }
+
+ dev->me_clients[client_num].client_id = next_client_index;
+ dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
+
+ mei_hbm_hdr(mei_hdr, len);
+ prop_req = (struct hbm_props_request *)dev->wr_msg.data;
+
+ memset(prop_req, 0, sizeof(struct hbm_props_request));
+
+
+ prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ prop_req->address = next_client_index;
+
+ if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+ dev->dev_state = MEI_DEV_RESETING;
+ dev_err(&dev->pdev->dev, "Properties request command failed\n");
+ mei_reset(dev, 1);
+
+ return -EIO;
+ }
+
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->me_client_index = next_client_index;
+
+ return 0;
+}
+
+/**
+ * mei_hbm_stop_req_prepare - prepare stop request message
+ *
+ * @dev - mei device
+ * @mei_hdr - mei message header
+ * @data - hbm message body buffer
+ */
+static void mei_hbm_stop_req_prepare(struct mei_device *dev,
+ struct mei_msg_hdr *mei_hdr, unsigned char *data)
+{
+ struct hbm_host_stop_request *req =
+ (struct hbm_host_stop_request *)data;
+ const size_t len = sizeof(struct hbm_host_stop_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+
+ memset(req, 0, len);
+ req->hbm_cmd = HOST_STOP_REQ_CMD;
+ req->reason = DRIVER_STOP_REQUEST;
+}
+
+/**
+ * mei_hbm_cl_flow_control_req - sends flow control request.
+ *
+ * @dev: the device structure
+ * @cl: client info
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ const size_t len = sizeof(struct hbm_flow_control);
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len);
+
+ dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
+ cl->host_client_id, cl->me_client_id);
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_add_single_flow_creds - adds single buffer credentials.
+ *
+ * @dev: the device structure
+ * @flow: flow control bus message
+ */
+static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
+ struct hbm_flow_control *flow)
+{
+ struct mei_me_client *client;
+ int i;
+
+ for (i = 0; i < dev->me_clients_num; i++) {
+ client = &dev->me_clients[i];
+ if (client && flow->me_addr == client->client_id) {
+ if (client->props.single_recv_buf) {
+ client->mei_flow_ctrl_creds++;
+ dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
+ flow->me_addr);
+ dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
+ client->mei_flow_ctrl_creds);
+ } else {
+ BUG(); /* error in flow control */
+ }
+ }
+ }
+}
+
+/**
+ * mei_hbm_cl_flow_control_res - flow control response from me
+ *
+ * @dev: the device structure
+ * @flow_control: flow control response bus message
+ */
+static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
+ struct hbm_flow_control *flow_control)
+{
+ struct mei_cl *cl = NULL;
+ struct mei_cl *next = NULL;
+
+ if (!flow_control->host_addr) {
+ /* single receive buffer */
+ mei_hbm_add_single_flow_creds(dev, flow_control);
+ return;
+ }
+
+ /* normal connection */
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ if (mei_hbm_cl_addr_equal(cl, flow_control)) {
+ cl->mei_flow_ctrl_creds++;
+ dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
+ flow_control->host_addr, flow_control->me_addr);
+ dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
+ cl->mei_flow_ctrl_creds);
+ break;
+ }
+ }
+}
+
+
+/**
+ * mei_hbm_cl_disconnect_req - sends disconnect message to fw.
+ *
+ * @dev: the device structure
+ * @cl: a client to disconnect from
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ const size_t len = sizeof(struct hbm_client_connect_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, dev->wr_msg.data, len);
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_cl_disconnect_res - disconnect response from ME
+ *
+ * @dev: the device structure
+ * @rs: disconnect response bus message
+ */
+static void mei_hbm_cl_disconnect_res(struct mei_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+ struct mei_cl *cl;
+ struct mei_cl_cb *pos = NULL, *next = NULL;
+
+ dev_dbg(&dev->pdev->dev,
+ "disconnect_response:\n"
+ "ME Client = %d\n"
+ "Host Client = %d\n"
+ "Status = %d\n",
+ rs->me_addr,
+ rs->host_addr,
+ rs->status);
+
+ list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+ cl = pos->cl;
+
+ if (!cl) {
+ list_del(&pos->list);
+ return;
+ }
+
+ dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
+ if (mei_hbm_cl_addr_equal(cl, rs)) {
+ list_del(&pos->list);
+ if (!rs->status)
+ cl->state = MEI_FILE_DISCONNECTED;
+
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
+ }
+ }
+}
+
+/**
+ * mei_hbm_cl_connect_req - send connection request to specific me client
+ *
+ * @dev: the device structure
+ * @cl: a client to connect to
+ *
+ * returns -EIO on write failure
+ */
+int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
+{
+ struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ const size_t len = sizeof(struct hbm_client_connect_request);
+
+ mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, dev->wr_msg.data, len);
+
+ return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_cl_connect_res - connect response from the ME
+ *
+ * @dev: the device structure
+ * @rs: connect response bus message
+ */
+static void mei_hbm_cl_connect_res(struct mei_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+
+ struct mei_cl *cl;
+ struct mei_cl_cb *pos = NULL, *next = NULL;
+
+ dev_dbg(&dev->pdev->dev,
+ "connect_response:\n"
+ "ME Client = %d\n"
+ "Host Client = %d\n"
+ "Status = %d\n",
+ rs->me_addr,
+ rs->host_addr,
+ rs->status);
+
+ /* if WD or iamthif client treat specially */
+
+ if (is_treat_specially_client(&dev->wd_cl, rs)) {
+ dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
+ mei_watchdog_register(dev);
+
+ return;
+ }
+
+ if (is_treat_specially_client(&dev->iamthif_cl, rs)) {
+ dev->iamthif_state = MEI_IAMTHIF_IDLE;
+ return;
+ }
+ list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+
+ cl = pos->cl;
+ if (!cl) {
+ list_del(&pos->list);
+ return;
+ }
+ if (pos->fop_type == MEI_FOP_IOCTL) {
+ if (is_treat_specially_client(cl, rs)) {
+ list_del(&pos->list);
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
+ }
+ }
+ }
+}
+
+
+/**
+ * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware;
+ * the host sends a disconnect response
+ *
+ * @dev: the device structure.
+ * @disconnect_req: disconnect request bus message from the me
+ */
+static void mei_hbm_fw_disconnect_req(struct mei_device *dev,
+ struct hbm_client_connect_request *disconnect_req)
+{
+ struct mei_cl *cl, *next;
+ const size_t len = sizeof(struct hbm_client_connect_response);
+
+ list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+ if (mei_hbm_cl_addr_equal(cl, disconnect_req)) {
+ dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
+ disconnect_req->host_addr,
+ disconnect_req->me_addr);
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->timer_count = 0;
+ if (cl == &dev->wd_cl)
+ dev->wd_pending = false;
+ else if (cl == &dev->iamthif_cl)
+ dev->iamthif_timer = 0;
+
+ /* prepare disconnect response */
+ mei_hbm_hdr(&dev->wr_ext_msg.hdr, len);
+ mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD,
+ dev->wr_ext_msg.data, len);
+ break;
+ }
+ }
+}
+
+
+/**
+ * mei_hbm_dispatch - bottom half read routine after ISR to
+ * handle the read bus message cmd processing.
+ *
+ * @dev: the device structure
+ * @mei_hdr: header of bus message
+ */
+void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+{
+ struct mei_bus_message *mei_msg;
+ struct mei_me_client *me_client;
+ struct hbm_host_version_response *version_res;
+ struct hbm_client_connect_response *connect_res;
+ struct hbm_client_connect_response *disconnect_res;
+ struct hbm_client_connect_request *disconnect_req;
+ struct hbm_flow_control *flow_control;
+ struct hbm_props_response *props_res;
+ struct hbm_host_enum_response *enum_res;
+
+ /* read the message to our buffer */
+ BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf));
+ mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
+ mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
+
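+ /* dispatch on the hbm command id of the received bus message */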
+ switch (mei_msg->hbm_cmd) {
+ case HOST_START_RES_CMD:
+ version_res = (struct hbm_host_version_response *)mei_msg;
+ if (!version_res->host_version_supported) {
+ dev->version = version_res->me_max_version;
+ dev_dbg(&dev->pdev->dev, "version mismatch.\n");
+
+ mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
+ dev->wr_msg.data);
+ mei_write_message(dev, &dev->wr_msg.hdr,
+ dev->wr_msg.data);
+ return;
+ }
+
+ dev->version.major_version = HBM_MAJOR_VERSION;
+ dev->version.minor_version = HBM_MINOR_VERSION;
+ if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+ dev->init_clients_state == MEI_START_MESSAGE) {
+ dev->init_clients_timer = 0;
+ mei_hbm_enum_clients_req(dev);
+ } else {
+ dev->recvd_msg = false;
+ dev_dbg(&dev->pdev->dev, "reset due to received hbm: host start\n");
+ mei_reset(dev, 1);
+ return;
+ }
+
+ dev->recvd_msg = true;
+ dev_dbg(&dev->pdev->dev, "host start response message received.\n");
+ break;
+
+ case CLIENT_CONNECT_RES_CMD:
+ connect_res = (struct hbm_client_connect_response *) mei_msg;
+ mei_hbm_cl_connect_res(dev, connect_res);
+ dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
+ wake_up(&dev->wait_recvd_msg);
+ break;
+
+ case CLIENT_DISCONNECT_RES_CMD:
+ disconnect_res = (struct hbm_client_connect_response *) mei_msg;
+ mei_hbm_cl_disconnect_res(dev, disconnect_res);
+ dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
+ wake_up(&dev->wait_recvd_msg);
+ break;
+
+ case MEI_FLOW_CONTROL_CMD:
+ flow_control = (struct hbm_flow_control *) mei_msg;
+ mei_hbm_cl_flow_control_res(dev, flow_control);
+ dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
+ break;
+
+ case HOST_CLIENT_PROPERTIES_RES_CMD:
+ props_res = (struct hbm_props_response *)mei_msg;
+ me_client = &dev->me_clients[dev->me_client_presentation_num];
+
+ if (props_res->status || !dev->me_clients) {
+ dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
+ mei_reset(dev, 1);
+ return;
+ }
+
+ if (me_client->client_id != props_res->address) {
+ dev_err(&dev->pdev->dev,
+ "Host client properties reply mismatch\n");
+ mei_reset(dev, 1);
+
+ return;
+ }
+
+ if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+ dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
+ dev_err(&dev->pdev->dev,
+ "Unexpected client properties reply\n");
+ mei_reset(dev, 1);
+
+ return;
+ }
+
+ me_client->props = props_res->client_properties;
+ dev->me_client_index++;
+ dev->me_client_presentation_num++;
+
+ /* request property for the next client */
+ mei_hbm_prop_req(dev);
+
+ break;
+
+ case HOST_ENUM_RES_CMD:
+ enum_res = (struct hbm_host_enum_response *) mei_msg;
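+ /* valid_addresses is a 32-byte (256-bit) bitmap of enumerated ME client addresses */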
+ memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
+ if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+ dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
+ dev->init_clients_timer = 0;
+ dev->me_client_presentation_num = 0;
+ dev->me_client_index = 0;
+ mei_hbm_me_cl_allocate(dev);
+ dev->init_clients_state =
+ MEI_CLIENT_PROPERTIES_MESSAGE;
+
+ /* first property request */
+ mei_hbm_prop_req(dev);
+ } else {
+ dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
+ mei_reset(dev, 1);
+ return;
+ }
+ break;
+
+ case HOST_STOP_RES_CMD:
+ dev->dev_state = MEI_DEV_DISABLED;
+ dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
+ mei_reset(dev, 1);
+ break;
+
+ case CLIENT_DISCONNECT_REQ_CMD:
+ /* search for client */
+ disconnect_req = (struct hbm_client_connect_request *)mei_msg;
+ mei_hbm_fw_disconnect_req(dev, disconnect_req);
+ break;
+
+ case ME_STOP_REQ_CMD:
+
+ mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
+ dev->wr_ext_msg.data);
+ break;
+ default:
+ BUG();
+ break;
+
+ }
+}
+
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
new file mode 100644
index 000000000000..b552afbaf85c
--- /dev/null
+++ b/drivers/misc/mei/hbm.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_HBM_H_
+#define _MEI_HBM_H_
+
+void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
+
+static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
+{
+ hdr->host_addr = 0;
+ hdr->me_addr = 0;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+}
+
+void mei_hbm_start_req(struct mei_device *dev);
+
+int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
+
+
+#endif /* _MEI_HBM_H_ */
+
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
new file mode 100644
index 000000000000..6a203b6e8346
--- /dev/null
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -0,0 +1,167 @@
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _MEI_HW_MEI_REGS_H_
+#define _MEI_HW_MEI_REGS_H_
+
+/*
+ * MEI device IDs
+ */
+#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */
+#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */
+#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */
+#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */
+
+#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */
+#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */
+
+#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */
+#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */
+#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */
+#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */
+#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */
+
+#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */
+#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */
+
+#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */
+#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */
+#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */
+#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */
+
+#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */
+#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */
+#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */
+#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */
+
+#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */
+#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */
+
+#define MEI_DEV_ID_CPT_1 0x1C3A /* Cougar Point */
+#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */
+
+#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */
+#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
+#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
+
+#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
+#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
+/*
+ * MEI HW Section
+ */
+
+/* MEI registers */
+/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
+#define H_CB_WW 0
+/* H_CSR - Host Control Status register */
+#define H_CSR 4
+/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
+#define ME_CB_RW 8
+/* ME_CSR_HA - ME Control Status Host Access register (read only) */
+#define ME_CSR_HA 0xC
+
+
+/* register bits of H_CSR (Host Control Status register) */
+/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
+#define H_CBD 0xFF000000
+/* Host Circular Buffer Write Pointer */
+#define H_CBWP 0x00FF0000
+/* Host Circular Buffer Read Pointer */
+#define H_CBRP 0x0000FF00
+/* Host Reset */
+#define H_RST 0x00000010
+/* Host Ready */
+#define H_RDY 0x00000008
+/* Host Interrupt Generate */
+#define H_IG 0x00000004
+/* Host Interrupt Status */
+#define H_IS 0x00000002
+/* Host Interrupt Enable */
+#define H_IE 0x00000001
+
+
+/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
+/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
+ * access to ME_CBD */
+#define ME_CBD_HRA 0xFF000000
+/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
+#define ME_CBWP_HRA 0x00FF0000
+/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
+#define ME_CBRP_HRA 0x0000FF00
+/* ME Reset HRA - host read only access to ME_RST */
+#define ME_RST_HRA 0x00000010
+/* ME Ready HRA - host read only access to ME_RDY */
+#define ME_RDY_HRA 0x00000008
+/* ME Interrupt Generate HRA - host read only access to ME_IG */
+#define ME_IG_HRA 0x00000004
+/* ME Interrupt Status HRA - host read only access to ME_IS */
+#define ME_IS_HRA 0x00000002
+/* ME Interrupt Enable HRA - host read only access to ME_IE */
+#define ME_IE_HRA 0x00000001
+
+#endif /* _MEI_HW_MEI_REGS_H_ */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
new file mode 100644
index 000000000000..45ea7185c003
--- /dev/null
+++ b/drivers/misc/mei/hw-me.c
@@ -0,0 +1,576 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+
+#include "mei_dev.h"
+#include "hw-me.h"
+
+#include "hbm.h"
+
+
+/**
+ * mei_reg_read - Reads 32bit data from the mei device
+ *
+ * @hw: the me hardware structure
+ * @offset: offset from which to read the data
+ *
+ * returns register value (u32)
+ */
+static inline u32 mei_reg_read(const struct mei_me_hw *hw,
+ unsigned long offset)
+{
+ return ioread32(hw->mem_addr + offset);
+}
+
+
+/**
+ * mei_reg_write - Writes 32bit data to the mei device
+ *
+ * @hw: the me hardware structure
+ * @offset: offset to which the data is written
+ * @value: register value to write (u32)
+ */
+static inline void mei_reg_write(const struct mei_me_hw *hw,
+ unsigned long offset, u32 value)
+{
+ iowrite32(value, hw->mem_addr + offset);
+}
+
+/**
+ * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
+ * read window register
+ *
+ * @dev: the device structure
+ *
+ * returns ME_CB_RW register value (u32)
+ */
+static u32 mei_me_mecbrw_read(const struct mei_device *dev)
+{
+ return mei_reg_read(to_me_hw(dev), ME_CB_RW);
+}
+/**
+ * mei_mecsr_read - Reads 32bit data from the ME CSR
+ *
+ * @hw: the me hardware structure
+ *
+ * returns ME_CSR_HA register value (u32)
+ */
+static inline u32 mei_mecsr_read(const struct mei_me_hw *hw)
+{
+ return mei_reg_read(hw, ME_CSR_HA);
+}
+
+/**
+ * mei_hcsr_read - Reads 32bit data from the host CSR
+ *
+ * @hw: the me hardware structure
+ *
+ * returns H_CSR register value (u32)
+ */
+static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
+{
+ return mei_reg_read(hw, H_CSR);
+}
+
+/**
+ * mei_hcsr_set - writes H_CSR register to the mei device,
+ * and ignores the H_IS bit for it is write-one-to-zero.
+ *
+ * @hw: the me hardware structure
+ */
+static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
+{
+ hcsr &= ~H_IS;
+ mei_reg_write(hw, H_CSR, hcsr);
+}
+
+
+/**
+ * mei_me_hw_config - configure hw dependent settings
+ *
+ * @dev: mei device
+ */
+static void mei_me_hw_config(struct mei_device *dev)
+{
+ u32 hcsr = mei_hcsr_read(to_me_hw(dev));
+ /* Doesn't change in runtime */
+ dev->hbuf_depth = (hcsr & H_CBD) >> 24;
+}
+/**
+ * mei_me_intr_clear - clear and stop interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_clear(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+ if ((hcsr & H_IS) == H_IS)
+ mei_reg_write(hw, H_CSR, hcsr);
+}
+/**
+ * mei_me_intr_enable - enables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_enable(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+ hcsr |= H_IE;
+ mei_hcsr_set(hw, hcsr);
+}
+
+/**
+ * mei_me_intr_disable - disables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_disable(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+ hcsr &= ~H_IE;
+ mei_hcsr_set(hw, hcsr);
+}
+
+/**
+ * mei_me_hw_reset - resets fw via mei csr register.
+ *
+ * @dev: the device structure
+ * @intr_enable: if interrupts should be enabled after reset.
+ */
+static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr = mei_hcsr_read(hw);
+
+ dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
+
+ hcsr |= (H_RST | H_IG);
+
+ if (intr_enable)
+ hcsr |= H_IE;
+ else
+ hcsr &= ~H_IE;
+
+ mei_hcsr_set(hw, hcsr);
+
+ hcsr = mei_hcsr_read(hw) | H_IG;
+ hcsr &= ~H_RST;
+
+ mei_hcsr_set(hw, hcsr);
+
+ hcsr = mei_hcsr_read(hw);
+
+ dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr);
+}
+
+/**
+ * mei_me_host_set_ready - enable device
+ *
+ * @dev - mei device
+ * sets the H_IE, H_IG and H_RDY bits in the host CSR
+ */
+
+static void mei_me_host_set_ready(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ hw->host_hw_state |= H_IE | H_IG | H_RDY;
+ mei_hcsr_set(hw, hw->host_hw_state);
+}
+/**
+ * mei_me_host_is_ready - check whether the host has turned ready
+ *
+ * @dev - mei device
+ * returns bool
+ */
+static bool mei_me_host_is_ready(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ hw->host_hw_state = mei_hcsr_read(hw);
+ return (hw->host_hw_state & H_RDY) == H_RDY;
+}
+
+/**
+ * mei_me_hw_is_ready - check whether the me(hw) has turned ready
+ *
+ * @dev - mei device
+ * returns bool
+ */
+static bool mei_me_hw_is_ready(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ hw->me_hw_state = mei_mecsr_read(hw);
+ return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
+}
+
+/**
+ * mei_hbuf_filled_slots - gets number of device filled buffer slots
+ *
+ * @dev: the device structure
+ *
+ * returns number of filled slots
+ */
+static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ char read_ptr, write_ptr;
+
+ hw->host_hw_state = mei_hcsr_read(hw);
+
+ read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
+ write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
+
+ return (unsigned char) (write_ptr - read_ptr);
+}
+
+/**
+ * mei_me_hbuf_is_empty - checks if host buffer is empty.
+ *
+ * @dev: the device structure
+ *
+ * returns true if empty, false - otherwise.
+ */
+static bool mei_me_hbuf_is_empty(struct mei_device *dev)
+{
+ return mei_hbuf_filled_slots(dev) == 0;
+}
+
+/**
+ * mei_me_hbuf_empty_slots - counts write empty slots.
+ *
+ * @dev: the device structure
+ *
+ * returns -EOVERFLOW if overflow, otherwise empty slots count
+ */
+static int mei_me_hbuf_empty_slots(struct mei_device *dev)
+{
+ unsigned char filled_slots, empty_slots;
+
+ filled_slots = mei_hbuf_filled_slots(dev);
+ empty_slots = dev->hbuf_depth - filled_slots;
+
+ /* check for overflow */
+ if (filled_slots > dev->hbuf_depth)
+ return -EOVERFLOW;
+
+ return empty_slots;
+}
+
+static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
+{
+ return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
+}
+
+
+/**
+ * mei_me_write_message - writes a message to the mei device.
+ *
+ * @dev: the device structure
+ * @header: mei HECI header of message
+ * @buf: message payload to be written
+ *
+ * This function returns -EIO if write has failed
+ */
+static int mei_me_write_message(struct mei_device *dev,
+ struct mei_msg_hdr *header,
+ unsigned char *buf)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ unsigned long rem, dw_cnt;
+ unsigned long length = header->length;
+ u32 *reg_buf = (u32 *)buf;
+ u32 hcsr;
+ int i;
+ int empty_slots;
+
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+
+ empty_slots = mei_hbuf_empty_slots(dev);
+ dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
+
+ dw_cnt = mei_data2slots(length);
+ if (empty_slots < 0 || dw_cnt > empty_slots)
+ return -EIO;
+
+ mei_reg_write(hw, H_CB_WW, *((u32 *) header));
+
+ for (i = 0; i < length / 4; i++)
+ mei_reg_write(hw, H_CB_WW, reg_buf[i]);
+
+ rem = length & 0x3;
+ if (rem > 0) {
+ u32 reg = 0;
+ memcpy(&reg, &buf[length - rem], rem);
+ mei_reg_write(hw, H_CB_WW, reg);
+ }
+
+ hcsr = mei_hcsr_read(hw) | H_IG;
+ mei_hcsr_set(hw, hcsr);
+ if (!mei_me_hw_is_ready(dev))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * mei_me_count_full_read_slots - counts read full slots.
+ *
+ * @dev: the device structure
+ *
+ * returns -EOVERFLOW if overflow, otherwise filled slots count
+ */
+static int mei_me_count_full_read_slots(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ char read_ptr, write_ptr;
+ unsigned char buffer_depth, filled_slots;
+
+ hw->me_hw_state = mei_mecsr_read(hw);
+ buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
+ read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
+ write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
+ filled_slots = (unsigned char) (write_ptr - read_ptr);
+
+ /* check for overflow */
+ if (filled_slots > buffer_depth)
+ return -EOVERFLOW;
+
+ dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
+ return (int)filled_slots;
+}
+
+/**
+ * mei_me_read_slots - reads a message from mei device.
+ *
+ * @dev: the device structure
+ * @buffer: message buffer to be filled
+ * @buffer_length: length of the message to read
+ */
+static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
+ unsigned long buffer_length)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 *reg_buf = (u32 *)buffer;
+ u32 hcsr;
+
+ for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
+ *reg_buf++ = mei_me_mecbrw_read(dev);
+
+ if (buffer_length > 0) {
+ u32 reg = mei_me_mecbrw_read(dev);
+ memcpy(reg_buf, &reg, buffer_length);
+ }
+
+ hcsr = mei_hcsr_read(hw) | H_IG;
+ mei_hcsr_set(hw, hcsr);
+ return 0;
+}
+
+/**
+ * mei_me_irq_quick_handler - The ISR of the MEI device
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ */
+
+irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *) dev_id;
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 csr_reg = mei_hcsr_read(hw);
+
+ if ((csr_reg & H_IS) != H_IS)
+ return IRQ_NONE;
+
+ /* clear H_IS bit in H_CSR */
+ mei_reg_write(hw, H_CSR, csr_reg);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
+ * processing.
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ *
+ */
+irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *) dev_id;
+ struct mei_cl_cb complete_list;
+ struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
+ struct mei_cl *cl;
+ s32 slots;
+ int rets;
+ bool bus_message_received;
+
+
+ dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
+ /* initialize our complete list */
+ mutex_lock(&dev->device_lock);
+ mei_io_list_init(&complete_list);
+
+ /* Ack the interrupt here
+ * In case of MSI we don't go through the quick handler */
+ if (pci_dev_msi_enabled(dev->pdev))
+ mei_clear_interrupts(dev);
+
+ /* check if ME wants a reset */
+ if (!mei_hw_is_ready(dev) &&
+ dev->dev_state != MEI_DEV_RESETING &&
+ dev->dev_state != MEI_DEV_INITIALIZING) {
+ dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+ mei_reset(dev, 1);
+ mutex_unlock(&dev->device_lock);
+ return IRQ_HANDLED;
+ }
+
+ /* check if we need to start the dev */
+ if (!mei_host_is_ready(dev)) {
+ if (mei_hw_is_ready(dev)) {
+ dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
+
+ mei_host_set_ready(dev);
+
+ dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
+			/* link is established; start sending messages */
+
+ dev->dev_state = MEI_DEV_INIT_CLIENTS;
+
+ mei_hbm_start_req(dev);
+ mutex_unlock(&dev->device_lock);
+ return IRQ_HANDLED;
+ } else {
+ dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+ mutex_unlock(&dev->device_lock);
+ return IRQ_HANDLED;
+ }
+ }
+ /* check slots available for reading */
+ slots = mei_count_full_read_slots(dev);
+ while (slots > 0) {
+ /* we have urgent data to send so break the read */
+ if (dev->wr_ext_msg.hdr.length)
+ break;
+ dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
+ dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n");
+ rets = mei_irq_read_handler(dev, &complete_list, &slots);
+ if (rets)
+ goto end;
+ }
+ rets = mei_irq_write_handler(dev, &complete_list);
+end:
+ dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+
+ bus_message_received = false;
+ if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
+ dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
+ bus_message_received = true;
+ }
+ mutex_unlock(&dev->device_lock);
+ if (bus_message_received) {
+ dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
+ wake_up_interruptible(&dev->wait_recvd_msg);
+ bus_message_received = false;
+ }
+ if (list_empty(&complete_list.list))
+ return IRQ_HANDLED;
+
+
+ list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
+ cl = cb_pos->cl;
+ list_del(&cb_pos->list);
+ if (cl) {
+ if (cl != &dev->iamthif_cl) {
+ dev_dbg(&dev->pdev->dev, "completing call back.\n");
+ mei_irq_complete_handler(cl, cb_pos);
+ cb_pos = NULL;
+ } else if (cl == &dev->iamthif_cl) {
+ mei_amthif_complete(dev, cb_pos);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+static const struct mei_hw_ops mei_me_hw_ops = {
+
+ .host_set_ready = mei_me_host_set_ready,
+ .host_is_ready = mei_me_host_is_ready,
+
+ .hw_is_ready = mei_me_hw_is_ready,
+ .hw_reset = mei_me_hw_reset,
+ .hw_config = mei_me_hw_config,
+
+ .intr_clear = mei_me_intr_clear,
+ .intr_enable = mei_me_intr_enable,
+ .intr_disable = mei_me_intr_disable,
+
+ .hbuf_free_slots = mei_me_hbuf_empty_slots,
+ .hbuf_is_ready = mei_me_hbuf_is_empty,
+ .hbuf_max_len = mei_me_hbuf_max_len,
+
+ .write = mei_me_write_message,
+
+ .rdbuf_full_slots = mei_me_count_full_read_slots,
+ .read_hdr = mei_me_mecbrw_read,
+ .read = mei_me_read_slots
+};
+
+/**
+ * mei_me_dev_init - allocates and initializes the mei device structure
+ *
+ * @pdev: The pci device structure
+ *
+ * returns The mei_device pointer on success, NULL on failure.
+ */
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+
+ dev = kzalloc(sizeof(struct mei_device) +
+ sizeof(struct mei_me_hw), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ mei_device_init(dev);
+
+ INIT_LIST_HEAD(&dev->wd_cl.link);
+ INIT_LIST_HEAD(&dev->iamthif_cl.link);
+ mei_io_list_init(&dev->amthif_cmd_list);
+ mei_io_list_init(&dev->amthif_rd_complete_list);
+
+ INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
+ INIT_WORK(&dev->init_work, mei_host_client_init);
+
+ dev->ops = &mei_me_hw_ops;
+
+ dev->pdev = pdev;
+ return dev;
+}
+
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
new file mode 100644
index 000000000000..8518d3eeb838
--- /dev/null
+++ b/drivers/misc/mei/hw-me.h
@@ -0,0 +1,48 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+
+
+#ifndef _MEI_INTERFACE_H_
+#define _MEI_INTERFACE_H_
+
+#include <linux/mei.h>
+#include "mei_dev.h"
+#include "client.h"
+
+struct mei_me_hw {
+ void __iomem *mem_addr;
+ /*
+ * hw states of host and fw(ME)
+ */
+ u32 host_hw_state;
+ u32 me_hw_state;
+};
+
+#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
+
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev);
+
+/* get slots (dwords) from a message length + header (bytes) */
+static inline unsigned char mei_data2slots(size_t length)
+{
+ return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
+}
+
+irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
+irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
+
+#endif /* _MEI_INTERFACE_H_ */
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index be8ca6b333ca..cb2f556b4252 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -31,109 +31,6 @@
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
-/*
- * Internal Clients Number
- */
-#define MEI_WD_HOST_CLIENT_ID 1
-#define MEI_IAMTHIF_HOST_CLIENT_ID 2
-
-/*
- * MEI device IDs
- */
-#define MEI_DEV_ID_82946GZ 0x2974 /* 82946GZ/GL */
-#define MEI_DEV_ID_82G35 0x2984 /* 82G35 Express */
-#define MEI_DEV_ID_82Q965 0x2994 /* 82Q963/Q965 */
-#define MEI_DEV_ID_82G965 0x29A4 /* 82P965/G965 */
-
-#define MEI_DEV_ID_82GM965 0x2A04 /* Mobile PM965/GM965 */
-#define MEI_DEV_ID_82GME965 0x2A14 /* Mobile GME965/GLE960 */
-
-#define MEI_DEV_ID_ICH9_82Q35 0x29B4 /* 82Q35 Express */
-#define MEI_DEV_ID_ICH9_82G33 0x29C4 /* 82G33/G31/P35/P31 Express */
-#define MEI_DEV_ID_ICH9_82Q33 0x29D4 /* 82Q33 Express */
-#define MEI_DEV_ID_ICH9_82X38 0x29E4 /* 82X38/X48 Express */
-#define MEI_DEV_ID_ICH9_3200 0x29F4 /* 3200/3210 Server */
-
-#define MEI_DEV_ID_ICH9_6 0x28B4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_7 0x28C4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_8 0x28D4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_9 0x28E4 /* Bearlake */
-#define MEI_DEV_ID_ICH9_10 0x28F4 /* Bearlake */
-
-#define MEI_DEV_ID_ICH9M_1 0x2A44 /* Cantiga */
-#define MEI_DEV_ID_ICH9M_2 0x2A54 /* Cantiga */
-#define MEI_DEV_ID_ICH9M_3 0x2A64 /* Cantiga */
-#define MEI_DEV_ID_ICH9M_4 0x2A74 /* Cantiga */
-
-#define MEI_DEV_ID_ICH10_1 0x2E04 /* Eaglelake */
-#define MEI_DEV_ID_ICH10_2 0x2E14 /* Eaglelake */
-#define MEI_DEV_ID_ICH10_3 0x2E24 /* Eaglelake */
-#define MEI_DEV_ID_ICH10_4 0x2E34 /* Eaglelake */
-
-#define MEI_DEV_ID_IBXPK_1 0x3B64 /* Calpella */
-#define MEI_DEV_ID_IBXPK_2 0x3B65 /* Calpella */
-
-#define MEI_DEV_ID_CPT_1 0x1C3A /* Couger Point */
-#define MEI_DEV_ID_PBG_1 0x1D3A /* C600/X79 Patsburg */
-
-#define MEI_DEV_ID_PPT_1 0x1E3A /* Panther Point */
-#define MEI_DEV_ID_PPT_2 0x1CBA /* Panther Point */
-#define MEI_DEV_ID_PPT_3 0x1DBA /* Panther Point */
-
-#define MEI_DEV_ID_LPT 0x8C3A /* Lynx Point */
-#define MEI_DEV_ID_LPT_LP 0x9C3A /* Lynx Point LP */
-/*
- * MEI HW Section
- */
-
-/* MEI registers */
-/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
-#define H_CB_WW 0
-/* H_CSR - Host Control Status register */
-#define H_CSR 4
-/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
-#define ME_CB_RW 8
-/* ME_CSR_HA - ME Control Status Host Access register (read only) */
-#define ME_CSR_HA 0xC
-
-
-/* register bits of H_CSR (Host Control Status register) */
-/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
-#define H_CBD 0xFF000000
-/* Host Circular Buffer Write Pointer */
-#define H_CBWP 0x00FF0000
-/* Host Circular Buffer Read Pointer */
-#define H_CBRP 0x0000FF00
-/* Host Reset */
-#define H_RST 0x00000010
-/* Host Ready */
-#define H_RDY 0x00000008
-/* Host Interrupt Generate */
-#define H_IG 0x00000004
-/* Host Interrupt Status */
-#define H_IS 0x00000002
-/* Host Interrupt Enable */
-#define H_IE 0x00000001
-
-
-/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
-/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
-access to ME_CBD */
-#define ME_CBD_HRA 0xFF000000
-/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
-#define ME_CBWP_HRA 0x00FF0000
-/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
-#define ME_CBRP_HRA 0x0000FF00
-/* ME Reset HRA - host read only access to ME_RST */
-#define ME_RST_HRA 0x00000010
-/* ME Ready HRA - host read only access to ME_RDY */
-#define ME_RDY_HRA 0x00000008
-/* ME Interrupt Generate HRA - host read only access to ME_IG */
-#define ME_IG_HRA 0x00000004
-/* ME Interrupt Status HRA - host read only access to ME_IS */
-#define ME_IS_HRA 0x00000002
-/* ME Interrupt Enable HRA - host read only access to ME_IE */
-#define ME_IE_HRA 0x00000001
/*
* MEI Version
@@ -224,6 +121,22 @@ struct mei_bus_message {
u8 data[0];
} __packed;
+/**
+ * struct mei_hbm_cl_cmd - client specific host bus command
+ *	CONNECT, DISCONNECT, and FLOW CONTROL
+ *
+ * @hbm_cmd - bus message command header
+ * @me_addr - address of the client in ME
+ * @host_addr - address of the client in the driver
+ * @data
+ */
+struct mei_hbm_cl_cmd {
+ u8 hbm_cmd;
+ u8 me_addr;
+ u8 host_addr;
+ u8 data;
+};
+
struct hbm_version {
u8 minor_version;
u8 major_version;
@@ -333,11 +246,5 @@ struct hbm_flow_control {
u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
} __packed;
-struct mei_me_client {
- struct mei_client_properties props;
- u8 client_id;
- u8 mei_flow_ctrl_creds;
-} __packed;
-
#endif
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index a54cd5567ca2..6ec530168afb 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -19,11 +19,11 @@
#include <linux/wait.h>
#include <linux/delay.h>
-#include "mei_dev.h"
-#include "hw.h"
-#include "interface.h"
#include <linux/mei.h>
+#include "mei_dev.h"
+#include "client.h"
+
const char *mei_dev_state_str(int state)
{
#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state
@@ -42,84 +42,20 @@ const char *mei_dev_state_str(int state)
#undef MEI_DEV_STATE
}
-
-
-/**
- * mei_io_list_flush - removes list entry belonging to cl.
- *
- * @list: An instance of our list structure
- * @cl: private data of the file object
- */
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
-{
- struct mei_cl_cb *pos;
- struct mei_cl_cb *next;
-
- list_for_each_entry_safe(pos, next, &list->list, list) {
- if (pos->cl) {
- if (mei_cl_cmp_id(cl, pos->cl))
- list_del(&pos->list);
- }
- }
-}
-/**
- * mei_cl_flush_queues - flushes queue lists belonging to cl.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- */
-int mei_cl_flush_queues(struct mei_cl *cl)
+void mei_device_init(struct mei_device *dev)
{
- if (!cl || !cl->dev)
- return -EINVAL;
-
- dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
- mei_io_list_flush(&cl->dev->read_list, cl);
- mei_io_list_flush(&cl->dev->write_list, cl);
- mei_io_list_flush(&cl->dev->write_waiting_list, cl);
- mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
- mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
- mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
- mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
- return 0;
-}
-
-
-
-/**
- * init_mei_device - allocates and initializes the mei device structure
- *
- * @pdev: The pci device structure
- *
- * returns The mei_device_device pointer on success, NULL on failure.
- */
-struct mei_device *mei_device_init(struct pci_dev *pdev)
-{
- struct mei_device *dev;
-
- dev = kzalloc(sizeof(struct mei_device), GFP_KERNEL);
- if (!dev)
- return NULL;
-
/* setup our list array */
INIT_LIST_HEAD(&dev->file_list);
- INIT_LIST_HEAD(&dev->wd_cl.link);
- INIT_LIST_HEAD(&dev->iamthif_cl.link);
mutex_init(&dev->device_lock);
init_waitqueue_head(&dev->wait_recvd_msg);
init_waitqueue_head(&dev->wait_stop_wd);
dev->dev_state = MEI_DEV_INITIALIZING;
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
mei_io_list_init(&dev->read_list);
mei_io_list_init(&dev->write_list);
mei_io_list_init(&dev->write_waiting_list);
mei_io_list_init(&dev->ctrl_wr_list);
mei_io_list_init(&dev->ctrl_rd_list);
- mei_io_list_init(&dev->amthif_cmd_list);
- mei_io_list_init(&dev->amthif_rd_complete_list);
- dev->pdev = pdev;
- return dev;
}
/**
@@ -131,101 +67,64 @@ struct mei_device *mei_device_init(struct pci_dev *pdev)
*/
int mei_hw_init(struct mei_device *dev)
{
- int err = 0;
- int ret;
+ int ret = 0;
mutex_lock(&dev->device_lock);
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->me_hw_state = mei_mecsr_read(dev);
- dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, mestate = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
/* acknowledge interrupt and stop interupts */
- if ((dev->host_hw_state & H_IS) == H_IS)
- mei_reg_write(dev, H_CSR, dev->host_hw_state);
+ mei_clear_interrupts(dev);
- /* Doesn't change in runtime */
- dev->hbuf_depth = (dev->host_hw_state & H_CBD) >> 24;
+ mei_hw_config(dev);
dev->recvd_msg = false;
dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
mei_reset(dev, 1);
- dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
/* wait for ME to turn on ME_RDY */
if (!dev->recvd_msg) {
mutex_unlock(&dev->device_lock);
- err = wait_event_interruptible_timeout(dev->wait_recvd_msg,
+ ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
dev->recvd_msg,
mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
mutex_lock(&dev->device_lock);
}
- if (err <= 0 && !dev->recvd_msg) {
+ if (ret <= 0 && !dev->recvd_msg) {
dev->dev_state = MEI_DEV_DISABLED;
dev_dbg(&dev->pdev->dev,
"wait_event_interruptible_timeout failed"
"on wait for ME to turn on ME_RDY.\n");
- ret = -ENODEV;
- goto out;
+ goto err;
}
- if (!(((dev->host_hw_state & H_RDY) == H_RDY) &&
- ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA))) {
- dev->dev_state = MEI_DEV_DISABLED;
- dev_dbg(&dev->pdev->dev,
- "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
- if (!(dev->host_hw_state & H_RDY))
- dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
- if (!(dev->me_hw_state & ME_RDY_HRA))
- dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");
+ if (!mei_host_is_ready(dev)) {
+ dev_err(&dev->pdev->dev, "host is not ready.\n");
+ goto err;
+ }
- dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
- ret = -ENODEV;
- goto out;
+ if (!mei_hw_is_ready(dev)) {
+ dev_err(&dev->pdev->dev, "ME is not ready.\n");
+ goto err;
}
if (dev->version.major_version != HBM_MAJOR_VERSION ||
dev->version.minor_version != HBM_MINOR_VERSION) {
dev_dbg(&dev->pdev->dev, "MEI start failed.\n");
- ret = -ENODEV;
- goto out;
+ goto err;
}
dev->recvd_msg = false;
- dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
- dev_dbg(&dev->pdev->dev, "ME turn on ME_RDY and host turn on H_RDY.\n");
dev_dbg(&dev->pdev->dev, "link layer has been established.\n");
- dev_dbg(&dev->pdev->dev, "MEI start success.\n");
- ret = 0;
-out:
mutex_unlock(&dev->device_lock);
- return ret;
-}
-
-/**
- * mei_hw_reset - resets fw via mei csr register.
- *
- * @dev: the device structure
- * @interrupts_enabled: if interrupt should be enabled after reset.
- */
-static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
-{
- dev->host_hw_state |= (H_RST | H_IG);
-
- if (interrupts_enabled)
- mei_enable_interrupts(dev);
- else
- mei_disable_interrupts(dev);
+ return 0;
+err:
+ dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
+ dev->dev_state = MEI_DEV_DISABLED;
+ mutex_unlock(&dev->device_lock);
+ return -ENODEV;
}
/**
@@ -236,56 +135,34 @@ static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
*/
void mei_reset(struct mei_device *dev, int interrupts_enabled)
{
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
bool unexpected;
- if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
- dev->need_reset = true;
+ if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET)
return;
- }
unexpected = (dev->dev_state != MEI_DEV_INITIALIZING &&
dev->dev_state != MEI_DEV_DISABLED &&
dev->dev_state != MEI_DEV_POWER_DOWN &&
dev->dev_state != MEI_DEV_POWER_UP);
- dev->host_hw_state = mei_hcsr_read(dev);
-
- dev_dbg(&dev->pdev->dev, "before reset host_hw_state = 0x%08x.\n",
- dev->host_hw_state);
-
mei_hw_reset(dev, interrupts_enabled);
- dev->host_hw_state &= ~H_RST;
- dev->host_hw_state |= H_IG;
-
- mei_hcsr_set(dev);
-
- dev_dbg(&dev->pdev->dev, "currently saved host_hw_state = 0x%08x.\n",
- dev->host_hw_state);
-
- dev->need_reset = false;
if (dev->dev_state != MEI_DEV_INITIALIZING) {
if (dev->dev_state != MEI_DEV_DISABLED &&
dev->dev_state != MEI_DEV_POWER_DOWN)
dev->dev_state = MEI_DEV_RESETING;
- list_for_each_entry_safe(cl_pos,
- cl_next, &dev->file_list, link) {
- cl_pos->state = MEI_FILE_DISCONNECTED;
- cl_pos->mei_flow_ctrl_creds = 0;
- cl_pos->read_cb = NULL;
- cl_pos->timer_count = 0;
- }
+ mei_cl_all_disconnect(dev);
+
/* remove entry if already in list */
dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
- mei_me_cl_unlink(dev, &dev->wd_cl);
-
- mei_me_cl_unlink(dev, &dev->iamthif_cl);
+ mei_cl_unlink(&dev->wd_cl);
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
+ mei_cl_unlink(&dev->iamthif_cl);
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
mei_amthif_reset_params(dev);
memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
@@ -295,392 +172,17 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
dev->rd_msg_hdr = 0;
dev->wd_pending = false;
- /* update the state of the registers after reset */
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->me_hw_state = mei_mecsr_read(dev);
-
- dev_dbg(&dev->pdev->dev, "after reset host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
- dev->host_hw_state, dev->me_hw_state);
-
if (unexpected)
dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
mei_dev_state_str(dev->dev_state));
- /* Wake up all readings so they can be interrupted */
- list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
- if (waitqueue_active(&cl_pos->rx_wait)) {
- dev_dbg(&dev->pdev->dev, "Waking up client!\n");
- wake_up_interruptible(&cl_pos->rx_wait);
- }
- }
- /* remove all waiting requests */
- list_for_each_entry_safe(cb_pos, cb_next, &dev->write_list.list, list) {
- list_del(&cb_pos->list);
- mei_io_cb_free(cb_pos);
- }
-}
-
-
-
-/**
- * host_start_message - mei host sends start message.
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_host_start_message(struct mei_device *dev)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_host_version_request *start_req;
- const size_t len = sizeof(struct hbm_host_version_request);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- /* host start message */
- start_req = (struct hbm_host_version_request *)&dev->wr_msg_buf[1];
- memset(start_req, 0, len);
- start_req->hbm_cmd = HOST_START_REQ_CMD;
- start_req->host_version.major_version = HBM_MAJOR_VERSION;
- start_req->host_version.minor_version = HBM_MINOR_VERSION;
-
- dev->recvd_msg = false;
- if (mei_write_message(dev, mei_hdr, (unsigned char *)start_req, len)) {
- dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
- dev->dev_state = MEI_DEV_RESETING;
- mei_reset(dev, 1);
- }
- dev->init_clients_state = MEI_START_MESSAGE;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- return ;
-}
-
-/**
- * host_enum_clients_message - host sends enumeration client request message.
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_host_enum_clients_message(struct mei_device *dev)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_host_enum_request *enum_req;
- const size_t len = sizeof(struct hbm_host_enum_request);
- /* enumerate clients */
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
- memset(enum_req, 0, sizeof(struct hbm_host_enum_request));
- enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
-
- if (mei_write_message(dev, mei_hdr, (unsigned char *)enum_req, len)) {
- dev->dev_state = MEI_DEV_RESETING;
- dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
- mei_reset(dev, 1);
- }
- dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- return;
-}
-
-
-/**
- * allocate_me_clients_storage - allocates storage for me clients
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_allocate_me_clients_storage(struct mei_device *dev)
-{
- struct mei_me_client *clients;
- int b;
-
- /* count how many ME clients we have */
- for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
- dev->me_clients_num++;
-
- if (dev->me_clients_num <= 0)
- return ;
-
-
- if (dev->me_clients != NULL) {
- kfree(dev->me_clients);
- dev->me_clients = NULL;
- }
- dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
- dev->me_clients_num * sizeof(struct mei_me_client));
- /* allocate storage for ME clients representation */
- clients = kcalloc(dev->me_clients_num,
- sizeof(struct mei_me_client), GFP_KERNEL);
- if (!clients) {
- dev_dbg(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
- dev->dev_state = MEI_DEV_RESETING;
- mei_reset(dev, 1);
- return ;
- }
- dev->me_clients = clients;
- return ;
-}
-
-void mei_host_client_init(struct work_struct *work)
-{
- struct mei_device *dev = container_of(work,
- struct mei_device, init_work);
- struct mei_client_properties *client_props;
- int i;
-
- mutex_lock(&dev->device_lock);
-
- bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
- dev->open_handle_count = 0;
-
- /*
- * Reserving the first three client IDs
- * 0: Reserved for MEI Bus Message communications
- * 1: Reserved for Watchdog
- * 2: Reserved for AMTHI
- */
- bitmap_set(dev->host_clients_map, 0, 3);
-
- for (i = 0; i < dev->me_clients_num; i++) {
- client_props = &dev->me_clients[i].props;
-
- if (!uuid_le_cmp(client_props->protocol_name, mei_amthi_guid))
- mei_amthif_host_init(dev);
- else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
- mei_wd_host_init(dev);
- }
-
- dev->dev_state = MEI_DEV_ENABLED;
-
- mutex_unlock(&dev->device_lock);
-}
-
-int mei_host_client_enumerate(struct mei_device *dev)
-{
-
- struct mei_msg_hdr *mei_hdr;
- struct hbm_props_request *prop_req;
- const size_t len = sizeof(struct hbm_props_request);
- unsigned long next_client_index;
- u8 client_num;
-
-
- client_num = dev->me_client_presentation_num;
-
- next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
- dev->me_client_index);
-
- /* We got all client properties */
- if (next_client_index == MEI_CLIENTS_MAX) {
- schedule_work(&dev->init_work);
-
- return 0;
- }
-
- dev->me_clients[client_num].client_id = next_client_index;
- dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
- prop_req = (struct hbm_props_request *)&dev->wr_msg_buf[1];
-
- memset(prop_req, 0, sizeof(struct hbm_props_request));
-
-
- prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
- prop_req->address = next_client_index;
-
- if (mei_write_message(dev, mei_hdr, (unsigned char *) prop_req,
- mei_hdr->length)) {
- dev->dev_state = MEI_DEV_RESETING;
- dev_err(&dev->pdev->dev, "Properties request command failed\n");
- mei_reset(dev, 1);
-
- return -EIO;
- }
-
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- dev->me_client_index = next_client_index;
-
- return 0;
-}
-
-/**
- * mei_init_file_private - initializes private file structure.
- *
- * @priv: private file structure to be initialized
- * @file: the file structure
- */
-void mei_cl_init(struct mei_cl *priv, struct mei_device *dev)
-{
- memset(priv, 0, sizeof(struct mei_cl));
- init_waitqueue_head(&priv->wait);
- init_waitqueue_head(&priv->rx_wait);
- init_waitqueue_head(&priv->tx_wait);
- INIT_LIST_HEAD(&priv->link);
- priv->reading_state = MEI_IDLE;
- priv->writing_state = MEI_IDLE;
- priv->dev = dev;
-}
-
-int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid)
-{
- int i, res = -ENOENT;
-
- for (i = 0; i < dev->me_clients_num; ++i)
- if (uuid_le_cmp(*cuuid,
- dev->me_clients[i].props.protocol_name) == 0) {
- res = i;
- break;
- }
-
- return res;
-}
-
-
-/**
- * mei_me_cl_link - create link between host and me clinet and add
- * me_cl to the list
- *
- * @dev: the device structure
- * @cl: link between me and host client assocated with opened file descriptor
- * @cuuid: uuid of ME client
- * @client_id: id of the host client
- *
- * returns ME client index if ME client
- * -EINVAL on incorrect values
- * -ENONET if client not found
- */
-int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
- const uuid_le *cuuid, u8 host_cl_id)
-{
- int i;
-
- if (!dev || !cl || !cuuid)
- return -EINVAL;
-
- /* check for valid client id */
- i = mei_me_cl_by_uuid(dev, cuuid);
- if (i >= 0) {
- cl->me_client_id = dev->me_clients[i].client_id;
- cl->state = MEI_FILE_CONNECTING;
- cl->host_client_id = host_cl_id;
-
- list_add_tail(&cl->link, &dev->file_list);
- return (u8)i;
- }
-
- return -ENOENT;
-}
-/**
- * mei_me_cl_unlink - remove me_cl from the list
- *
- * @dev: the device structure
- * @host_client_id: host client id to be removed
- */
-void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_cl *pos, *next;
- list_for_each_entry_safe(pos, next, &dev->file_list, link) {
- if (cl->host_client_id == pos->host_client_id) {
- dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
- pos->host_client_id, pos->me_client_id);
- list_del_init(&pos->link);
- break;
- }
- }
-}
+ /* wake up all readings so they can be interrupted */
+ mei_cl_all_read_wakeup(dev);
-/**
- * mei_alloc_file_private - allocates a private file structure and sets it up.
- * @file: the file structure
- *
- * returns The allocated file or NULL on failure
- */
-struct mei_cl *mei_cl_allocate(struct mei_device *dev)
-{
- struct mei_cl *cl;
-
- cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
- if (!cl)
- return NULL;
-
- mei_cl_init(cl, dev);
-
- return cl;
+ /* remove all waiting requests */
+ mei_cl_all_write_clear(dev);
}
-/**
- * mei_disconnect_host_client - sends disconnect message to fw from host client.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_cl_cb *cb;
- int rets, err;
-
- if (!dev || !cl)
- return -ENODEV;
-
- if (cl->state != MEI_FILE_DISCONNECTING)
- return 0;
-
- cb = mei_io_cb_init(cl, NULL);
- if (!cb)
- return -ENOMEM;
-
- cb->fop_type = MEI_FOP_CLOSE;
- if (dev->mei_host_buffer_is_empty) {
- dev->mei_host_buffer_is_empty = false;
- if (mei_disconnect(dev, cl)) {
- rets = -ENODEV;
- dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n");
- goto free;
- }
- mdelay(10); /* Wait for hardware disconnection ready */
- list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
- } else {
- dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-
- }
- mutex_unlock(&dev->device_lock);
-
- err = wait_event_timeout(dev->wait_recvd_msg,
- MEI_FILE_DISCONNECTED == cl->state,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
-
- mutex_lock(&dev->device_lock);
- if (MEI_FILE_DISCONNECTED == cl->state) {
- rets = 0;
- dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
- } else {
- rets = -ENODEV;
- if (MEI_FILE_DISCONNECTED != cl->state)
- dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
-
- if (err)
- dev_dbg(&dev->pdev->dev,
- "wait failed disconnect err=%08x\n",
- err);
-
- dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
- }
-
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
-free:
- mei_io_cb_free(cb);
- return rets;
-}
diff --git a/drivers/misc/mei/interface.c b/drivers/misc/mei/interface.c
deleted file mode 100644
index 8de854785960..000000000000
--- a/drivers/misc/mei/interface.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/pci.h>
-#include "mei_dev.h"
-#include <linux/mei.h>
-#include "interface.h"
-
-
-
-/**
- * mei_set_csr_register - writes H_CSR register to the mei device,
- * and ignores the H_IS bit for it is write-one-to-zero.
- *
- * @dev: the device structure
- */
-void mei_hcsr_set(struct mei_device *dev)
-{
- if ((dev->host_hw_state & H_IS) == H_IS)
- dev->host_hw_state &= ~H_IS;
- mei_reg_write(dev, H_CSR, dev->host_hw_state);
- dev->host_hw_state = mei_hcsr_read(dev);
-}
-
-/**
- * mei_csr_enable_interrupts - enables mei device interrupts
- *
- * @dev: the device structure
- */
-void mei_enable_interrupts(struct mei_device *dev)
-{
- dev->host_hw_state |= H_IE;
- mei_hcsr_set(dev);
-}
-
-/**
- * mei_csr_disable_interrupts - disables mei device interrupts
- *
- * @dev: the device structure
- */
-void mei_disable_interrupts(struct mei_device *dev)
-{
- dev->host_hw_state &= ~H_IE;
- mei_hcsr_set(dev);
-}
-
-/**
- * mei_hbuf_filled_slots - gets number of device filled buffer slots
- *
- * @device: the device structure
- *
- * returns number of filled slots
- */
-static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
-{
- char read_ptr, write_ptr;
-
- dev->host_hw_state = mei_hcsr_read(dev);
-
- read_ptr = (char) ((dev->host_hw_state & H_CBRP) >> 8);
- write_ptr = (char) ((dev->host_hw_state & H_CBWP) >> 16);
-
- return (unsigned char) (write_ptr - read_ptr);
-}
-
-/**
- * mei_hbuf_is_empty - checks if host buffer is empty.
- *
- * @dev: the device structure
- *
- * returns true if empty, false - otherwise.
- */
-bool mei_hbuf_is_empty(struct mei_device *dev)
-{
- return mei_hbuf_filled_slots(dev) == 0;
-}
-
-/**
- * mei_hbuf_empty_slots - counts write empty slots.
- *
- * @dev: the device structure
- *
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count
- */
-int mei_hbuf_empty_slots(struct mei_device *dev)
-{
- unsigned char filled_slots, empty_slots;
-
- filled_slots = mei_hbuf_filled_slots(dev);
- empty_slots = dev->hbuf_depth - filled_slots;
-
- /* check for overflow */
- if (filled_slots > dev->hbuf_depth)
- return -EOVERFLOW;
-
- return empty_slots;
-}
-
-/**
- * mei_write_message - writes a message to mei device.
- *
- * @dev: the device structure
- * @header: header of message
- * @write_buffer: message buffer will be written
- * @write_length: message size will be written
- *
- * This function returns -EIO if write has failed
- */
-int mei_write_message(struct mei_device *dev, struct mei_msg_hdr *header,
- unsigned char *buf, unsigned long length)
-{
- unsigned long rem, dw_cnt;
- u32 *reg_buf = (u32 *)buf;
- int i;
- int empty_slots;
-
-
- dev_dbg(&dev->pdev->dev,
- "mei_write_message header=%08x.\n",
- *((u32 *) header));
-
- empty_slots = mei_hbuf_empty_slots(dev);
- dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
-
- dw_cnt = mei_data2slots(length);
- if (empty_slots < 0 || dw_cnt > empty_slots)
- return -EIO;
-
- mei_reg_write(dev, H_CB_WW, *((u32 *) header));
-
- for (i = 0; i < length / 4; i++)
- mei_reg_write(dev, H_CB_WW, reg_buf[i]);
-
- rem = length & 0x3;
- if (rem > 0) {
- u32 reg = 0;
- memcpy(&reg, &buf[length - rem], rem);
- mei_reg_write(dev, H_CB_WW, reg);
- }
-
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->host_hw_state |= H_IG;
- mei_hcsr_set(dev);
- dev->me_hw_state = mei_mecsr_read(dev);
- if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
- return -EIO;
-
- return 0;
-}
-
-/**
- * mei_count_full_read_slots - counts read full slots.
- *
- * @dev: the device structure
- *
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count
- */
-int mei_count_full_read_slots(struct mei_device *dev)
-{
- char read_ptr, write_ptr;
- unsigned char buffer_depth, filled_slots;
-
- dev->me_hw_state = mei_mecsr_read(dev);
- buffer_depth = (unsigned char)((dev->me_hw_state & ME_CBD_HRA) >> 24);
- read_ptr = (char) ((dev->me_hw_state & ME_CBRP_HRA) >> 8);
- write_ptr = (char) ((dev->me_hw_state & ME_CBWP_HRA) >> 16);
- filled_slots = (unsigned char) (write_ptr - read_ptr);
-
- /* check for overflow */
- if (filled_slots > buffer_depth)
- return -EOVERFLOW;
-
- dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
- return (int)filled_slots;
-}
-
-/**
- * mei_read_slots - reads a message from mei device.
- *
- * @dev: the device structure
- * @buffer: message buffer will be written
- * @buffer_length: message size will be read
- */
-void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
- unsigned long buffer_length)
-{
- u32 *reg_buf = (u32 *)buffer;
-
- for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
- *reg_buf++ = mei_mecbrw_read(dev);
-
- if (buffer_length > 0) {
- u32 reg = mei_mecbrw_read(dev);
- memcpy(reg_buf, &reg, buffer_length);
- }
-
- dev->host_hw_state |= H_IG;
- mei_hcsr_set(dev);
-}
-
-/**
- * mei_flow_ctrl_creds - checks flow_control credentials.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
- * -ENOENT if mei_cl is not present
- * -EINVAL if single_recv_buf == 0
- */
-int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
-{
- int i;
-
- if (!dev->me_clients_num)
- return 0;
-
- if (cl->mei_flow_ctrl_creds > 0)
- return 1;
-
- for (i = 0; i < dev->me_clients_num; i++) {
- struct mei_me_client *me_cl = &dev->me_clients[i];
- if (me_cl->client_id == cl->me_client_id) {
- if (me_cl->mei_flow_ctrl_creds) {
- if (WARN_ON(me_cl->props.single_recv_buf == 0))
- return -EINVAL;
- return 1;
- } else {
- return 0;
- }
- }
- }
- return -ENOENT;
-}
-
-/**
- * mei_flow_ctrl_reduce - reduces flow_control.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- * @returns
- * 0 on success
- * -ENOENT when me client is not found
- * -EINVAL when ctrl credits are <= 0
- */
-int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
-{
- int i;
-
- if (!dev->me_clients_num)
- return -ENOENT;
-
- for (i = 0; i < dev->me_clients_num; i++) {
- struct mei_me_client *me_cl = &dev->me_clients[i];
- if (me_cl->client_id == cl->me_client_id) {
- if (me_cl->props.single_recv_buf != 0) {
- if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
- dev->me_clients[i].mei_flow_ctrl_creds--;
- } else {
- if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
- return -EINVAL;
- cl->mei_flow_ctrl_creds--;
- }
- return 0;
- }
- }
- return -ENOENT;
-}
-
-/**
- * mei_send_flow_control - sends flow control to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_flow_control *flow_ctrl;
- const size_t len = sizeof(struct hbm_flow_control);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- flow_ctrl = (struct hbm_flow_control *)&dev->wr_msg_buf[1];
- memset(flow_ctrl, 0, len);
- flow_ctrl->hbm_cmd = MEI_FLOW_CONTROL_CMD;
- flow_ctrl->host_addr = cl->host_client_id;
- flow_ctrl->me_addr = cl->me_client_id;
- /* FIXME: reserved !? */
- memset(flow_ctrl->reserved, 0, sizeof(flow_ctrl->reserved));
- dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
- cl->host_client_id, cl->me_client_id);
-
- return mei_write_message(dev, mei_hdr,
- (unsigned char *) flow_ctrl, len);
-}
-
-/**
- * mei_other_client_is_connecting - checks if other
- * client with the same client id is connected.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * returns 1 if other client is connected, 0 - otherwise.
- */
-int mei_other_client_is_connecting(struct mei_device *dev,
- struct mei_cl *cl)
-{
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
-
- list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
- if ((cl_pos->state == MEI_FILE_CONNECTING) &&
- (cl_pos != cl) &&
- cl->me_client_id == cl_pos->me_client_id)
- return 1;
-
- }
- return 0;
-}
-
-/**
- * mei_disconnect - sends disconnect message to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_client_connect_request *req;
- const size_t len = sizeof(struct hbm_client_connect_request);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- req = (struct hbm_client_connect_request *)&dev->wr_msg_buf[1];
- memset(req, 0, len);
- req->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
- req->host_addr = cl->host_client_id;
- req->me_addr = cl->me_client_id;
- req->reserved = 0;
-
- return mei_write_message(dev, mei_hdr, (unsigned char *)req, len);
-}
-
-/**
- * mei_connect - sends connect message to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_connect(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_msg_hdr *mei_hdr;
- struct hbm_client_connect_request *req;
- const size_t len = sizeof(struct hbm_client_connect_request);
-
- mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
- req = (struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
- req->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
- req->host_addr = cl->host_client_id;
- req->me_addr = cl->me_client_id;
- req->reserved = 0;
-
- return mei_write_message(dev, mei_hdr, (unsigned char *) req, len);
-}
diff --git a/drivers/misc/mei/interface.h b/drivers/misc/mei/interface.h
deleted file mode 100644
index ec6c785a3961..000000000000
--- a/drivers/misc/mei/interface.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-
-
-#ifndef _MEI_INTERFACE_H_
-#define _MEI_INTERFACE_H_
-
-#include <linux/mei.h>
-#include "mei_dev.h"
-
-
-
-void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer,
- unsigned long buffer_length);
-
-int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *header,
- unsigned char *write_buffer,
- unsigned long write_length);
-
-bool mei_hbuf_is_empty(struct mei_device *dev);
-
-int mei_hbuf_empty_slots(struct mei_device *dev);
-
-static inline size_t mei_hbuf_max_data(const struct mei_device *dev)
-{
- return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
-}
-
-/* get slots (dwords) from a message length + header (bytes) */
-static inline unsigned char mei_data2slots(size_t length)
-{
- return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
-}
-
-int mei_count_full_read_slots(struct mei_device *dev);
-
-
-int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl);
-
-
-
-int mei_wd_send(struct mei_device *dev);
-int mei_wd_stop(struct mei_device *dev);
-int mei_wd_host_init(struct mei_device *dev);
-/*
- * mei_watchdog_register - Registering watchdog interface
- * once we got connection to the WD Client
- * @dev - mei device
- */
-void mei_watchdog_register(struct mei_device *dev);
-/*
- * mei_watchdog_unregister - Unregistering watchdog interface
- * @dev - mei device
- */
-void mei_watchdog_unregister(struct mei_device *dev);
-
-int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl);
-
-int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl);
-
-int mei_disconnect(struct mei_device *dev, struct mei_cl *cl);
-int mei_other_client_is_connecting(struct mei_device *dev, struct mei_cl *cl);
-int mei_connect(struct mei_device *dev, struct mei_cl *cl);
-
-#endif /* _MEI_INTERFACE_H_ */
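
The removed interface.h helpers above pack messages into 4-byte "slots": mei_data2slots() rounds the header plus payload up to whole dwords, and mei_hbuf_max_data() gives the largest payload a host buffer of a given depth can carry. A minimal userspace sketch of that arithmetic follows; the 4-byte header size is an assumption matching sizeof(struct mei_msg_hdr), and all names are local to this example.

#include <stddef.h>
#include <stdio.h>

#define MEI_SLOT_SIZE	4u	/* one slot is one dword on the wire */
#define MEI_HDR_SIZE	4u	/* assumed sizeof(struct mei_msg_hdr) */

/* slots needed for 'length' payload bytes plus the message header */
static unsigned int data2slots(size_t length)
{
	return (MEI_HDR_SIZE + length + MEI_SLOT_SIZE - 1) / MEI_SLOT_SIZE;
}

/* largest payload that fits a host buffer of 'depth' slots */
static size_t hbuf_max_data(unsigned int depth)
{
	return depth * MEI_SLOT_SIZE - MEI_HDR_SIZE;
}

int main(void)
{
	printf("9-byte payload -> %u slots\n", data2slots(9));		/* 4 */
	printf("32-slot buffer -> %zu bytes max\n", hbuf_max_data(32));	/* 124 */
	return 0;
}
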
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 04fa2134615e..3535b2676c97 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -21,41 +21,21 @@
#include <linux/fs.h>
#include <linux/jiffies.h>
-#include "mei_dev.h"
#include <linux/mei.h>
-#include "hw.h"
-#include "interface.h"
-
-
-/**
- * mei_interrupt_quick_handler - The ISR of the MEI device
- *
- * @irq: The irq number
- * @dev_id: pointer to the device structure
- *
- * returns irqreturn_t
- */
-irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id)
-{
- struct mei_device *dev = (struct mei_device *) dev_id;
- u32 csr_reg = mei_hcsr_read(dev);
-
- if ((csr_reg & H_IS) != H_IS)
- return IRQ_NONE;
- /* clear H_IS bit in H_CSR */
- mei_reg_write(dev, H_CSR, csr_reg);
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
- return IRQ_WAKE_THREAD;
-}
/**
- * _mei_cmpl - processes completed operation.
+ * mei_irq_complete_handler - processes completed operation.
*
* @cl: private data of the file object.
* @cb_pos: callback block.
*/
-static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
+void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
{
if (cb_pos->fop_type == MEI_FOP_WRITE) {
mei_io_cb_free(cb_pos);
@@ -150,8 +130,8 @@ quit:
dev_dbg(&dev->pdev->dev, "message read\n");
if (!buffer) {
mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
- dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n",
- *(u32 *) dev->rd_msg_buf);
+ dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
+ MEI_HDR_PRM(mei_hdr));
}
return 0;
@@ -179,7 +159,7 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
- if (mei_disconnect(dev, cl)) {
+ if (mei_hbm_cl_disconnect_req(dev, cl)) {
cl->status = 0;
cb_pos->buf_idx = 0;
list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -195,440 +175,6 @@ static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
return 0;
}
-/**
- * is_treat_specially_client - checks if the message belongs
- * to the file private data.
- *
- * @cl: private data of the file object
- * @rs: connect response bus message
- *
- */
-static bool is_treat_specially_client(struct mei_cl *cl,
- struct hbm_client_connect_response *rs)
-{
-
- if (cl->host_client_id == rs->host_addr &&
- cl->me_client_id == rs->me_addr) {
- if (!rs->status) {
- cl->state = MEI_FILE_CONNECTED;
- cl->status = 0;
-
- } else {
- cl->state = MEI_FILE_DISCONNECTED;
- cl->status = -ENODEV;
- }
- cl->timer_count = 0;
-
- return true;
- }
- return false;
-}
-
-/**
- * mei_client_connect_response - connects to response irq routine
- *
- * @dev: the device structure
- * @rs: connect response bus message
- */
-static void mei_client_connect_response(struct mei_device *dev,
- struct hbm_client_connect_response *rs)
-{
-
- struct mei_cl *cl;
- struct mei_cl_cb *pos = NULL, *next = NULL;
-
- dev_dbg(&dev->pdev->dev,
- "connect_response:\n"
- "ME Client = %d\n"
- "Host Client = %d\n"
- "Status = %d\n",
- rs->me_addr,
- rs->host_addr,
- rs->status);
-
- /* if WD or iamthif client treat specially */
-
- if (is_treat_specially_client(&(dev->wd_cl), rs)) {
- dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
- mei_watchdog_register(dev);
-
- return;
- }
-
- if (is_treat_specially_client(&(dev->iamthif_cl), rs)) {
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- return;
- }
- list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
-
- cl = pos->cl;
- if (!cl) {
- list_del(&pos->list);
- return;
- }
- if (pos->fop_type == MEI_FOP_IOCTL) {
- if (is_treat_specially_client(cl, rs)) {
- list_del(&pos->list);
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
- }
- }
-}
-
-/**
- * mei_client_disconnect_response - disconnects from response irq routine
- *
- * @dev: the device structure
- * @rs: disconnect response bus message
- */
-static void mei_client_disconnect_response(struct mei_device *dev,
- struct hbm_client_connect_response *rs)
-{
- struct mei_cl *cl;
- struct mei_cl_cb *pos = NULL, *next = NULL;
-
- dev_dbg(&dev->pdev->dev,
- "disconnect_response:\n"
- "ME Client = %d\n"
- "Host Client = %d\n"
- "Status = %d\n",
- rs->me_addr,
- rs->host_addr,
- rs->status);
-
- list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
- cl = pos->cl;
-
- if (!cl) {
- list_del(&pos->list);
- return;
- }
-
- dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
- if (cl->host_client_id == rs->host_addr &&
- cl->me_client_id == rs->me_addr) {
-
- list_del(&pos->list);
- if (!rs->status)
- cl->state = MEI_FILE_DISCONNECTED;
-
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
- }
-}
-
-/**
- * same_flow_addr - tells if they have the same address.
- *
- * @file: private data of the file object.
- * @flow: flow control.
- *
- * returns !=0, same; 0,not.
- */
-static int same_flow_addr(struct mei_cl *cl, struct hbm_flow_control *flow)
-{
- return (cl->host_client_id == flow->host_addr &&
- cl->me_client_id == flow->me_addr);
-}
-
-/**
- * add_single_flow_creds - adds single buffer credentials.
- *
- * @file: private data ot the file object.
- * @flow: flow control.
- */
-static void add_single_flow_creds(struct mei_device *dev,
- struct hbm_flow_control *flow)
-{
- struct mei_me_client *client;
- int i;
-
- for (i = 0; i < dev->me_clients_num; i++) {
- client = &dev->me_clients[i];
- if (client && flow->me_addr == client->client_id) {
- if (client->props.single_recv_buf) {
- client->mei_flow_ctrl_creds++;
- dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
- flow->me_addr);
- dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
- client->mei_flow_ctrl_creds);
- } else {
- BUG(); /* error in flow control */
- }
- }
- }
-}
-
-/**
- * mei_client_flow_control_response - flow control response irq routine
- *
- * @dev: the device structure
- * @flow_control: flow control response bus message
- */
-static void mei_client_flow_control_response(struct mei_device *dev,
- struct hbm_flow_control *flow_control)
-{
- struct mei_cl *cl_pos = NULL;
- struct mei_cl *cl_next = NULL;
-
- if (!flow_control->host_addr) {
- /* single receive buffer */
- add_single_flow_creds(dev, flow_control);
- } else {
- /* normal connection */
- list_for_each_entry_safe(cl_pos, cl_next,
- &dev->file_list, link) {
- dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in file_list\n");
-
- dev_dbg(&dev->pdev->dev, "cl of host client %d ME client %d.\n",
- cl_pos->host_client_id,
- cl_pos->me_client_id);
- dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
- flow_control->host_addr,
- flow_control->me_addr);
- if (same_flow_addr(cl_pos, flow_control)) {
- dev_dbg(&dev->pdev->dev, "recv ctrl msg for host %d ME %d.\n",
- flow_control->host_addr,
- flow_control->me_addr);
- cl_pos->mei_flow_ctrl_creds++;
- dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
- cl_pos->mei_flow_ctrl_creds);
- break;
- }
- }
- }
-}
-
-/**
- * same_disconn_addr - tells if they have the same address
- *
- * @file: private data of the file object.
- * @disconn: disconnection request.
- *
- * returns !=0, same; 0,not.
- */
-static int same_disconn_addr(struct mei_cl *cl,
- struct hbm_client_connect_request *req)
-{
- return (cl->host_client_id == req->host_addr &&
- cl->me_client_id == req->me_addr);
-}
-
-/**
- * mei_client_disconnect_request - disconnects from request irq routine
- *
- * @dev: the device structure.
- * @disconnect_req: disconnect request bus message.
- */
-static void mei_client_disconnect_request(struct mei_device *dev,
- struct hbm_client_connect_request *disconnect_req)
-{
- struct hbm_client_connect_response *disconnect_res;
- struct mei_cl *pos, *next;
- const size_t len = sizeof(struct hbm_client_connect_response);
-
- list_for_each_entry_safe(pos, next, &dev->file_list, link) {
- if (same_disconn_addr(pos, disconnect_req)) {
- dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
- disconnect_req->host_addr,
- disconnect_req->me_addr);
- pos->state = MEI_FILE_DISCONNECTED;
- pos->timer_count = 0;
- if (pos == &dev->wd_cl)
- dev->wd_pending = false;
- else if (pos == &dev->iamthif_cl)
- dev->iamthif_timer = 0;
-
- /* prepare disconnect response */
- (void)mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
- disconnect_res =
- (struct hbm_client_connect_response *)
- &dev->wr_ext_msg.data;
- disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD;
- disconnect_res->host_addr = pos->host_client_id;
- disconnect_res->me_addr = pos->me_client_id;
- disconnect_res->status = 0;
- break;
- }
- }
-}
-
-/**
- * mei_irq_thread_read_bus_message - bottom half read routine after ISR to
- * handle the read bus message cmd processing.
- *
- * @dev: the device structure
- * @mei_hdr: header of bus message
- */
-static void mei_irq_thread_read_bus_message(struct mei_device *dev,
- struct mei_msg_hdr *mei_hdr)
-{
- struct mei_bus_message *mei_msg;
- struct mei_me_client *me_client;
- struct hbm_host_version_response *version_res;
- struct hbm_client_connect_response *connect_res;
- struct hbm_client_connect_response *disconnect_res;
- struct hbm_client_connect_request *disconnect_req;
- struct hbm_flow_control *flow_control;
- struct hbm_props_response *props_res;
- struct hbm_host_enum_response *enum_res;
- struct hbm_host_stop_request *stop_req;
-
- /* read the message to our buffer */
- BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
- mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
- mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
-
- switch (mei_msg->hbm_cmd) {
- case HOST_START_RES_CMD:
- version_res = (struct hbm_host_version_response *) mei_msg;
- if (version_res->host_version_supported) {
- dev->version.major_version = HBM_MAJOR_VERSION;
- dev->version.minor_version = HBM_MINOR_VERSION;
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
- dev->init_clients_state == MEI_START_MESSAGE) {
- dev->init_clients_timer = 0;
- mei_host_enum_clients_message(dev);
- } else {
- dev->recvd_msg = false;
- dev_dbg(&dev->pdev->dev, "IMEI reset due to received host start response bus message.\n");
- mei_reset(dev, 1);
- return;
- }
- } else {
- u32 *buf = dev->wr_msg_buf;
- const size_t len = sizeof(struct hbm_host_stop_request);
-
- dev->version = version_res->me_max_version;
-
- /* send stop message */
- mei_hdr = mei_hbm_hdr(&buf[0], len);
- stop_req = (struct hbm_host_stop_request *)&buf[1];
- memset(stop_req, 0, len);
- stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
- stop_req->reason = DRIVER_STOP_REQUEST;
-
- mei_write_message(dev, mei_hdr,
- (unsigned char *)stop_req, len);
- dev_dbg(&dev->pdev->dev, "version mismatch.\n");
- return;
- }
-
- dev->recvd_msg = true;
- dev_dbg(&dev->pdev->dev, "host start response message received.\n");
- break;
-
- case CLIENT_CONNECT_RES_CMD:
- connect_res = (struct hbm_client_connect_response *) mei_msg;
- mei_client_connect_response(dev, connect_res);
- dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
- wake_up(&dev->wait_recvd_msg);
- break;
-
- case CLIENT_DISCONNECT_RES_CMD:
- disconnect_res = (struct hbm_client_connect_response *) mei_msg;
- mei_client_disconnect_response(dev, disconnect_res);
- dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
- wake_up(&dev->wait_recvd_msg);
- break;
-
- case MEI_FLOW_CONTROL_CMD:
- flow_control = (struct hbm_flow_control *) mei_msg;
- mei_client_flow_control_response(dev, flow_control);
- dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
- break;
-
- case HOST_CLIENT_PROPERTIES_RES_CMD:
- props_res = (struct hbm_props_response *)mei_msg;
- me_client = &dev->me_clients[dev->me_client_presentation_num];
-
- if (props_res->status || !dev->me_clients) {
- dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
- mei_reset(dev, 1);
- return;
- }
-
- if (me_client->client_id != props_res->address) {
- dev_err(&dev->pdev->dev,
- "Host client properties reply mismatch\n");
- mei_reset(dev, 1);
-
- return;
- }
-
- if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
- dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
- dev_err(&dev->pdev->dev,
- "Unexpected client properties reply\n");
- mei_reset(dev, 1);
-
- return;
- }
-
- me_client->props = props_res->client_properties;
- dev->me_client_index++;
- dev->me_client_presentation_num++;
-
- mei_host_client_enumerate(dev);
-
- break;
-
- case HOST_ENUM_RES_CMD:
- enum_res = (struct hbm_host_enum_response *) mei_msg;
- memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
- if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
- dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
- dev->init_clients_timer = 0;
- dev->me_client_presentation_num = 0;
- dev->me_client_index = 0;
- mei_allocate_me_clients_storage(dev);
- dev->init_clients_state =
- MEI_CLIENT_PROPERTIES_MESSAGE;
-
- mei_host_client_enumerate(dev);
- } else {
- dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
- mei_reset(dev, 1);
- return;
- }
- break;
-
- case HOST_STOP_RES_CMD:
- dev->dev_state = MEI_DEV_DISABLED;
- dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
- mei_reset(dev, 1);
- break;
-
- case CLIENT_DISCONNECT_REQ_CMD:
- /* search for client */
- disconnect_req = (struct hbm_client_connect_request *)mei_msg;
- mei_client_disconnect_request(dev, disconnect_req);
- break;
-
- case ME_STOP_REQ_CMD:
- {
- /* prepare stop request: sent in next interrupt event */
-
- const size_t len = sizeof(struct hbm_host_stop_request);
-
- mei_hdr = mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
- stop_req = (struct hbm_host_stop_request *)&dev->wr_ext_msg.data;
- memset(stop_req, 0, len);
- stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
- stop_req->reason = DRIVER_STOP_REQUEST;
- break;
- }
- default:
- BUG();
- break;
-
- }
-}
-
/**
* _mei_hb_read - processes read related operation.
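
The flow-control handlers deleted above (add_single_flow_creds() and mei_client_flow_control_response(), whose processing now goes through the new hbm.h interface) grant one credit per HBM flow-control message: a zero host_addr marks a single-receive-buffer ME client whose credit is tracked on the ME client entry, otherwise the credit goes to the matching connection. A standalone sketch of that accounting, with illustrative names and reduced structures:

struct me_client  { unsigned char id; unsigned char creds; int single_recv_buf; };
struct connection { unsigned char host_addr; unsigned char me_addr; unsigned char creds; };

/* host_addr == 0: credit belongs to the ME client (shared receive buffer) */
static void grant_credit(struct me_client *me, int n_me,
			 struct connection *conn, int n_conn,
			 unsigned char host_addr, unsigned char me_addr)
{
	int i;

	if (host_addr == 0) {
		for (i = 0; i < n_me; i++)
			if (me[i].id == me_addr && me[i].single_recv_buf)
				me[i].creds++;
		return;
	}
	for (i = 0; i < n_conn; i++)		/* per-connection credit */
		if (conn[i].host_addr == host_addr && conn[i].me_addr == me_addr) {
			conn[i].creds++;
			break;
		}
}

int main(void)
{
	struct me_client me[] = { { .id = 2, .single_recv_buf = 1 } };
	struct connection conn[] = { { .host_addr = 1, .me_addr = 3 } };

	grant_credit(me, 1, conn, 1, 0, 2);	/* -> me[0].creds == 1 */
	grant_credit(me, 1, conn, 1, 1, 3);	/* -> conn[0].creds == 1 */
	return me[0].creds == 1 && conn[0].creds == 1 ? 0 : 1;
}
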
@@ -655,7 +201,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
*slots -= mei_data2slots(sizeof(struct hbm_flow_control));
- if (mei_send_flow_control(dev, cl)) {
+ if (mei_hbm_cl_flow_control_req(dev, cl)) {
cl->status = -ENODEV;
cb_pos->buf_idx = 0;
list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -691,8 +237,8 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
}
cl->state = MEI_FILE_CONNECTING;
- *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
- if (mei_connect(dev, cl)) {
+ *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
+ if (mei_hbm_cl_connect_req(dev, cl)) {
cl->status = -ENODEV;
cb_pos->buf_idx = 0;
list_del(&cb_pos->list);
@@ -717,25 +263,24 @@ static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
- struct mei_msg_hdr *mei_hdr;
+ struct mei_msg_hdr mei_hdr;
struct mei_cl *cl = cb->cl;
size_t len = cb->request_buffer.size - cb->buf_idx;
size_t msg_slots = mei_data2slots(len);
- mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
- mei_hdr->host_addr = cl->host_client_id;
- mei_hdr->me_addr = cl->me_client_id;
- mei_hdr->reserved = 0;
+ mei_hdr.host_addr = cl->host_client_id;
+ mei_hdr.me_addr = cl->me_client_id;
+ mei_hdr.reserved = 0;
if (*slots >= msg_slots) {
- mei_hdr->length = len;
- mei_hdr->msg_complete = 1;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 1;
/* Split the message only if we can write the whole host buffer */
} else if (*slots == dev->hbuf_depth) {
msg_slots = *slots;
len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
- mei_hdr->length = len;
- mei_hdr->msg_complete = 0;
+ mei_hdr.length = len;
+ mei_hdr.msg_complete = 0;
} else {
/* wait for next time the host buffer is empty */
return 0;
@@ -743,23 +288,22 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
cb->request_buffer.size, cb->buf_idx);
- dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
- mei_hdr->length, mei_hdr->msg_complete);
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
*slots -= msg_slots;
- if (mei_write_message(dev, mei_hdr,
- cb->request_buffer.data + cb->buf_idx, len)) {
+ if (mei_write_message(dev, &mei_hdr,
+ cb->request_buffer.data + cb->buf_idx)) {
cl->status = -ENODEV;
list_move_tail(&cb->list, &cmpl_list->list);
return -ENODEV;
}
- if (mei_flow_ctrl_reduce(dev, cl))
+ if (mei_cl_flow_ctrl_reduce(cl))
return -ENODEV;
cl->status = 0;
- cb->buf_idx += mei_hdr->length;
- if (mei_hdr->msg_complete)
+ cb->buf_idx += mei_hdr.length;
+ if (mei_hdr.msg_complete)
list_move_tail(&cb->list, &dev->write_waiting_list.list);
return 0;
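
The rewritten mei_irq_thread_write_complete() above distinguishes three cases: emit the whole remaining message when enough slots are free, split it when the host buffer is completely empty but still too small, or wait for the next empty buffer otherwise. A compilable sketch of that decision, with simplified types and the 4-byte header size as an assumption:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SLOT_SIZE 4u
#define HDR_SIZE  4u			/* assumed header size */

static size_t msg_slots(size_t len)
{
	return (HDR_SIZE + len + SLOT_SIZE - 1) / SLOT_SIZE;
}

/* how many payload bytes to emit now; 0 means wait for an empty buffer */
static size_t bytes_to_send(size_t remaining, size_t free_slots,
			    size_t hbuf_depth, bool *complete)
{
	if (free_slots >= msg_slots(remaining)) {
		*complete = true;		/* whole message fits */
		return remaining;
	}
	if (free_slots == hbuf_depth) {		/* buffer empty: split */
		*complete = false;
		return free_slots * SLOT_SIZE - HDR_SIZE;
	}
	*complete = false;			/* try again later */
	return 0;
}

int main(void)
{
	bool done;

	/* 100 bytes left, 10 of 32 slots free: wait (prints 0) */
	printf("%zu\n", bytes_to_send(100, 10, 32, &done));
	/* 200 bytes left, all 32 slots free: split at 124 bytes */
	printf("%zu\n", bytes_to_send(200, 32, 32, &done));
	return 0;
}
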
@@ -769,15 +313,14 @@ static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
* mei_irq_thread_read_handler - bottom half read routine after ISR to
* handle the read processing.
*
- * @cmpl_list: An instance of our list structure
* @dev: the device structure
+ * @cmpl_list: An instance of our list structure
* @slots: slots to read.
*
* returns 0 on success, <0 on failure.
*/
-static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
- struct mei_device *dev,
- s32 *slots)
+int mei_irq_read_handler(struct mei_device *dev,
+ struct mei_cl_cb *cmpl_list, s32 *slots)
{
struct mei_msg_hdr *mei_hdr;
struct mei_cl *cl_pos = NULL;
@@ -785,13 +328,13 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
int ret = 0;
if (!dev->rd_msg_hdr) {
- dev->rd_msg_hdr = mei_mecbrw_read(dev);
+ dev->rd_msg_hdr = mei_read_hdr(dev);
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
(*slots)--;
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
}
mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
- dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n", mei_hdr->length);
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
if (mei_hdr->reserved || !dev->rd_msg_hdr) {
dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
@@ -830,19 +373,18 @@ static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
/* decide where to read the message too */
if (!mei_hdr->host_addr) {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
- mei_irq_thread_read_bus_message(dev, mei_hdr);
+ mei_hbm_dispatch(dev, mei_hdr);
dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
(MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
(dev->iamthif_state == MEI_IAMTHIF_READING)) {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
- dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
- mei_hdr->length);
+
+ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);
if (ret)
goto end;
-
} else {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
ret = mei_irq_thread_read_client_message(cmpl_list,
@@ -869,15 +411,15 @@ end:
/**
- * mei_irq_thread_write_handler - bottom half write routine after
- * ISR to handle the write processing.
+ * mei_irq_write_handler - dispatch write requests
+ * after an interrupt has been received
*
* @dev: the device structure
* @cmpl_list: An instance of our list structure
*
* returns 0 on success, <0 on failure.
*/
-static int mei_irq_thread_write_handler(struct mei_device *dev,
+int mei_irq_write_handler(struct mei_device *dev,
struct mei_cl_cb *cmpl_list)
{
@@ -887,7 +429,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
s32 slots;
int ret;
- if (!mei_hbuf_is_empty(dev)) {
+ if (!mei_hbuf_is_ready(dev)) {
dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
return 0;
}
@@ -930,16 +472,16 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
if (dev->wr_ext_msg.hdr.length) {
mei_write_message(dev, &dev->wr_ext_msg.hdr,
- dev->wr_ext_msg.data, dev->wr_ext_msg.hdr.length);
+ dev->wr_ext_msg.data);
slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
dev->wr_ext_msg.hdr.length = 0;
}
if (dev->dev_state == MEI_DEV_ENABLED) {
if (dev->wd_pending &&
- mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
+ mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
if (mei_wd_send(dev))
dev_dbg(&dev->pdev->dev, "wd send failed.\n");
- else if (mei_flow_ctrl_reduce(dev, &dev->wd_cl))
+ else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
return -ENODEV;
dev->wd_pending = false;
@@ -978,7 +520,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
break;
case MEI_FOP_IOCTL:
/* connect message */
- if (mei_other_client_is_connecting(dev, cl))
+ if (mei_cl_is_other_connecting(cl))
continue;
ret = _mei_irq_thread_ioctl(dev, &slots, pos,
cl, cmpl_list);
@@ -998,7 +540,7 @@ static int mei_irq_thread_write_handler(struct mei_device *dev,
cl = pos->cl;
if (cl == NULL)
continue;
- if (mei_flow_ctrl_creds(dev, cl) <= 0) {
+ if (mei_cl_flow_ctrl_creds(cl) <= 0) {
dev_dbg(&dev->pdev->dev,
"No flow control credentials for client %d, not sending.\n",
cl->host_client_id);
@@ -1123,115 +665,3 @@ out:
mutex_unlock(&dev->device_lock);
}
-/**
- * mei_interrupt_thread_handler - function called after ISR to handle the interrupt
- * processing.
- *
- * @irq: The irq number
- * @dev_id: pointer to the device structure
- *
- * returns irqreturn_t
- *
- */
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
-{
- struct mei_device *dev = (struct mei_device *) dev_id;
- struct mei_cl_cb complete_list;
- struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
- struct mei_cl *cl;
- s32 slots;
- int rets;
- bool bus_message_received;
-
-
- dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
- /* initialize our complete list */
- mutex_lock(&dev->device_lock);
- mei_io_list_init(&complete_list);
- dev->host_hw_state = mei_hcsr_read(dev);
-
- /* Ack the interrupt here
- * In case of MSI we don't go through the quick handler */
- if (pci_dev_msi_enabled(dev->pdev))
- mei_reg_write(dev, H_CSR, dev->host_hw_state);
-
- dev->me_hw_state = mei_mecsr_read(dev);
-
- /* check if ME wants a reset */
- if ((dev->me_hw_state & ME_RDY_HRA) == 0 &&
- dev->dev_state != MEI_DEV_RESETING &&
- dev->dev_state != MEI_DEV_INITIALIZING) {
- dev_dbg(&dev->pdev->dev, "FW not ready.\n");
- mei_reset(dev, 1);
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
- }
-
- /* check if we need to start the dev */
- if ((dev->host_hw_state & H_RDY) == 0) {
- if ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA) {
- dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
- dev->host_hw_state |= (H_IE | H_IG | H_RDY);
- mei_hcsr_set(dev);
- dev->dev_state = MEI_DEV_INIT_CLIENTS;
- dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
- /* link is established
- * start sending messages.
- */
- mei_host_start_message(dev);
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
- } else {
- dev_dbg(&dev->pdev->dev, "FW not ready.\n");
- mutex_unlock(&dev->device_lock);
- return IRQ_HANDLED;
- }
- }
- /* check slots available for reading */
- slots = mei_count_full_read_slots(dev);
- while (slots > 0) {
- /* we have urgent data to send so break the read */
- if (dev->wr_ext_msg.hdr.length)
- break;
- dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
- dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n");
- rets = mei_irq_thread_read_handler(&complete_list, dev, &slots);
- if (rets)
- goto end;
- }
- rets = mei_irq_thread_write_handler(dev, &complete_list);
-end:
- dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
- dev->host_hw_state = mei_hcsr_read(dev);
- dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
-
- bus_message_received = false;
- if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
- dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
- bus_message_received = true;
- }
- mutex_unlock(&dev->device_lock);
- if (bus_message_received) {
- dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
- wake_up_interruptible(&dev->wait_recvd_msg);
- bus_message_received = false;
- }
- if (list_empty(&complete_list.list))
- return IRQ_HANDLED;
-
-
- list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
- cl = cb_pos->cl;
- list_del(&cb_pos->list);
- if (cl) {
- if (cl != &dev->iamthif_cl) {
- dev_dbg(&dev->pdev->dev, "completing call back.\n");
- _mei_cmpl(cl, cb_pos);
- cb_pos = NULL;
- } else if (cl == &dev->iamthif_cl) {
- mei_amthif_complete(dev, cb_pos);
- }
- }
- }
- return IRQ_HANDLED;
-}
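
The handlers deleted above implemented the usual top/bottom-half split for threaded IRQs: mei_interrupt_quick_handler() only tested and acknowledged H_IS in H_CSR and returned IRQ_WAKE_THREAD, while mei_interrupt_thread_handler() did the heavy lifting under device_lock; given the new hw-me.h include, the equivalent handlers are expected to live with the hardware-specific code while the read/write helpers here become exported. A userspace illustration of the quick-handler side, with stand-in types (the H_IS bit value is taken from the driver's register layout; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define H_IS (1u << 1)			/* host interrupt status bit in H_CSR */

enum irq_action { IRQ_NONE_ACTION, IRQ_WAKE_THREAD_ACTION };

/* quick handler: runs in hard-irq context, so it only acks and defers */
static enum irq_action quick_handler(uint32_t h_csr)
{
	if ((h_csr & H_IS) != H_IS)
		return IRQ_NONE_ACTION;		/* not our interrupt */
	/* the real handler writes h_csr back to H_CSR here, clearing H_IS */
	return IRQ_WAKE_THREAD_ACTION;		/* heavy work runs in the thread */
}

int main(void)
{
	uint32_t csr = H_IS;

	printf("action = %d\n", quick_handler(csr));	/* 1: wake the thread */
	return 0;
}
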
diff --git a/drivers/misc/mei/iorw.c b/drivers/misc/mei/iorw.c
deleted file mode 100644
index eb93a1b53b9b..000000000000
--- a/drivers/misc/mei/iorw.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/aio.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ioctl.h>
-#include <linux/cdev.h>
-#include <linux/list.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/uuid.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-
-
-#include "mei_dev.h"
-#include "hw.h"
-#include <linux/mei.h>
-#include "interface.h"
-
-/**
- * mei_io_cb_free - free mei_cb_private related memory
- *
- * @cb: mei callback struct
- */
-void mei_io_cb_free(struct mei_cl_cb *cb)
-{
- if (cb == NULL)
- return;
-
- kfree(cb->request_buffer.data);
- kfree(cb->response_buffer.data);
- kfree(cb);
-}
-/**
- * mei_io_cb_init - allocate and initialize io callback
- *
- * @cl - mei client
- * @file: pointer to file structure
- *
- * returns mei_cl_cb pointer or NULL;
- */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
-{
- struct mei_cl_cb *cb;
-
- cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
- if (!cb)
- return NULL;
-
- mei_io_list_init(cb);
-
- cb->file_object = fp;
- cb->cl = cl;
- cb->buf_idx = 0;
- return cb;
-}
-
-
-/**
- * mei_io_cb_alloc_req_buf - allocate request buffer
- *
- * @cb - io callback structure
- * @size: size of the buffer
- *
- * returns 0 on success
- * -EINVAL if cb is NULL
- * -ENOMEM if allocation failed
- */
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
-{
- if (!cb)
- return -EINVAL;
-
- if (length == 0)
- return 0;
-
- cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
- if (!cb->request_buffer.data)
- return -ENOMEM;
- cb->request_buffer.size = length;
- return 0;
-}
-/**
- * mei_io_cb_alloc_req_buf - allocate respose buffer
- *
- * @cb - io callback structure
- * @size: size of the buffer
- *
- * returns 0 on success
- * -EINVAL if cb is NULL
- * -ENOMEM if allocation failed
- */
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
-{
- if (!cb)
- return -EINVAL;
-
- if (length == 0)
- return 0;
-
- cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
- if (!cb->response_buffer.data)
- return -ENOMEM;
- cb->response_buffer.size = length;
- return 0;
-}
-
-
-/**
- * mei_me_cl_by_id return index to me_clients for client_id
- *
- * @dev: the device structure
- * @client_id: me client id
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns index on success, -ENOENT on failure.
- */
-
-int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
-{
- int i;
- for (i = 0; i < dev->me_clients_num; i++)
- if (dev->me_clients[i].client_id == client_id)
- break;
- if (WARN_ON(dev->me_clients[i].client_id != client_id))
- return -ENOENT;
-
- if (i == dev->me_clients_num)
- return -ENOENT;
-
- return i;
-}
-
-/**
- * mei_ioctl_connect_client - the connect to fw client IOCTL function
- *
- * @dev: the device structure
- * @data: IOCTL connect data, input and output parameters
- * @file: private data of the file object
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_ioctl_connect_client(struct file *file,
- struct mei_connect_client_data *data)
-{
- struct mei_device *dev;
- struct mei_cl_cb *cb;
- struct mei_client *client;
- struct mei_cl *cl;
- long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
- int i;
- int err;
- int rets;
-
- cl = file->private_data;
- if (WARN_ON(!cl || !cl->dev))
- return -ENODEV;
-
- dev = cl->dev;
-
- dev_dbg(&dev->pdev->dev, "mei_ioctl_connect_client() Entry\n");
-
- /* buffered ioctl cb */
- cb = mei_io_cb_init(cl, file);
- if (!cb) {
- rets = -ENOMEM;
- goto end;
- }
-
- cb->fop_type = MEI_FOP_IOCTL;
-
- if (dev->dev_state != MEI_DEV_ENABLED) {
- rets = -ENODEV;
- goto end;
- }
- if (cl->state != MEI_FILE_INITIALIZING &&
- cl->state != MEI_FILE_DISCONNECTED) {
- rets = -EBUSY;
- goto end;
- }
-
- /* find ME client we're trying to connect to */
- i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
- if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
- cl->me_client_id = dev->me_clients[i].client_id;
- cl->state = MEI_FILE_CONNECTING;
- }
-
- dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
- cl->me_client_id);
- dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
- dev->me_clients[i].props.protocol_version);
- dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
- dev->me_clients[i].props.max_msg_length);
-
- /* if we're connecting to amthi client then we will use the
- * existing connection
- */
- if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) {
- dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
- if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
- rets = -ENODEV;
- goto end;
- }
- clear_bit(cl->host_client_id, dev->host_clients_map);
- mei_me_cl_unlink(dev, cl);
-
- kfree(cl);
- cl = NULL;
- file->private_data = &dev->iamthif_cl;
-
- client = &data->out_client_properties;
- client->max_msg_length =
- dev->me_clients[i].props.max_msg_length;
- client->protocol_version =
- dev->me_clients[i].props.protocol_version;
- rets = dev->iamthif_cl.status;
-
- goto end;
- }
-
- if (cl->state != MEI_FILE_CONNECTING) {
- rets = -ENODEV;
- goto end;
- }
-
-
- /* prepare the output buffer */
- client = &data->out_client_properties;
- client->max_msg_length = dev->me_clients[i].props.max_msg_length;
- client->protocol_version = dev->me_clients[i].props.protocol_version;
- dev_dbg(&dev->pdev->dev, "Can connect?\n");
- if (dev->mei_host_buffer_is_empty
- && !mei_other_client_is_connecting(dev, cl)) {
- dev_dbg(&dev->pdev->dev, "Sending Connect Message\n");
- dev->mei_host_buffer_is_empty = false;
- if (mei_connect(dev, cl)) {
- dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n");
- rets = -ENODEV;
- goto end;
- } else {
- dev_dbg(&dev->pdev->dev, "Sending connect message - succeeded\n");
- cl->timer_count = MEI_CONNECT_TIMEOUT;
- list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
- }
-
-
- } else {
- dev_dbg(&dev->pdev->dev, "Queuing the connect request due to device busy\n");
- dev_dbg(&dev->pdev->dev, "add connect cb to control write list.\n");
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
- }
- mutex_unlock(&dev->device_lock);
- err = wait_event_timeout(dev->wait_recvd_msg,
- (MEI_FILE_CONNECTED == cl->state ||
- MEI_FILE_DISCONNECTED == cl->state), timeout);
-
- mutex_lock(&dev->device_lock);
- if (MEI_FILE_CONNECTED == cl->state) {
- dev_dbg(&dev->pdev->dev, "successfully connected to FW client.\n");
- rets = cl->status;
- goto end;
- } else {
- dev_dbg(&dev->pdev->dev, "failed to connect to FW client.cl->state = %d.\n",
- cl->state);
- if (!err) {
- dev_dbg(&dev->pdev->dev,
- "wait_event_interruptible_timeout failed on client"
- " connect message fw response message.\n");
- }
- rets = -EFAULT;
-
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
- goto end;
- }
- rets = 0;
-end:
- dev_dbg(&dev->pdev->dev, "free connect cb memory.");
- mei_io_cb_free(cb);
- return rets;
-}
-
-/**
- * mei_start_read - the start read client message function.
- *
- * @dev: the device structure
- * @if_num: minor number
- * @cl: private data of the file object
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
-{
- struct mei_cl_cb *cb;
- int rets;
- int i;
-
- if (cl->state != MEI_FILE_CONNECTED)
- return -ENODEV;
-
- if (dev->dev_state != MEI_DEV_ENABLED)
- return -ENODEV;
-
- if (cl->read_pending || cl->read_cb) {
- dev_dbg(&dev->pdev->dev, "read is pending.\n");
- return -EBUSY;
- }
- i = mei_me_cl_by_id(dev, cl->me_client_id);
- if (i < 0) {
- dev_err(&dev->pdev->dev, "no such me client %d\n",
- cl->me_client_id);
- return -ENODEV;
- }
-
- cb = mei_io_cb_init(cl, NULL);
- if (!cb)
- return -ENOMEM;
-
- rets = mei_io_cb_alloc_resp_buf(cb,
- dev->me_clients[i].props.max_msg_length);
- if (rets)
- goto err;
-
- cb->fop_type = MEI_FOP_READ;
- cl->read_cb = cb;
- if (dev->mei_host_buffer_is_empty) {
- dev->mei_host_buffer_is_empty = false;
- if (mei_send_flow_control(dev, cl)) {
- rets = -ENODEV;
- goto err;
- }
- list_add_tail(&cb->list, &dev->read_list.list);
- } else {
- list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
- }
- return rets;
-err:
- mei_io_cb_free(cb);
- return rets;
-}
-
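
The mei_io_cb_* helpers removed with iorw.c (they move to client.c) manage a callback that owns separate request and response buffers, both released together with the callback itself. A reduced userspace sketch of that lifecycle, with illustrative names:

#include <stdlib.h>
#include <string.h>

struct buf   { unsigned char *data; size_t size; };
struct io_cb { struct buf request; struct buf response; };

static struct io_cb *cb_init(void)
{
	return calloc(1, sizeof(struct io_cb));
}

static int cb_alloc_req(struct io_cb *cb, size_t length)
{
	if (!cb)
		return -1;
	if (length == 0)
		return 0;
	cb->request.data = malloc(length);
	if (!cb->request.data)
		return -1;
	cb->request.size = length;
	return 0;
}

/* both buffers belong to the callback and are released with it */
static void cb_free(struct io_cb *cb)
{
	if (!cb)
		return;
	free(cb->request.data);
	free(cb->response.data);
	free(cb);
}

int main(void)
{
	struct io_cb *cb = cb_init();

	if (!cb)
		return 1;
	if (cb_alloc_req(cb, 64)) {
		cb_free(cb);
		return 1;
	}
	memset(cb->request.data, 0, cb->request.size);
	cb_free(cb);
	return 0;
}
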
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 43fb52ff98ad..903f809b21f7 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -37,79 +37,11 @@
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
-#include "mei_dev.h"
#include <linux/mei.h>
-#include "interface.h"
-
-/* AMT device is a singleton on the platform */
-static struct pci_dev *mei_pdev;
-
-/* mei_pci_tbl - PCI Device ID Table */
-static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
-
- /* required last entry */
- {0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
-static DEFINE_MUTEX(mei_mutex);
-
-
-/**
- * find_read_list_entry - find read list entry
- *
- * @dev: device structure
- * @file: pointer to file structure
- *
- * returns cb on success, NULL on error
- */
-static struct mei_cl_cb *find_read_list_entry(
- struct mei_device *dev,
- struct mei_cl *cl)
-{
- struct mei_cl_cb *pos = NULL;
- struct mei_cl_cb *next = NULL;
-
- dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
- list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
- if (mei_cl_cmp_id(cl, pos->cl))
- return pos;
- return NULL;
-}
+#include "mei_dev.h"
+#include "hw-me.h"
+#include "client.h"
/**
* mei_open - the open function
@@ -121,16 +53,20 @@ static struct mei_cl_cb *find_read_list_entry(
*/
static int mei_open(struct inode *inode, struct file *file)
{
+ struct miscdevice *misc = file->private_data;
+ struct pci_dev *pdev;
struct mei_cl *cl;
struct mei_device *dev;
- unsigned long cl_id;
+
int err;
err = -ENODEV;
- if (!mei_pdev)
+ if (!misc->parent)
goto out;
- dev = pci_get_drvdata(mei_pdev);
+ pdev = container_of(misc->parent, struct pci_dev, dev);
+
+ dev = pci_get_drvdata(pdev);
if (!dev)
goto out;
@@ -153,24 +89,9 @@ static int mei_open(struct inode *inode, struct file *file)
goto out_unlock;
}
- cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
- if (cl_id >= MEI_CLIENTS_MAX) {
- dev_err(&dev->pdev->dev, "client_id exceded %d",
- MEI_CLIENTS_MAX) ;
+ err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
+ if (err)
goto out_unlock;
- }
-
- cl->host_client_id = cl_id;
-
- dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
-
- dev->open_handle_count++;
-
- list_add_tail(&cl->link, &dev->file_list);
-
- set_bit(cl->host_client_id, dev->host_clients_map);
- cl->state = MEI_FILE_INITIALIZING;
- cl->sm_state = 0;
file->private_data = cl;
mutex_unlock(&dev->device_lock);
@@ -216,7 +137,7 @@ static int mei_release(struct inode *inode, struct file *file)
"ME client = %d\n",
cl->host_client_id,
cl->me_client_id);
- rets = mei_disconnect_host_client(dev, cl);
+ rets = mei_cl_disconnect(cl);
}
mei_cl_flush_queues(cl);
dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
@@ -227,12 +148,13 @@ static int mei_release(struct inode *inode, struct file *file)
clear_bit(cl->host_client_id, dev->host_clients_map);
dev->open_handle_count--;
}
- mei_me_cl_unlink(dev, cl);
+ mei_cl_unlink(cl);
+
/* free read cb */
cb = NULL;
if (cl->read_cb) {
- cb = find_read_list_entry(dev, cl);
+ cb = mei_cl_find_read_cb(cl);
/* Remove entry from read list */
if (cb)
list_del(&cb->list);
@@ -322,7 +244,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}
- err = mei_start_read(dev, cl);
+ err = mei_cl_read_start(cl);
if (err && err != -EBUSY) {
dev_dbg(&dev->pdev->dev,
"mei start read failure with status = %d\n", err);
@@ -393,14 +315,13 @@ copy_buffer:
goto out;
free:
- cb_pos = find_read_list_entry(dev, cl);
+ cb_pos = mei_cl_find_read_cb(cl);
/* Remove entry from read list */
if (cb_pos)
list_del(&cb_pos->list);
mei_io_cb_free(cb);
cl->reading_state = MEI_IDLE;
cl->read_cb = NULL;
- cl->read_pending = 0;
out:
dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
mutex_unlock(&dev->device_lock);
@@ -475,16 +396,15 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
/* free entry used in read */
if (cl->reading_state == MEI_READ_COMPLETE) {
*offset = 0;
- write_cb = find_read_list_entry(dev, cl);
+ write_cb = mei_cl_find_read_cb(cl);
if (write_cb) {
list_del(&write_cb->list);
mei_io_cb_free(write_cb);
write_cb = NULL;
cl->reading_state = MEI_IDLE;
cl->read_cb = NULL;
- cl->read_pending = 0;
}
- } else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
+ } else if (cl->reading_state == MEI_IDLE)
*offset = 0;
@@ -519,7 +439,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
if (rets) {
dev_err(&dev->pdev->dev,
- "amthi write failed with status = %d\n", rets);
+ "amthif write failed with status = %d\n", rets);
goto err;
}
mutex_unlock(&dev->device_lock);
@@ -530,20 +450,20 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
cl->host_client_id, cl->me_client_id);
- rets = mei_flow_ctrl_creds(dev, cl);
+ rets = mei_cl_flow_ctrl_creds(cl);
if (rets < 0)
goto err;
- if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
+ if (rets == 0 || !dev->hbuf_is_ready) {
write_cb->buf_idx = 0;
mei_hdr.msg_complete = 0;
cl->writing_state = MEI_WRITING;
goto out;
}
- dev->mei_host_buffer_is_empty = false;
- if (length > mei_hbuf_max_data(dev)) {
- mei_hdr.length = mei_hbuf_max_data(dev);
+ dev->hbuf_is_ready = false;
+ if (length > mei_hbuf_max_len(dev)) {
+ mei_hdr.length = mei_hbuf_max_len(dev);
mei_hdr.msg_complete = 0;
} else {
mei_hdr.length = length;
@@ -552,10 +472,10 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
mei_hdr.host_addr = cl->host_client_id;
mei_hdr.me_addr = cl->me_client_id;
mei_hdr.reserved = 0;
- dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
- *((u32 *) &mei_hdr));
- if (mei_write_message(dev, &mei_hdr,
- write_cb->request_buffer.data, mei_hdr.length)) {
+
+ dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
+ MEI_HDR_PRM(&mei_hdr));
+ if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data)) {
rets = -ENODEV;
goto err;
}
@@ -564,7 +484,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
out:
if (mei_hdr.msg_complete) {
- if (mei_flow_ctrl_reduce(dev, cl)) {
+ if (mei_cl_flow_ctrl_reduce(cl)) {
rets = -ENODEV;
goto err;
}
@@ -582,6 +502,103 @@ err:
return rets;
}
+/**
+ * mei_ioctl_connect_client - the connect to fw client IOCTL function
+ *
+ * @file: private data of the file object
+ * @data: IOCTL connect data, input and output parameters,
+ *        carrying the requested client UUID and returned properties
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_ioctl_connect_client(struct file *file,
+ struct mei_connect_client_data *data)
+{
+ struct mei_device *dev;
+ struct mei_client *client;
+ struct mei_cl *cl;
+ int i;
+ int rets;
+
+ cl = file->private_data;
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (dev->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto end;
+ }
+
+ if (cl->state != MEI_FILE_INITIALIZING &&
+ cl->state != MEI_FILE_DISCONNECTED) {
+ rets = -EBUSY;
+ goto end;
+ }
+
+ /* find ME client we're trying to connect to */
+ i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
+ if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
+ cl->me_client_id = dev->me_clients[i].client_id;
+ cl->state = MEI_FILE_CONNECTING;
+ }
+
+ dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
+ cl->me_client_id);
+ dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
+ dev->me_clients[i].props.protocol_version);
+ dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
+ dev->me_clients[i].props.max_msg_length);
+
+ /* if we're connecting to amthif client then we will use the
+ * existing connection
+ */
+ if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
+ dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
+ if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
+ rets = -ENODEV;
+ goto end;
+ }
+ clear_bit(cl->host_client_id, dev->host_clients_map);
+ mei_cl_unlink(cl);
+
+ kfree(cl);
+ cl = NULL;
+ file->private_data = &dev->iamthif_cl;
+
+ client = &data->out_client_properties;
+ client->max_msg_length =
+ dev->me_clients[i].props.max_msg_length;
+ client->protocol_version =
+ dev->me_clients[i].props.protocol_version;
+ rets = dev->iamthif_cl.status;
+
+ goto end;
+ }
+
+ if (cl->state != MEI_FILE_CONNECTING) {
+ rets = -ENODEV;
+ goto end;
+ }
+
+
+ /* prepare the output buffer */
+ client = &data->out_client_properties;
+ client->max_msg_length = dev->me_clients[i].props.max_msg_length;
+ client->protocol_version = dev->me_clients[i].props.protocol_version;
+ dev_dbg(&dev->pdev->dev, "Can connect?\n");
+
+
+ rets = mei_cl_connect(cl, file);
+
+end:
+ dev_dbg(&dev->pdev->dev, "free connect cb memory.");
+ return rets;
+}
+
/**
* mei_ioctl - the IOCTL function
@@ -630,6 +647,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
rets = -EFAULT;
goto out;
}
+
rets = mei_ioctl_connect_client(file, connect_data);
/* if all is ok, copying the data back to user. */
@@ -726,7 +744,6 @@ static const struct file_operations mei_fops = {
.llseek = no_llseek
};
-
/*
* Misc Device Struct
*/
@@ -736,300 +753,17 @@ static struct miscdevice mei_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
};
-/**
- * mei_quirk_probe - probe for devices that doesn't valid ME interface
- * @pdev: PCI device structure
- * @ent: entry into pci_device_table
- *
- * returns true if ME Interface is valid, false otherwise
- */
-static bool mei_quirk_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+int mei_register(struct device *dev)
{
- u32 reg;
- if (ent->device == MEI_DEV_ID_PBG_1) {
- pci_read_config_dword(pdev, 0x48, &reg);
- /* make sure that bit 9 is up and bit 10 is down */
- if ((reg & 0x600) == 0x200) {
- dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
- return false;
- }
- }
- return true;
-}
-/**
- * mei_probe - Device Initialization Routine
- *
- * @pdev: PCI device structure
- * @ent: entry in kcs_pci_tbl
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct mei_device *dev;
- int err;
-
- mutex_lock(&mei_mutex);
-
- if (!mei_quirk_probe(pdev, ent)) {
- err = -ENODEV;
- goto end;
- }
-
- if (mei_pdev) {
- err = -EEXIST;
- goto end;
- }
- /* enable pci dev */
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "failed to enable pci device.\n");
- goto end;
- }
- /* set PCI host mastering */
- pci_set_master(pdev);
- /* pci request regions for mei driver */
- err = pci_request_regions(pdev, KBUILD_MODNAME);
- if (err) {
- dev_err(&pdev->dev, "failed to get pci regions.\n");
- goto disable_device;
- }
- /* allocates and initializes the mei dev structure */
- dev = mei_device_init(pdev);
- if (!dev) {
- err = -ENOMEM;
- goto release_regions;
- }
- /* mapping IO device memory */
- dev->mem_addr = pci_iomap(pdev, 0, 0);
- if (!dev->mem_addr) {
- dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
- err = -ENOMEM;
- goto free_device;
- }
- pci_enable_msi(pdev);
-
- /* request and enable interrupt */
- if (pci_dev_msi_enabled(pdev))
- err = request_threaded_irq(pdev->irq,
- NULL,
- mei_interrupt_thread_handler,
- IRQF_ONESHOT, KBUILD_MODNAME, dev);
- else
- err = request_threaded_irq(pdev->irq,
- mei_interrupt_quick_handler,
- mei_interrupt_thread_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
-
- if (err) {
- dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
- pdev->irq);
- goto disable_msi;
- }
- INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
- INIT_WORK(&dev->init_work, mei_host_client_init);
-
- if (mei_hw_init(dev)) {
- dev_err(&pdev->dev, "init hw failure.\n");
- err = -ENODEV;
- goto release_irq;
- }
-
- err = misc_register(&mei_misc_device);
- if (err)
- goto release_irq;
-
- mei_pdev = pdev;
- pci_set_drvdata(pdev, dev);
-
-
- schedule_delayed_work(&dev->timer_work, HZ);
-
- mutex_unlock(&mei_mutex);
-
- pr_debug("initialization successful.\n");
-
- return 0;
-
-release_irq:
- /* disable interrupts */
- dev->host_hw_state = mei_hcsr_read(dev);
- mei_disable_interrupts(dev);
- flush_scheduled_work();
- free_irq(pdev->irq, dev);
-disable_msi:
- pci_disable_msi(pdev);
- pci_iounmap(pdev, dev->mem_addr);
-free_device:
- kfree(dev);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_disable_device(pdev);
-end:
- mutex_unlock(&mei_mutex);
- dev_err(&pdev->dev, "initialization failed.\n");
- return err;
+ mei_misc_device.parent = dev;
+ return misc_register(&mei_misc_device);
}
-/**
- * mei_remove - Device Removal Routine
- *
- * @pdev: PCI device structure
- *
- * mei_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
- */
-static void mei_remove(struct pci_dev *pdev)
+void mei_deregister(void)
{
- struct mei_device *dev;
-
- if (mei_pdev != pdev)
- return;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
-
- mutex_lock(&dev->device_lock);
-
- cancel_delayed_work(&dev->timer_work);
-
- mei_wd_stop(dev);
-
- mei_pdev = NULL;
-
- if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->iamthif_cl);
- }
- if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
- dev->wd_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->wd_cl);
- }
-
- /* Unregistering watchdog device */
- mei_watchdog_unregister(dev);
-
- /* remove entry if already in list */
- dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
- mei_me_cl_unlink(dev, &dev->wd_cl);
- mei_me_cl_unlink(dev, &dev->iamthif_cl);
-
- dev->iamthif_current_cb = NULL;
- dev->me_clients_num = 0;
-
- mutex_unlock(&dev->device_lock);
-
- flush_scheduled_work();
-
- /* disable interrupts */
- mei_disable_interrupts(dev);
-
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
- pci_set_drvdata(pdev, NULL);
-
- if (dev->mem_addr)
- pci_iounmap(pdev, dev->mem_addr);
-
- kfree(dev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
misc_deregister(&mei_misc_device);
-}
-#ifdef CONFIG_PM
-static int mei_pci_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct mei_device *dev = pci_get_drvdata(pdev);
- int err;
-
- if (!dev)
- return -ENODEV;
- mutex_lock(&dev->device_lock);
-
- cancel_delayed_work(&dev->timer_work);
-
- /* Stop watchdog if exists */
- err = mei_wd_stop(dev);
- /* Set new mei state */
- if (dev->dev_state == MEI_DEV_ENABLED ||
- dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
- dev->dev_state = MEI_DEV_POWER_DOWN;
- mei_reset(dev, 0);
- }
- mutex_unlock(&dev->device_lock);
-
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
-
- return err;
+ mei_misc_device.parent = NULL;
}
-static int mei_pci_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct mei_device *dev;
- int err;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return -ENODEV;
-
- pci_enable_msi(pdev);
-
- /* request and enable interrupt */
- if (pci_dev_msi_enabled(pdev))
- err = request_threaded_irq(pdev->irq,
- NULL,
- mei_interrupt_thread_handler,
- IRQF_ONESHOT, KBUILD_MODNAME, dev);
- else
- err = request_threaded_irq(pdev->irq,
- mei_interrupt_quick_handler,
- mei_interrupt_thread_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
-
- if (err) {
- dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
- pdev->irq);
- return err;
- }
-
- mutex_lock(&dev->device_lock);
- dev->dev_state = MEI_DEV_POWER_UP;
- mei_reset(dev, 1);
- mutex_unlock(&dev->device_lock);
-
- /* Start timer if stopped in suspend */
- schedule_delayed_work(&dev->timer_work, HZ);
-
- return err;
-}
-static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
-#define MEI_PM_OPS (&mei_pm_ops)
-#else
-#define MEI_PM_OPS NULL
-#endif /* CONFIG_PM */
-/*
- * PCI driver structure
- */
-static struct pci_driver mei_driver = {
- .name = KBUILD_MODNAME,
- .id_table = mei_pci_tbl,
- .probe = mei_probe,
- .remove = mei_remove,
- .shutdown = mei_remove,
- .driver.pm = MEI_PM_OPS,
-};
-
-module_pci_driver(mei_driver);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");
+
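
Instead of the file-scope mei_pdev singleton, the reworked main.c records the PCI device as the misc device's parent in mei_register() and recovers it in mei_open() via container_of(misc->parent, struct pci_dev, dev). A self-contained sketch of that pattern, using reduced stand-ins for the kernel structures involved:

#include <stddef.h>

/* reduced stand-ins for the kernel structures involved */
struct device     { void *driver_data; };
struct pci_dev    { struct device dev; };
struct miscdevice { struct device *parent; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* what mei_open() does: walk from the misc parent back to the pci_dev */
static struct pci_dev *misc_to_pci(struct miscdevice *misc)
{
	return container_of(misc->parent, struct pci_dev, dev);
}

int main(void)
{
	struct pci_dev pdev = { .dev = { .driver_data = NULL } };
	struct miscdevice misc = { .parent = &pdev.dev };

	/* mei_register() sets misc.parent = &pdev.dev before misc_register() */
	return misc_to_pci(&misc) == &pdev ? 0 : 1;
}
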
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 25da04549d04..cb80166161f0 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -21,7 +21,9 @@
#include <linux/watchdog.h>
#include <linux/poll.h>
#include <linux/mei.h>
+
#include "hw.h"
+#include "hw-me-regs.h"
/*
* watch dog definition
@@ -44,7 +46,7 @@
/*
* AMTHI Client UUID
*/
-extern const uuid_le mei_amthi_guid;
+extern const uuid_le mei_amthif_guid;
/*
* Watchdog Client UUID
@@ -65,12 +67,18 @@ extern const u8 mei_wd_state_independence_msg[3][4];
* Number of File descriptors/handles
* that can be opened to the driver.
*
- * Limit to 253: 256 Total Clients
+ * Limit to 255: 256 Total Clients
* minus internal client for MEI Bus Messags
- * minus internal client for AMTHI
- * minus internal client for Watchdog
*/
-#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 3)
+#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
+
+/*
+ * Internal Clients Number
+ */
+#define MEI_HOST_CLIENT_ID_ANY (-1)
+#define MEI_HBM_HOST_CLIENT_ID 0 /* not used, just for documentation */
+#define MEI_WD_HOST_CLIENT_ID 1
+#define MEI_IAMTHIF_HOST_CLIENT_ID 2
/* File state */
@@ -150,6 +158,19 @@ struct mei_message_data {
unsigned char *data;
};
+/**
+ * struct mei_me_client - representation of me (fw) client
+ *
+ * @props - client properties
+ * @client_id - me client id
+ * @mei_flow_ctrl_creds - flow control credits
+ */
+struct mei_me_client {
+ struct mei_client_properties props;
+ u8 client_id;
+ u8 mei_flow_ctrl_creds;
+};
+
struct mei_cl;
@@ -178,7 +199,6 @@ struct mei_cl {
wait_queue_head_t tx_wait;
wait_queue_head_t rx_wait;
wait_queue_head_t wait;
- int read_pending;
int status;
/* ID of client connected */
u8 host_client_id;
@@ -191,10 +211,67 @@ struct mei_cl {
struct mei_cl_cb *read_cb;
};
+/** struct mei_hw_ops
+ *
+ * @host_set_ready - notify FW that host side is ready
+ * @host_is_ready - query for host readiness
+ *
+ * @hw_is_ready - query if hw is ready
+ * @hw_reset - reset hw
+ * @hw_config - configure hw
+ *
+ * @intr_clear - clear pending interrupts
+ * @intr_enable - enable interrupts
+ * @intr_disable - disable interrupts
+ *
+ * @hbuf_free_slots - query for write buffer empty slots
+ * @hbuf_is_ready - query if write buffer is empty
+ * @hbuf_max_len - query for write buffer max len
+ *
+ * @write - write a message to FW
+ *
+ * @rdbuf_full_slots - query how many slots are filled
+ *
+ * @read_hdr - get first 4 bytes (header)
+ * @read - read a buffer from the FW
+ */
+struct mei_hw_ops {
+
+ void (*host_set_ready) (struct mei_device *dev);
+ bool (*host_is_ready) (struct mei_device *dev);
+
+ bool (*hw_is_ready) (struct mei_device *dev);
+ void (*hw_reset) (struct mei_device *dev, bool enable);
+ void (*hw_config) (struct mei_device *dev);
+
+ void (*intr_clear) (struct mei_device *dev);
+ void (*intr_enable) (struct mei_device *dev);
+ void (*intr_disable) (struct mei_device *dev);
+
+ int (*hbuf_free_slots) (struct mei_device *dev);
+ bool (*hbuf_is_ready) (struct mei_device *dev);
+ size_t (*hbuf_max_len) (const struct mei_device *dev);
+
+ int (*write)(struct mei_device *dev,
+ struct mei_msg_hdr *hdr,
+ unsigned char *buf);
+
+ int (*rdbuf_full_slots)(struct mei_device *dev);
+
+ u32 (*read_hdr)(const struct mei_device *dev);
+ int (*read) (struct mei_device *dev,
+ unsigned char *buf, unsigned long len);
+};
+
/**
* struct mei_device - MEI private device struct
- * @hbuf_depth - depth of host(write) buffer
- * @wr_ext_msg - buffer for hbm control responses (set in read cycle)
+ *
+ * @mem_addr - mem mapped base register address
+ *
+ * @hbuf_depth - depth of hardware host/write buffer in slots
+ * @hbuf_is_ready - query if the host/write buffer is ready
+ * @wr_msg - the buffer for hbm control messages
+ * @wr_ext_msg - the buffer for hbm control responses (set in read cycle)
*/
struct mei_device {
struct pci_dev *pdev; /* pointer to pci device struct */
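
struct mei_hw_ops plus the trailing "char hw[0]" member added above form a small hardware-abstraction layer: the generic mei_device ends with an ops table and per-hardware storage carved out of the same allocation (which is also why mei_device_init() now takes a caller-allocated device, as the prototype change further down shows). A compilable sketch of that layout, using a C99 flexible array member in place of hw[0] and illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct dev;

struct hw_ops {
	void (*hw_config)(struct dev *d);
	int  (*hbuf_free_slots)(struct dev *d);
};

struct dev {
	const struct hw_ops *ops;
	char hw[];			/* hardware-specific state follows */
};

struct hw_me { int depth; };		/* stand-in for the ME-only fields */

static void me_config(struct dev *d)
{
	((struct hw_me *)d->hw)->depth = 32;
}

static int me_free_slots(struct dev *d)
{
	return ((struct hw_me *)d->hw)->depth;	/* pretend the buffer is empty */
}

static const struct hw_ops me_ops = {
	.hw_config = me_config,
	.hbuf_free_slots = me_free_slots,
};

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d) + sizeof(struct hw_me));

	if (!d)
		return 1;
	d->ops = &me_ops;
	d->ops->hw_config(d);
	printf("free slots: %d\n", d->ops->hbuf_free_slots(d));
	free(d);
	return 0;
}
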
@@ -213,24 +290,14 @@ struct mei_device {
*/
struct list_head file_list;
long open_handle_count;
- /*
- * memory of device
- */
- unsigned int mem_base;
- unsigned int mem_length;
- void __iomem *mem_addr;
+
/*
* lock for the device
*/
struct mutex device_lock; /* device lock */
struct delayed_work timer_work; /* MEI timer delayed work (timeouts) */
bool recvd_msg;
- /*
- * hw states of host and fw(ME)
- */
- u32 host_hw_state;
- u32 me_hw_state;
- u8 hbuf_depth;
+
/*
* waiting queue for receive message from FW
*/
@@ -243,11 +310,20 @@ struct mei_device {
enum mei_dev_state dev_state;
enum mei_init_clients_states init_clients_state;
u16 init_clients_timer;
- bool need_reset;
unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE]; /* control messages */
u32 rd_msg_hdr;
- u32 wr_msg_buf[128]; /* used for control messages */
+
+ /* write buffer */
+ u8 hbuf_depth;
+ bool hbuf_is_ready;
+
+ /* used for control messages */
+ struct {
+ struct mei_msg_hdr hdr;
+ unsigned char data[128];
+ } wr_msg;
+
struct {
struct mei_msg_hdr hdr;
unsigned char data[4]; /* All HBM messages are 4 bytes */
@@ -261,7 +337,6 @@ struct mei_device {
u8 me_clients_num;
u8 me_client_presentation_num;
u8 me_client_index;
- bool mei_host_buffer_is_empty;
struct mei_cl wd_cl;
enum mei_wd_states wd_state;
@@ -289,6 +364,9 @@ struct mei_device {
bool iamthif_canceled;
struct work_struct init_work;
+
+ const struct mei_hw_ops *ops;
+ char hw[0] __aligned(sizeof(void *));
};
static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
@@ -300,96 +378,28 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
/*
* mei init function prototypes
*/
-struct mei_device *mei_device_init(struct pci_dev *pdev);
+void mei_device_init(struct mei_device *dev);
void mei_reset(struct mei_device *dev, int interrupts);
int mei_hw_init(struct mei_device *dev);
-int mei_task_initialize_clients(void *data);
-int mei_initialize_clients(struct mei_device *dev);
-int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl);
-void mei_allocate_me_clients_storage(struct mei_device *dev);
-
-
-int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
- const uuid_le *cguid, u8 host_client_id);
-void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl);
-int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
-int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
-
-/*
- * MEI IO Functions
- */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
-void mei_io_cb_free(struct mei_cl_cb *priv_cb);
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
-
-
-/**
- * mei_io_list_init - Sets up a queue list.
- *
- * @list: An instance cl callback structure
- */
-static inline void mei_io_list_init(struct mei_cl_cb *list)
-{
- INIT_LIST_HEAD(&list->list);
-}
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
-
-/*
- * MEI ME Client Functions
- */
-
-struct mei_cl *mei_cl_allocate(struct mei_device *dev);
-void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
-int mei_cl_flush_queues(struct mei_cl *cl);
-/**
- * mei_cl_cmp_id - tells if file private data have same id
- *
- * @fe1: private data of 1. file object
- * @fe2: private data of 2. file object
- *
- * returns true - if ids are the same and not NULL
- */
-static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
- const struct mei_cl *cl2)
-{
- return cl1 && cl2 &&
- (cl1->host_client_id == cl2->host_client_id) &&
- (cl1->me_client_id == cl2->me_client_id);
-}
-
-
-
-/*
- * MEI Host Client Functions
- */
-void mei_host_start_message(struct mei_device *dev);
-void mei_host_enum_clients_message(struct mei_device *dev);
-int mei_host_client_enumerate(struct mei_device *dev);
-void mei_host_client_init(struct work_struct *work);
/*
* MEI interrupt functions prototype
*/
-irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id);
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id);
-void mei_timer(struct work_struct *work);
-/*
- * MEI input output function prototype
- */
-int mei_ioctl_connect_client(struct file *file,
- struct mei_connect_client_data *data);
+void mei_timer(struct work_struct *work);
+int mei_irq_read_handler(struct mei_device *dev,
+ struct mei_cl_cb *cmpl_list, s32 *slots);
-int mei_start_read(struct mei_device *dev, struct mei_cl *cl);
+int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
+void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos);
/*
* AMTHIF - AMT Host Interface Functions
*/
void mei_amthif_reset_params(struct mei_device *dev);
-void mei_amthif_host_init(struct mei_device *dev);
+int mei_amthif_host_init(struct mei_device *dev);
int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
@@ -407,9 +417,6 @@ struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
void mei_amthif_run_next_cmd(struct mei_device *dev);
-int mei_amthif_read_message(struct mei_cl_cb *complete_list,
- struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
-
int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list);
@@ -418,92 +425,107 @@ int mei_amthif_irq_read_message(struct mei_cl_cb *complete_list,
struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
+
+int mei_wd_send(struct mei_device *dev);
+int mei_wd_stop(struct mei_device *dev);
+int mei_wd_host_init(struct mei_device *dev);
/*
- * Register Access Function
+ * mei_watchdog_register - Registering watchdog interface
+ *   once we have a connection to the WD Client
+ * @dev - mei device
+ */
+void mei_watchdog_register(struct mei_device *dev);
+/*
+ * mei_watchdog_unregister - Unregistering watchdog interface
+ * @dev - mei device
*/
+void mei_watchdog_unregister(struct mei_device *dev);
-/**
- * mei_reg_read - Reads 32bit data from the mei device
- *
- * @dev: the device structure
- * @offset: offset from which to read the data
- *
- * returns register value (u32)
+/*
+ * Register Access Function
*/
-static inline u32 mei_reg_read(const struct mei_device *dev,
- unsigned long offset)
+
+static inline void mei_hw_config(struct mei_device *dev)
+{
+ dev->ops->hw_config(dev);
+}
+static inline void mei_hw_reset(struct mei_device *dev, bool enable)
{
- return ioread32(dev->mem_addr + offset);
+ dev->ops->hw_reset(dev, enable);
}
-/**
- * mei_reg_write - Writes 32bit data to the mei device
- *
- * @dev: the device structure
- * @offset: offset from which to write the data
- * @value: register value to write (u32)
- */
-static inline void mei_reg_write(const struct mei_device *dev,
- unsigned long offset, u32 value)
+static inline void mei_clear_interrupts(struct mei_device *dev)
{
- iowrite32(value, dev->mem_addr + offset);
+ dev->ops->intr_clear(dev);
}
-/**
- * mei_hcsr_read - Reads 32bit data from the host CSR
- *
- * @dev: the device structure
- *
- * returns the byte read.
- */
-static inline u32 mei_hcsr_read(const struct mei_device *dev)
+static inline void mei_enable_interrupts(struct mei_device *dev)
{
- return mei_reg_read(dev, H_CSR);
+ dev->ops->intr_enable(dev);
}
-/**
- * mei_mecsr_read - Reads 32bit data from the ME CSR
- *
- * @dev: the device structure
- *
- * returns ME_CSR_HA register value (u32)
- */
-static inline u32 mei_mecsr_read(const struct mei_device *dev)
+static inline void mei_disable_interrupts(struct mei_device *dev)
{
- return mei_reg_read(dev, ME_CSR_HA);
+ dev->ops->intr_disable(dev);
}
-/**
- * get_me_cb_rw - Reads 32bit data from the mei ME_CB_RW register
- *
- * @dev: the device structure
- *
- * returns ME_CB_RW register value (u32)
- */
-static inline u32 mei_mecbrw_read(const struct mei_device *dev)
+static inline void mei_host_set_ready(struct mei_device *dev)
{
- return mei_reg_read(dev, ME_CB_RW);
+ dev->ops->host_set_ready(dev);
+}
+static inline bool mei_host_is_ready(struct mei_device *dev)
+{
+ return dev->ops->host_is_ready(dev);
+}
+static inline bool mei_hw_is_ready(struct mei_device *dev)
+{
+ return dev->ops->hw_is_ready(dev);
}
+static inline bool mei_hbuf_is_ready(struct mei_device *dev)
+{
+ return dev->ops->hbuf_is_ready(dev);
+}
-/*
- * mei interface function prototypes
- */
-void mei_hcsr_set(struct mei_device *dev);
-void mei_csr_clear_his(struct mei_device *dev);
+static inline int mei_hbuf_empty_slots(struct mei_device *dev)
+{
+ return dev->ops->hbuf_free_slots(dev);
+}
+
+static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
+{
+ return dev->ops->hbuf_max_len(dev);
+}
-void mei_enable_interrupts(struct mei_device *dev);
-void mei_disable_interrupts(struct mei_device *dev);
+static inline int mei_write_message(struct mei_device *dev,
+ struct mei_msg_hdr *hdr,
+ unsigned char *buf)
+{
+ return dev->ops->write(dev, hdr, buf);
+}
-static inline struct mei_msg_hdr *mei_hbm_hdr(u32 *buf, size_t length)
+static inline u32 mei_read_hdr(const struct mei_device *dev)
{
- struct mei_msg_hdr *hdr = (struct mei_msg_hdr *)buf;
- hdr->host_addr = 0;
- hdr->me_addr = 0;
- hdr->length = length;
- hdr->msg_complete = 1;
- hdr->reserved = 0;
- return hdr;
+ return dev->ops->read_hdr(dev);
}
+static inline void mei_read_slots(struct mei_device *dev,
+ unsigned char *buf, unsigned long len)
+{
+ dev->ops->read(dev, buf, len);
+}
+
+static inline int mei_count_full_read_slots(struct mei_device *dev)
+{
+ return dev->ops->rdbuf_full_slots(dev);
+}
+
+int mei_register(struct device *dev);
+void mei_deregister(void);
+
+#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d"
+#define MEI_HDR_PRM(hdr) \
+ (hdr)->host_addr, (hdr)->me_addr, \
+ (hdr)->length, (hdr)->msg_complete
+
#endif
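
The header changes above route all hardware access through the new mei_hw_ops vtable: struct mei_device carries a const ops pointer plus a backend-private hw[] area, and the inline wrappers (mei_hw_config(), mei_write_message(), and so on) simply forward to dev->ops. A minimal sketch of how a backend might populate the table is shown below; the foo_* names are hypothetical and only illustrate the dispatch, they are not part of this patch.

/* Illustrative sketch only: hypothetical "foo" backend filling in mei_hw_ops. */
static void foo_hw_config(struct mei_device *dev)
{
	/* e.g. read the host buffer depth into dev->hbuf_depth */
}

static bool foo_host_is_ready(struct mei_device *dev)
{
	/* e.g. test a readiness bit in the backend's status register */
	return true;
}

static const struct mei_hw_ops foo_hw_ops = {
	.hw_config	= foo_hw_config,
	.host_is_ready	= foo_host_is_ready,
	/* the remaining callbacks are wired up the same way */
};

With such a table installed, mei_hw_config(dev) resolves to foo_hw_config(dev) without the core code knowing which backend it is talking to.
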
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
new file mode 100644
index 000000000000..b40ec0601ab0
--- /dev/null
+++ b/drivers/misc/mei/pci-me.c
@@ -0,0 +1,396 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/aio.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/uuid.h>
+#include <linux/compat.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hw-me.h"
+#include "client.h"
+
+/* AMT device is a singleton on the platform */
+static struct pci_dev *mei_pdev;
+
+/* mei_pci_tbl - PCI Device ID Table */
+static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
+
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
+
+static DEFINE_MUTEX(mei_mutex);
+
+/**
+ * mei_quirk_probe - probe for devices that don't have a valid ME interface
+ * @pdev: PCI device structure
+ * @ent: entry into pci_device_table
+ *
+ * returns true if ME Interface is valid, false otherwise
+ */
+static bool mei_quirk_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ u32 reg;
+ if (ent->device == MEI_DEV_ID_PBG_1) {
+ pci_read_config_dword(pdev, 0x48, &reg);
+ /* make sure that bit 9 is up and bit 10 is down */
+ if ((reg & 0x600) == 0x200) {
+ dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
+ return false;
+ }
+ }
+ return true;
+}
+/**
+ * mei_probe - Device Initialization Routine
+ *
+ * @pdev: PCI device structure
+ * @ent: entry in mei_pci_tbl
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct mei_device *dev;
+ struct mei_me_hw *hw;
+ int err;
+
+ mutex_lock(&mei_mutex);
+
+ if (!mei_quirk_probe(pdev, ent)) {
+ err = -ENODEV;
+ goto end;
+ }
+
+ if (mei_pdev) {
+ err = -EEXIST;
+ goto end;
+ }
+ /* enable pci dev */
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pci device.\n");
+ goto end;
+ }
+ /* set PCI host mastering */
+ pci_set_master(pdev);
+ /* pci request regions for mei driver */
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get pci regions.\n");
+ goto disable_device;
+ }
+ /* allocates and initializes the mei dev structure */
+ dev = mei_me_dev_init(pdev);
+ if (!dev) {
+ err = -ENOMEM;
+ goto release_regions;
+ }
+ hw = to_me_hw(dev);
+ /* mapping IO device memory */
+ hw->mem_addr = pci_iomap(pdev, 0, 0);
+ if (!hw->mem_addr) {
+ dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+ pci_enable_msi(pdev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_me_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_me_irq_quick_handler,
+ mei_me_irq_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+
+ if (err) {
+ dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
+ pdev->irq);
+ goto disable_msi;
+ }
+
+ if (mei_hw_init(dev)) {
+ dev_err(&pdev->dev, "init hw failure.\n");
+ err = -ENODEV;
+ goto release_irq;
+ }
+
+ err = mei_register(&pdev->dev);
+ if (err)
+ goto release_irq;
+
+ mei_pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+
+ schedule_delayed_work(&dev->timer_work, HZ);
+
+ mutex_unlock(&mei_mutex);
+
+ pr_debug("initialization successful.\n");
+
+ return 0;
+
+release_irq:
+ mei_disable_interrupts(dev);
+ flush_scheduled_work();
+ free_irq(pdev->irq, dev);
+disable_msi:
+ pci_disable_msi(pdev);
+ pci_iounmap(pdev, hw->mem_addr);
+free_device:
+ kfree(dev);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+end:
+ mutex_unlock(&mei_mutex);
+ dev_err(&pdev->dev, "initialization failed.\n");
+ return err;
+}
+
+/**
+ * mei_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void mei_remove(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+ struct mei_me_hw *hw;
+
+ if (mei_pdev != pdev)
+ return;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return;
+
+ hw = to_me_hw(dev);
+
+ mutex_lock(&dev->device_lock);
+
+ cancel_delayed_work(&dev->timer_work);
+
+ mei_wd_stop(dev);
+
+ mei_pdev = NULL;
+
+ if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
+ dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
+ mei_cl_disconnect(&dev->iamthif_cl);
+ }
+ if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
+ dev->wd_cl.state = MEI_FILE_DISCONNECTING;
+ mei_cl_disconnect(&dev->wd_cl);
+ }
+
+ /* Unregistering watchdog device */
+ mei_watchdog_unregister(dev);
+
+ /* remove entry if already in list */
+ dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
+
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
+ mei_cl_unlink(&dev->wd_cl);
+
+ if (dev->open_handle_count > 0)
+ dev->open_handle_count--;
+ mei_cl_unlink(&dev->iamthif_cl);
+
+ dev->iamthif_current_cb = NULL;
+ dev->me_clients_num = 0;
+
+ mutex_unlock(&dev->device_lock);
+
+ flush_scheduled_work();
+
+ /* disable interrupts */
+ mei_disable_interrupts(dev);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ if (hw->mem_addr)
+ pci_iounmap(pdev, hw->mem_addr);
+
+ kfree(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ mei_deregister();
+
+}
+#ifdef CONFIG_PM
+static int mei_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev = pci_get_drvdata(pdev);
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+ mutex_lock(&dev->device_lock);
+
+ cancel_delayed_work(&dev->timer_work);
+
+ /* Stop watchdog if exists */
+ err = mei_wd_stop(dev);
+ /* Set new mei state */
+ if (dev->dev_state == MEI_DEV_ENABLED ||
+ dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
+ dev->dev_state = MEI_DEV_POWER_DOWN;
+ mei_reset(dev, 0);
+ }
+ mutex_unlock(&dev->device_lock);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+
+ return err;
+}
+
+static int mei_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct mei_device *dev;
+ int err;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ pci_enable_msi(pdev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_me_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_me_irq_quick_handler,
+ mei_me_irq_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+
+ if (err) {
+ dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
+ pdev->irq);
+ return err;
+ }
+
+ mutex_lock(&dev->device_lock);
+ dev->dev_state = MEI_DEV_POWER_UP;
+ mei_reset(dev, 1);
+ mutex_unlock(&dev->device_lock);
+
+ /* Start timer if stopped in suspend */
+ schedule_delayed_work(&dev->timer_work, HZ);
+
+ return err;
+}
+static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
+#define MEI_PM_OPS (&mei_pm_ops)
+#else
+#define MEI_PM_OPS NULL
+#endif /* CONFIG_PM */
+/*
+ * PCI driver structure
+ */
+static struct pci_driver mei_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mei_pci_tbl,
+ .probe = mei_probe,
+ .remove = mei_remove,
+ .shutdown = mei_remove,
+ .driver.pm = MEI_PM_OPS,
+};
+
+module_pci_driver(mei_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
+MODULE_LICENSE("GPL v2");
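
pci-me.c reaches its bus-specific state through to_me_hw(dev), and struct mei_device ends with char hw[0] __aligned(sizeof(void *)), so the ME-specific data is evidently allocated in the same block right after the generic structure. hw-me.h is not part of this section, so the sketch below is an assumption about that layout; only the mem_addr field is actually visible in the probe/remove code above.

/* Assumed layout, for illustration only - see hw-me.h for the real definition. */
struct mei_me_hw {
	void __iomem *mem_addr;	/* mapped PCI BAR 0, set in mei_probe() */
	/* further ME-only state (e.g. cached CSR values) would live here */
};

static inline struct mei_me_hw *to_me_hw(struct mei_device *dev)
{
	return (struct mei_me_hw *)dev->hw;
}

mei_me_dev_init() would then allocate sizeof(struct mei_device) plus sizeof(struct mei_me_hw) in one allocation and set dev->ops to the ME ops table, which is consistent with the single kfree(dev) in mei_remove().
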
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 9299a8c29a6f..2413247fc392 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -21,11 +21,13 @@
#include <linux/sched.h>
#include <linux/watchdog.h>
-#include "mei_dev.h"
-#include "hw.h"
-#include "interface.h"
#include <linux/mei.h>
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
+
static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
@@ -62,30 +64,41 @@ static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
*/
int mei_wd_host_init(struct mei_device *dev)
{
- int id;
- mei_cl_init(&dev->wd_cl, dev);
+ struct mei_cl *cl = &dev->wd_cl;
+ int i;
+ int ret;
+
+ mei_cl_init(cl, dev);
- /* look for WD client and connect to it */
- dev->wd_cl.state = MEI_FILE_DISCONNECTED;
dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
dev->wd_state = MEI_WD_IDLE;
- /* Connect WD ME client to the host client */
- id = mei_me_cl_link(dev, &dev->wd_cl,
- &mei_wd_guid, MEI_WD_HOST_CLIENT_ID);
- if (id < 0) {
+ /* check for valid client id */
+ i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
+ if (i < 0) {
dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
return -ENOENT;
}
- if (mei_connect(dev, &dev->wd_cl)) {
+ cl->me_client_id = dev->me_clients[i].client_id;
+
+ ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
+
+ if (ret < 0) {
+ dev_info(&dev->pdev->dev, "wd: failed link client\n");
+ return -ENOENT;
+ }
+
+ cl->state = MEI_FILE_CONNECTING;
+
+ if (mei_hbm_cl_connect_req(dev, cl)) {
dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n");
- dev->wd_cl.state = MEI_FILE_DISCONNECTED;
- dev->wd_cl.host_client_id = 0;
+ cl->state = MEI_FILE_DISCONNECTED;
+ cl->host_client_id = 0;
return -EIO;
}
- dev->wd_cl.timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
return 0;
}
@@ -101,22 +114,21 @@ int mei_wd_host_init(struct mei_device *dev)
*/
int mei_wd_send(struct mei_device *dev)
{
- struct mei_msg_hdr *mei_hdr;
+ struct mei_msg_hdr hdr;
- mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
- mei_hdr->host_addr = dev->wd_cl.host_client_id;
- mei_hdr->me_addr = dev->wd_cl.me_client_id;
- mei_hdr->msg_complete = 1;
- mei_hdr->reserved = 0;
+ hdr.host_addr = dev->wd_cl.host_client_id;
+ hdr.me_addr = dev->wd_cl.me_client_id;
+ hdr.msg_complete = 1;
+ hdr.reserved = 0;
if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
- mei_hdr->length = MEI_WD_START_MSG_SIZE;
+ hdr.length = MEI_WD_START_MSG_SIZE;
else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
- mei_hdr->length = MEI_WD_STOP_MSG_SIZE;
+ hdr.length = MEI_WD_STOP_MSG_SIZE;
else
return -EINVAL;
- return mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length);
+ return mei_write_message(dev, &hdr, dev->wd_data);
}
/**
@@ -141,16 +153,16 @@ int mei_wd_stop(struct mei_device *dev)
dev->wd_state = MEI_WD_STOPPING;
- ret = mei_flow_ctrl_creds(dev, &dev->wd_cl);
+ ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
if (ret < 0)
goto out;
- if (ret && dev->mei_host_buffer_is_empty) {
+ if (ret && dev->hbuf_is_ready) {
ret = 0;
- dev->mei_host_buffer_is_empty = false;
+ dev->hbuf_is_ready = false;
if (!mei_wd_send(dev)) {
- ret = mei_flow_ctrl_reduce(dev, &dev->wd_cl);
+ ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl);
if (ret)
goto out;
} else {
@@ -270,10 +282,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
dev->wd_state = MEI_WD_RUNNING;
/* Check if we can send the ping to HW*/
- if (dev->mei_host_buffer_is_empty &&
- mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
+ if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
- dev->mei_host_buffer_is_empty = false;
+ dev->hbuf_is_ready = false;
dev_dbg(&dev->pdev->dev, "wd: sending ping\n");
if (mei_wd_send(dev)) {
@@ -282,9 +293,9 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
goto end;
}
- if (mei_flow_ctrl_reduce(dev, &dev->wd_cl)) {
+ if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) {
dev_err(&dev->pdev->dev,
- "wd: mei_flow_ctrl_reduce() failed.\n");
+ "wd: mei_cl_flow_ctrl_reduce() failed.\n");
ret = -EIO;
goto end;
}
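
The watchdog changes above follow the same send discipline in both mei_wd_stop() and mei_wd_ops_ping(): only transmit when the host buffer is ready and the client holds a flow-control credit, then consume both. Below is a hedged sketch of that pattern as a stand-alone helper; mei_send_one() is not a real function, just an illustration built from the calls visible in this hunk.

/* Hypothetical helper - not in the patch - showing the credit/ready pattern. */
static int mei_send_one(struct mei_device *dev, struct mei_cl *cl,
			struct mei_msg_hdr *hdr, unsigned char *buf)
{
	if (!dev->hbuf_is_ready || mei_cl_flow_ctrl_creds(cl) <= 0)
		return -EAGAIN;	/* retry from the write-ready interrupt path */

	dev->hbuf_is_ready = false;
	if (mei_write_message(dev, hdr, buf))
		return -EIO;

	return mei_cl_flow_ctrl_reduce(cl);	/* one credit spent per message */
}
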
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 492c8cac69ac..44d273c5e19d 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -517,7 +517,7 @@ static int __init gru_init(void)
{
int ret;
- if (!is_uv_system())
+ if (!is_uv_system() || (is_uvx_hub() && !is_uv2_hub()))
return 0;
#if defined CONFIG_IA64
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
index abb5de1afce3..f34dcc514730 100644
--- a/drivers/misc/ti-st/Kconfig
+++ b/drivers/misc/ti-st/Kconfig
@@ -5,7 +5,7 @@
menu "Texas Instruments shared transport line discipline"
config TI_ST
tristate "Shared transport core driver"
- depends on NET && GPIOLIB
+ depends on NET && GPIOLIB && TTY
select FW_LOADER
help
This enables the shared transport core driver for TI
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index b90a2241d79c..0a1428016350 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -240,7 +240,8 @@ void st_int_recv(void *disc_data,
char *ptr;
struct st_proto_s *proto;
unsigned short payload_len = 0;
- int len = 0, type = 0;
+ int len = 0;
+ unsigned char type = 0;
unsigned char *plen;
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
unsigned long flags;
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 9ff942a346ed..83269f1d16e3 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -468,6 +468,11 @@ long st_kim_start(void *kim_data)
if (pdata->chip_enable)
pdata->chip_enable(kim_gdata);
+ /* Configure BT nShutdown to HIGH state */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+ mdelay(5); /* FIXME: a proper toggle */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+ mdelay(100);
/* re-initialize the completion */
INIT_COMPLETION(kim_gdata->ldisc_installed);
/* send notification to UIM */
@@ -509,7 +514,8 @@ long st_kim_start(void *kim_data)
* (b) upon failure to either install ldisc or download firmware.
* The function is responsible to (a) notify UIM about un-installation,
* (b) flush UART if the ldisc was installed.
- * (c) invoke platform's chip disabling routine.
+ * (c) reset BT_EN - pull down nshutdown at the end.
+ * (d) invoke platform's chip disabling routine.
*/
long st_kim_stop(void *kim_data)
{
@@ -541,6 +547,13 @@ long st_kim_stop(void *kim_data)
err = -ETIMEDOUT;
}
+ /* By default configure BT nShutdown to LOW state */
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+ mdelay(1);
+ gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+ mdelay(1);
+ gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+
/* platform specific disable */
if (pdata->chip_disable)
pdata->chip_disable(kim_gdata);
@@ -733,6 +746,20 @@ static int kim_probe(struct platform_device *pdev)
/* refer to itself */
kim_gdata->core_data->kim_data = kim_gdata;
+ /* Claim the chip enable nShutdown gpio from the system */
+ kim_gdata->nshutdown = pdata->nshutdown_gpio;
+ err = gpio_request(kim_gdata->nshutdown, "kim");
+ if (unlikely(err)) {
+ pr_err(" gpio %ld request failed ", kim_gdata->nshutdown);
+ return err;
+ }
+
+ /* Configure nShutdown GPIO as output=0 */
+ err = gpio_direction_output(kim_gdata->nshutdown, 0);
+ if (unlikely(err)) {
+ pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown);
+ return err;
+ }
/* get reference of pdev for request_firmware
*/
kim_gdata->kim_pdev = pdev;
@@ -779,10 +806,18 @@ err_core_init:
static int kim_remove(struct platform_device *pdev)
{
+ /* free the GPIOs requested */
+ struct ti_st_plat_data *pdata = pdev->dev.platform_data;
struct kim_data_s *kim_gdata;
kim_gdata = dev_get_drvdata(&pdev->dev);
+ /* Free the Bluetooth/FM/GPIO
+ * nShutdown gpio from the system
+ */
+ gpio_free(pdata->nshutdown_gpio);
+ pr_info("nshutdown GPIO Freed");
+
debugfs_remove_recursive(kim_debugfs_dir);
sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
pr_info("sysfs entries removed");
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig
new file mode 100644
index 000000000000..39c2ecadb273
--- /dev/null
+++ b/drivers/misc/vmw_vmci/Kconfig
@@ -0,0 +1,16 @@
+#
+# VMware VMCI device
+#
+
+config VMWARE_VMCI
+ tristate "VMware VMCI Driver"
+ depends on X86 && PCI
+ help
+ This is VMware's Virtual Machine Communication Interface. It enables
+ high-speed communication between host and guest in a virtual
+ environment via the VMCI virtual device.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vmw_vmci.
diff --git a/drivers/misc/vmw_vmci/Makefile b/drivers/misc/vmw_vmci/Makefile
new file mode 100644
index 000000000000..4da9893c3942
--- /dev/null
+++ b/drivers/misc/vmw_vmci/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci.o
+vmw_vmci-y += vmci_context.o vmci_datagram.o vmci_doorbell.o \
+ vmci_driver.o vmci_event.o vmci_guest.o vmci_handle_array.o \
+ vmci_host.o vmci_queue_pair.o vmci_resource.o vmci_route.o
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
new file mode 100644
index 000000000000..f866a4baecb5
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -0,0 +1,1214 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+/*
+ * List of current VMCI contexts. Contexts can be added by
+ * vmci_ctx_create() and removed via vmci_ctx_destroy().
+ * These, along with context lookup, are protected by the
+ * list structure's lock.
+ */
+static struct {
+ struct list_head head;
+ spinlock_t lock; /* Spinlock for context list operations */
+} ctx_list = {
+ .head = LIST_HEAD_INIT(ctx_list.head),
+ .lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock),
+};
+
+/* Used by contexts that did not set up notify flag pointers */
+static bool ctx_dummy_notify;
+
+static void ctx_signal_notify(struct vmci_ctx *context)
+{
+ *context->notify = true;
+}
+
+static void ctx_clear_notify(struct vmci_ctx *context)
+{
+ *context->notify = false;
+}
+
+/*
+ * If nothing requires the attention of the guest, clears both
+ * notify flag and call.
+ */
+static void ctx_clear_notify_call(struct vmci_ctx *context)
+{
+ if (context->pending_datagrams == 0 &&
+ vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
+ ctx_clear_notify(context);
+}
+
+/*
+ * Sets the context's notify flag iff datagrams are pending for this
+ * context. Called from vmci_setup_notify().
+ */
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context)
+{
+ spin_lock(&context->lock);
+ if (context->pending_datagrams)
+ ctx_signal_notify(context);
+ spin_unlock(&context->lock);
+}
+
+/*
+ * Allocates and initializes a VMCI context.
+ */
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
+ uintptr_t event_hnd,
+ int user_version,
+ const struct cred *cred)
+{
+ struct vmci_ctx *context;
+ int error;
+
+ if (cid == VMCI_INVALID_ID) {
+ pr_devel("Invalid context ID for VMCI context\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) {
+ pr_devel("Invalid flag (flags=0x%x) for VMCI context\n",
+ priv_flags);
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ if (user_version == 0) {
+		pr_devel("Invalid user_version %d\n", user_version);
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context) {
+ pr_warn("Failed to allocate memory for VMCI context\n");
+ error = -EINVAL;
+ goto err_out;
+ }
+
+ kref_init(&context->kref);
+ spin_lock_init(&context->lock);
+ INIT_LIST_HEAD(&context->list_item);
+ INIT_LIST_HEAD(&context->datagram_queue);
+ INIT_LIST_HEAD(&context->notifier_list);
+
+ /* Initialize host-specific VMCI context. */
+ init_waitqueue_head(&context->host_context.wait_queue);
+
+ context->queue_pair_array = vmci_handle_arr_create(0);
+ if (!context->queue_pair_array) {
+ error = -ENOMEM;
+ goto err_free_ctx;
+ }
+
+ context->doorbell_array = vmci_handle_arr_create(0);
+ if (!context->doorbell_array) {
+ error = -ENOMEM;
+ goto err_free_qp_array;
+ }
+
+ context->pending_doorbell_array = vmci_handle_arr_create(0);
+ if (!context->pending_doorbell_array) {
+ error = -ENOMEM;
+ goto err_free_db_array;
+ }
+
+ context->user_version = user_version;
+
+ context->priv_flags = priv_flags;
+
+ if (cred)
+ context->cred = get_cred(cred);
+
+ context->notify = &ctx_dummy_notify;
+ context->notify_page = NULL;
+
+ /*
+ * If we collide with an existing context we generate a new
+ * and use it instead. The VMX will determine if regeneration
+	 * is okay. Since there aren't 4B - 16 VMs running on a given
+	 * host, the loop below will terminate.
+ */
+ spin_lock(&ctx_list.lock);
+
+ while (vmci_ctx_exists(cid)) {
+ /* We reserve the lowest 16 ids for fixed contexts. */
+ cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1;
+ if (cid == VMCI_INVALID_ID)
+ cid = VMCI_RESERVED_CID_LIMIT;
+ }
+ context->cid = cid;
+
+ list_add_tail_rcu(&context->list_item, &ctx_list.head);
+ spin_unlock(&ctx_list.lock);
+
+ return context;
+
+ err_free_db_array:
+ vmci_handle_arr_destroy(context->doorbell_array);
+ err_free_qp_array:
+ vmci_handle_arr_destroy(context->queue_pair_array);
+ err_free_ctx:
+ kfree(context);
+ err_out:
+ return ERR_PTR(error);
+}
+
+/*
+ * Destroy VMCI context.
+ */
+void vmci_ctx_destroy(struct vmci_ctx *context)
+{
+ spin_lock(&ctx_list.lock);
+ list_del_rcu(&context->list_item);
+ spin_unlock(&ctx_list.lock);
+ synchronize_rcu();
+
+ vmci_ctx_put(context);
+}
+
+/*
+ * Fire notification for all contexts interested in given cid.
+ */
+static int ctx_fire_notification(u32 context_id, u32 priv_flags)
+{
+ u32 i, array_size;
+ struct vmci_ctx *sub_ctx;
+ struct vmci_handle_arr *subscriber_array;
+ struct vmci_handle context_handle =
+ vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
+
+ /*
+ * We create an array to hold the subscribers we find when
+ * scanning through all contexts.
+ */
+ subscriber_array = vmci_handle_arr_create(0);
+ if (subscriber_array == NULL)
+ return VMCI_ERROR_NO_MEM;
+
+ /*
+ * Scan all contexts to find who is interested in being
+ * notified about given contextID.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) {
+ struct vmci_handle_list *node;
+
+ /*
+ * We only deliver notifications of the removal of
+ * contexts, if the two contexts are allowed to
+ * interact.
+ */
+ if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags))
+ continue;
+
+ list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) {
+ if (!vmci_handle_is_equal(node->handle, context_handle))
+ continue;
+
+ vmci_handle_arr_append_entry(&subscriber_array,
+ vmci_make_handle(sub_ctx->cid,
+ VMCI_EVENT_HANDLER));
+ }
+ }
+ rcu_read_unlock();
+
+ /* Fire event to all subscribers. */
+ array_size = vmci_handle_arr_get_size(subscriber_array);
+ for (i = 0; i < array_size; i++) {
+ int result;
+ struct vmci_event_ctx ev;
+
+ ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i);
+ ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID);
+ ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+ ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED;
+ ev.payload.context_id = context_id;
+
+ result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
+ &ev.msg.hdr, false);
+ if (result < VMCI_SUCCESS) {
+ pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n",
+ ev.msg.event_data.event,
+ ev.msg.hdr.dst.context);
+ /* We continue to enqueue on next subscriber. */
+ }
+ }
+ vmci_handle_arr_destroy(subscriber_array);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Returns the current number of pending datagrams. The call may
+ * also serve as a synchronization point for the datagram queue,
+ * as no enqueue operations can occur concurrently.
+ */
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending)
+{
+ struct vmci_ctx *context;
+
+ context = vmci_ctx_get(cid);
+ if (context == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ spin_lock(&context->lock);
+ if (pending)
+ *pending = context->pending_datagrams;
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Queues a VMCI datagram for the appropriate target VM context.
+ */
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg)
+{
+ struct vmci_datagram_queue_entry *dq_entry;
+ struct vmci_ctx *context;
+ struct vmci_handle dg_src;
+ size_t vmci_dg_size;
+
+ vmci_dg_size = VMCI_DG_SIZE(dg);
+ if (vmci_dg_size > VMCI_MAX_DG_SIZE) {
+ pr_devel("Datagram too large (bytes=%Zu)\n", vmci_dg_size);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ /* Get the target VM's VMCI context. */
+ context = vmci_ctx_get(cid);
+ if (!context) {
+ pr_devel("Invalid context (ID=0x%x)\n", cid);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ /* Allocate guest call entry and add it to the target VM's queue. */
+ dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL);
+ if (dq_entry == NULL) {
+ pr_warn("Failed to allocate memory for datagram\n");
+ vmci_ctx_put(context);
+ return VMCI_ERROR_NO_MEM;
+ }
+ dq_entry->dg = dg;
+ dq_entry->dg_size = vmci_dg_size;
+ dg_src = dg->src;
+ INIT_LIST_HEAD(&dq_entry->list_item);
+
+ spin_lock(&context->lock);
+
+ /*
+ * We put a higher limit on datagrams from the hypervisor. If
+ * the pending datagram is not from hypervisor, then we check
+ * if enqueueing it would exceed the
+ * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination. If
+ * the pending datagram is from hypervisor, we allow it to be
+ * queued at the destination side provided we don't reach the
+ * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit.
+ */
+ if (context->datagram_queue_size + vmci_dg_size >=
+ VMCI_MAX_DATAGRAM_QUEUE_SIZE &&
+ (!vmci_handle_is_equal(dg_src,
+ vmci_make_handle
+ (VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID)) ||
+ context->datagram_queue_size + vmci_dg_size >=
+ VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) {
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+ kfree(dq_entry);
+ pr_devel("Context (ID=0x%x) receive queue is full\n", cid);
+ return VMCI_ERROR_NO_RESOURCES;
+ }
+
+ list_add(&dq_entry->list_item, &context->datagram_queue);
+ context->pending_datagrams++;
+ context->datagram_queue_size += vmci_dg_size;
+ ctx_signal_notify(context);
+ wake_up(&context->host_context.wait_queue);
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return vmci_dg_size;
+}
+
+/*
+ * Verifies whether a context with the specified context ID exists.
+ * FIXME: utility is dubious as no decisions can be reliably made
+ * using this data, since a context can appear and disappear at any time.
+ */
+bool vmci_ctx_exists(u32 cid)
+{
+ struct vmci_ctx *context;
+ bool exists = false;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(context, &ctx_list.head, list_item) {
+ if (context->cid == cid) {
+ exists = true;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ return exists;
+}
+
+/*
+ * Retrieves VMCI context corresponding to the given cid.
+ */
+struct vmci_ctx *vmci_ctx_get(u32 cid)
+{
+ struct vmci_ctx *c, *context = NULL;
+
+ if (cid == VMCI_INVALID_ID)
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(c, &ctx_list.head, list_item) {
+ if (c->cid == cid) {
+ /*
+ * The context owner drops its own reference to the
+ * context only after removing it from the list and
+ * waiting for RCU grace period to expire. This
+ * means that we are not about to increase the
+ * reference count of something that is in the
+ * process of being destroyed.
+ */
+ context = c;
+ kref_get(&context->kref);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return context;
+}
+
+/*
+ * Deallocates all parts of a context data structure. This
+ * function doesn't lock the context, because it assumes that
+ * the caller was holding the last reference to context.
+ */
+static void ctx_free_ctx(struct kref *kref)
+{
+ struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref);
+ struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp;
+ struct vmci_handle temp_handle;
+ struct vmci_handle_list *notifier, *tmp;
+
+ /*
+ * Fire event to all contexts interested in knowing this
+ * context is dying.
+ */
+ ctx_fire_notification(context->cid, context->priv_flags);
+
+ /*
+ * Cleanup all queue pair resources attached to context. If
+ * the VM dies without cleaning up, this code will make sure
+ * that no resources are leaked.
+ */
+ temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0);
+ while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) {
+ if (vmci_qp_broker_detach(temp_handle,
+ context) < VMCI_SUCCESS) {
+ /*
+ * When vmci_qp_broker_detach() succeeds it
+ * removes the handle from the array. If
+ * detach fails, we must remove the handle
+ * ourselves.
+ */
+ vmci_handle_arr_remove_entry(context->queue_pair_array,
+ temp_handle);
+ }
+ temp_handle =
+ vmci_handle_arr_get_entry(context->queue_pair_array, 0);
+ }
+
+ /*
+ * It is fine to destroy this without locking the callQueue, as
+ * this is the only thread having a reference to the context.
+ */
+ list_for_each_entry_safe(dq_entry, dq_entry_tmp,
+ &context->datagram_queue, list_item) {
+ WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg));
+ list_del(&dq_entry->list_item);
+ kfree(dq_entry->dg);
+ kfree(dq_entry);
+ }
+
+ list_for_each_entry_safe(notifier, tmp,
+ &context->notifier_list, node) {
+ list_del(&notifier->node);
+ kfree(notifier);
+ }
+
+ vmci_handle_arr_destroy(context->queue_pair_array);
+ vmci_handle_arr_destroy(context->doorbell_array);
+ vmci_handle_arr_destroy(context->pending_doorbell_array);
+ vmci_ctx_unset_notify(context);
+ if (context->cred)
+ put_cred(context->cred);
+ kfree(context);
+}
+
+/*
+ * Drops reference to VMCI context. If this is the last reference to
+ * the context it will be deallocated. A context is created with
+ * a reference count of one, and on destroy, it is removed from
+ * the context list before its reference count is decremented. Thus,
+ * if we reach zero, we are sure that nobody else is about to increment
+ * it (they need the entry in the context list for that), and so there
+ * is no need for locking.
+ */
+void vmci_ctx_put(struct vmci_ctx *context)
+{
+ kref_put(&context->kref, ctx_free_ctx);
+}
+
+/*
+ * Dequeues the next datagram and returns it to caller.
+ * The caller passes in a pointer to the max size datagram
+ * it can handle and the datagram is only unqueued if the
+ * size is less than max_size. If larger max_size is set to
+ * the size of the datagram to give the caller a chance to
+ * set up a larger buffer for the guestcall.
+ */
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+ size_t *max_size,
+ struct vmci_datagram **dg)
+{
+ struct vmci_datagram_queue_entry *dq_entry;
+ struct list_head *list_item;
+ int rv;
+
+ /* Dequeue the next datagram entry. */
+ spin_lock(&context->lock);
+ if (context->pending_datagrams == 0) {
+ ctx_clear_notify_call(context);
+ spin_unlock(&context->lock);
+ pr_devel("No datagrams pending\n");
+ return VMCI_ERROR_NO_MORE_DATAGRAMS;
+ }
+
+ list_item = context->datagram_queue.next;
+
+ dq_entry =
+ list_entry(list_item, struct vmci_datagram_queue_entry, list_item);
+
+ /* Check size of caller's buffer. */
+ if (*max_size < dq_entry->dg_size) {
+ *max_size = dq_entry->dg_size;
+ spin_unlock(&context->lock);
+ pr_devel("Caller's buffer should be at least (size=%u bytes)\n",
+ (u32) *max_size);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ list_del(list_item);
+ context->pending_datagrams--;
+ context->datagram_queue_size -= dq_entry->dg_size;
+ if (context->pending_datagrams == 0) {
+ ctx_clear_notify_call(context);
+ rv = VMCI_SUCCESS;
+ } else {
+ /*
+ * Return the size of the next datagram.
+ */
+ struct vmci_datagram_queue_entry *next_entry;
+
+ list_item = context->datagram_queue.next;
+ next_entry =
+ list_entry(list_item, struct vmci_datagram_queue_entry,
+ list_item);
+
+ /*
+ * The following size_t -> int truncation is fine as
+ * the maximum size of a (routable) datagram is 68KB.
+ */
+ rv = (int)next_entry->dg_size;
+ }
+ spin_unlock(&context->lock);
+
+ /* Caller must free datagram. */
+ *dg = dq_entry->dg;
+ dq_entry->dg = NULL;
+ kfree(dq_entry);
+
+ return rv;
+}
+
+/*
+ * Reverts actions set up by vmci_setup_notify(). Unmaps and unlocks the
+ * page mapped/locked by vmci_setup_notify().
+ */
+void vmci_ctx_unset_notify(struct vmci_ctx *context)
+{
+ struct page *notify_page;
+
+ spin_lock(&context->lock);
+
+ notify_page = context->notify_page;
+ context->notify = &ctx_dummy_notify;
+ context->notify_page = NULL;
+
+ spin_unlock(&context->lock);
+
+ if (notify_page) {
+ kunmap(notify_page);
+ put_page(notify_page);
+ }
+}
+
+/*
+ * Add remote_cid to the list of contexts the current context wants
+ * notifications from/about.
+ */
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle_list *notifier, *n;
+ int result;
+ bool exists = false;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_ERROR_NOT_FOUND;
+
+ if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) {
+ pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n",
+ context_id, remote_cid);
+ result = VMCI_ERROR_DST_UNREACHABLE;
+ goto out;
+ }
+
+ if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+ result = VMCI_ERROR_NO_ACCESS;
+ goto out;
+ }
+
+ notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL);
+ if (!notifier) {
+ result = VMCI_ERROR_NO_MEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&notifier->node);
+ notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+ spin_lock(&context->lock);
+
+ list_for_each_entry(n, &context->notifier_list, node) {
+ if (vmci_handle_is_equal(n->handle, notifier->handle)) {
+ exists = true;
+ break;
+ }
+ }
+
+ if (exists) {
+ kfree(notifier);
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ } else {
+ list_add_tail_rcu(&notifier->node, &context->notifier_list);
+ context->n_notifiers++;
+ result = VMCI_SUCCESS;
+ }
+
+ spin_unlock(&context->lock);
+
+ out:
+ vmci_ctx_put(context);
+ return result;
+}
+
+/*
+ * Remove remote_cid from current context's list of contexts it is
+ * interested in getting notifications from/about.
+ */
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle_list *notifier, *tmp;
+ struct vmci_handle handle;
+ bool found = false;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_ERROR_NOT_FOUND;
+
+ handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+ spin_lock(&context->lock);
+ list_for_each_entry_safe(notifier, tmp,
+ &context->notifier_list, node) {
+ if (vmci_handle_is_equal(notifier->handle, handle)) {
+ list_del_rcu(&notifier->node);
+ context->n_notifiers--;
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&context->lock);
+
+ if (found) {
+ synchronize_rcu();
+ kfree(notifier);
+ }
+
+ vmci_ctx_put(context);
+
+ return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
+}
+
+static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
+ u32 *buf_size, void **pbuf)
+{
+ u32 *notifiers;
+ size_t data_size;
+ struct vmci_handle_list *entry;
+ int i = 0;
+
+ if (context->n_notifiers == 0) {
+ *buf_size = 0;
+ *pbuf = NULL;
+ return VMCI_SUCCESS;
+ }
+
+ data_size = context->n_notifiers * sizeof(*notifiers);
+ if (*buf_size < data_size) {
+ *buf_size = data_size;
+ return VMCI_ERROR_MORE_DATA;
+ }
+
+ notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */
+ if (!notifiers)
+ return VMCI_ERROR_NO_MEM;
+
+ list_for_each_entry(entry, &context->notifier_list, node)
+ notifiers[i++] = entry->handle.context;
+
+ *buf_size = data_size;
+ *pbuf = notifiers;
+ return VMCI_SUCCESS;
+}
+
+static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
+ u32 *buf_size, void **pbuf)
+{
+ struct dbell_cpt_state *dbells;
+ size_t n_doorbells;
+ int i;
+
+ n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
+ if (n_doorbells > 0) {
+ size_t data_size = n_doorbells * sizeof(*dbells);
+ if (*buf_size < data_size) {
+ *buf_size = data_size;
+ return VMCI_ERROR_MORE_DATA;
+ }
+
+ dbells = kmalloc(data_size, GFP_ATOMIC);
+ if (!dbells)
+ return VMCI_ERROR_NO_MEM;
+
+ for (i = 0; i < n_doorbells; i++)
+ dbells[i].handle = vmci_handle_arr_get_entry(
+ context->doorbell_array, i);
+
+ *buf_size = data_size;
+ *pbuf = dbells;
+ } else {
+ *buf_size = 0;
+ *pbuf = NULL;
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Get current context's checkpoint state of given type.
+ */
+int vmci_ctx_get_chkpt_state(u32 context_id,
+ u32 cpt_type,
+ u32 *buf_size,
+ void **pbuf)
+{
+ struct vmci_ctx *context;
+ int result;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+
+ switch (cpt_type) {
+ case VMCI_NOTIFICATION_CPT_STATE:
+ result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf);
+ break;
+
+ case VMCI_WELLKNOWN_CPT_STATE:
+ /*
+ * For compatibility with VMX'en with VM to VM communication, we
+ * always return zero wellknown handles.
+ */
+
+ *buf_size = 0;
+ *pbuf = NULL;
+ result = VMCI_SUCCESS;
+ break;
+
+ case VMCI_DOORBELL_CPT_STATE:
+ result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf);
+ break;
+
+ default:
+ pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
+ result = VMCI_ERROR_INVALID_ARGS;
+ break;
+ }
+
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return result;
+}
+
+/*
+ * Set current context's checkpoint state of given type.
+ */
+int vmci_ctx_set_chkpt_state(u32 context_id,
+ u32 cpt_type,
+ u32 buf_size,
+ void *cpt_buf)
+{
+ u32 i;
+ u32 current_id;
+ int result = VMCI_SUCCESS;
+ u32 num_ids = buf_size / sizeof(u32);
+
+ if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) {
+ /*
+ * We would end up here if VMX with VM to VM communication
+ * attempts to restore a checkpoint with wellknown handles.
+ */
+ pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n");
+ return VMCI_ERROR_OBSOLETE;
+ }
+
+ if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) {
+ pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) {
+ current_id = ((u32 *)cpt_buf)[i];
+ result = vmci_ctx_add_notification(context_id, current_id);
+ if (result != VMCI_SUCCESS)
+ break;
+ }
+ if (result != VMCI_SUCCESS)
+ pr_devel("Failed to set cpt state (type=%d) (error=%d)\n",
+ cpt_type, result);
+
+ return result;
+}
+
+/*
+ * Retrieves the specified context's pending notifications in the
+ * form of a handle array. The handle arrays returned are the
+ * actual data - not a copy - and should not be modified by the
+ * caller. They must be released using
+ * vmci_ctx_rcv_notifications_release.
+ */
+int vmci_ctx_rcv_notifications_get(u32 context_id,
+ struct vmci_handle_arr **db_handle_array,
+ struct vmci_handle_arr **qp_handle_array)
+{
+ struct vmci_ctx *context;
+ int result = VMCI_SUCCESS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+
+ *db_handle_array = context->pending_doorbell_array;
+ context->pending_doorbell_array = vmci_handle_arr_create(0);
+ if (!context->pending_doorbell_array) {
+ context->pending_doorbell_array = *db_handle_array;
+ *db_handle_array = NULL;
+ result = VMCI_ERROR_NO_MEM;
+ }
+ *qp_handle_array = NULL;
+
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return result;
+}
+
+/*
+ * Releases handle arrays with pending notifications previously
+ * retrieved using vmci_ctx_rcv_notifications_get. If the
+ * notifications were not successfully handed over to the guest,
+ * success must be false.
+ */
+void vmci_ctx_rcv_notifications_release(u32 context_id,
+ struct vmci_handle_arr *db_handle_array,
+ struct vmci_handle_arr *qp_handle_array,
+ bool success)
+{
+ struct vmci_ctx *context = vmci_ctx_get(context_id);
+
+ spin_lock(&context->lock);
+ if (!success) {
+ struct vmci_handle handle;
+
+ /*
+ * New notifications may have been added while we were not
+ * holding the context lock, so we transfer any new pending
+ * doorbell notifications to the old array, and reinstate the
+ * old array.
+ */
+
+ handle = vmci_handle_arr_remove_tail(
+ context->pending_doorbell_array);
+ while (!vmci_handle_is_invalid(handle)) {
+ if (!vmci_handle_arr_has_entry(db_handle_array,
+ handle)) {
+ vmci_handle_arr_append_entry(
+ &db_handle_array, handle);
+ }
+ handle = vmci_handle_arr_remove_tail(
+ context->pending_doorbell_array);
+ }
+ vmci_handle_arr_destroy(context->pending_doorbell_array);
+ context->pending_doorbell_array = db_handle_array;
+ db_handle_array = NULL;
+ } else {
+ ctx_clear_notify_call(context);
+ }
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ if (db_handle_array)
+ vmci_handle_arr_destroy(db_handle_array);
+
+ if (qp_handle_array)
+ vmci_handle_arr_destroy(qp_handle_array);
+}
+
+/*
+ * Registers that a new doorbell handle has been allocated by the
+ * context. Only doorbell handles registered can be notified.
+ */
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
+{
+ struct vmci_ctx *context;
+ int result;
+
+ if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+ if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) {
+ vmci_handle_arr_append_entry(&context->doorbell_array, handle);
+ result = VMCI_SUCCESS;
+ } else {
+ result = VMCI_ERROR_DUPLICATE_ENTRY;
+ }
+
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+
+ return result;
+}
+
+/*
+ * Unregisters a doorbell handle that was previously registered
+ * with vmci_ctx_dbell_create.
+ */
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle removed_handle;
+
+ if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+ removed_handle =
+ vmci_handle_arr_remove_entry(context->doorbell_array, handle);
+ vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle);
+ spin_unlock(&context->lock);
+
+ vmci_ctx_put(context);
+
+ return vmci_handle_is_invalid(removed_handle) ?
+ VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
+}
+
+/*
+ * Unregisters all doorbell handles that were previously
+ * registered with vmci_ctx_dbell_create.
+ */
+int vmci_ctx_dbell_destroy_all(u32 context_id)
+{
+ struct vmci_ctx *context;
+ struct vmci_handle handle;
+
+ if (context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ context = vmci_ctx_get(context_id);
+ if (context == NULL)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+ do {
+ struct vmci_handle_arr *arr = context->doorbell_array;
+ handle = vmci_handle_arr_remove_tail(arr);
+ } while (!vmci_handle_is_invalid(handle));
+ do {
+ struct vmci_handle_arr *arr = context->pending_doorbell_array;
+ handle = vmci_handle_arr_remove_tail(arr);
+ } while (!vmci_handle_is_invalid(handle));
+ spin_unlock(&context->lock);
+
+ vmci_ctx_put(context);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Registers a notification of a doorbell handle initiated by the
+ * specified source context. Notifications of doorbells are
+ * subject to the same isolation rules as datagram delivery. To
+ * allow host side senders of notifications a finer granularity
+ * of sender rights than those assigned to the sending context
+ * itself, the host context is required to specify a different
+ * set of privilege flags that will override the privileges of
+ * the source context.
+ */
+int vmci_ctx_notify_dbell(u32 src_cid,
+ struct vmci_handle handle,
+ u32 src_priv_flags)
+{
+ struct vmci_ctx *dst_context;
+ int result;
+
+ if (vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /* Get the target VM's VMCI context. */
+ dst_context = vmci_ctx_get(handle.context);
+ if (!dst_context) {
+ pr_devel("Invalid context (ID=0x%x)\n", handle.context);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ if (src_cid != handle.context) {
+ u32 dst_priv_flags;
+
+ if (VMCI_CONTEXT_IS_VM(src_cid) &&
+ VMCI_CONTEXT_IS_VM(handle.context)) {
+ pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n",
+ src_cid, handle.context);
+ result = VMCI_ERROR_DST_UNREACHABLE;
+ goto out;
+ }
+
+ result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ goto out;
+ }
+
+ if (src_cid != VMCI_HOST_CONTEXT_ID ||
+ src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) {
+ src_priv_flags = vmci_context_get_priv_flags(src_cid);
+ }
+
+ if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) {
+ result = VMCI_ERROR_NO_ACCESS;
+ goto out;
+ }
+ }
+
+ if (handle.context == VMCI_HOST_CONTEXT_ID) {
+ result = vmci_dbell_host_context_notify(src_cid, handle);
+ } else {
+ spin_lock(&dst_context->lock);
+
+ if (!vmci_handle_arr_has_entry(dst_context->doorbell_array,
+ handle)) {
+ result = VMCI_ERROR_NOT_FOUND;
+ } else {
+ if (!vmci_handle_arr_has_entry(
+ dst_context->pending_doorbell_array,
+ handle)) {
+ vmci_handle_arr_append_entry(
+ &dst_context->pending_doorbell_array,
+ handle);
+
+ ctx_signal_notify(dst_context);
+ wake_up(&dst_context->host_context.wait_queue);
+
+ }
+ result = VMCI_SUCCESS;
+ }
+ spin_unlock(&dst_context->lock);
+ }
+
+ out:
+ vmci_ctx_put(dst_context);
+
+ return result;
+}
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context)
+{
+ return context && context->user_version >= VMCI_VERSION_HOSTQP;
+}
+
+/*
+ * Registers that a new queue pair handle has been allocated by
+ * the context.
+ */
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
+{
+ int result;
+
+ if (context == NULL || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) {
+ vmci_handle_arr_append_entry(&context->queue_pair_array,
+ handle);
+ result = VMCI_SUCCESS;
+ } else {
+ result = VMCI_ERROR_DUPLICATE_ENTRY;
+ }
+
+ return result;
+}
+
+/*
+ * Unregisters a queue pair handle that was previously registered
+ * with vmci_ctx_qp_create.
+ */
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle)
+{
+ struct vmci_handle hndl;
+
+ if (context == NULL || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle);
+
+ return vmci_handle_is_invalid(hndl) ?
+ VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
+}
+
+/*
+ * Determines whether a given queue pair handle is registered
+ * with the given context.
+ */
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle)
+{
+ if (context == NULL || vmci_handle_is_invalid(handle))
+ return false;
+
+ return vmci_handle_arr_has_entry(context->queue_pair_array, handle);
+}
+
+/*
+ * vmci_context_get_priv_flags() - Retrieve privilege flags.
+ * @context_id: The context ID of the VMCI context.
+ *
+ * Retrieves privilege flags of the given VMCI context ID.
+ */
+u32 vmci_context_get_priv_flags(u32 context_id)
+{
+ if (vmci_host_code_active()) {
+ u32 flags;
+ struct vmci_ctx *context;
+
+ context = vmci_ctx_get(context_id);
+ if (!context)
+ return VMCI_LEAST_PRIVILEGE_FLAGS;
+
+ flags = context->priv_flags;
+ vmci_ctx_put(context);
+ return flags;
+ }
+ return VMCI_NO_PRIVILEGE_FLAGS;
+}
+EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags);
+
+/*
+ * vmci_is_context_owner() - Determines if the user is the context owner
+ * @context_id: The context ID of the VMCI context.
+ * @uid: The host user id (real kernel value).
+ *
+ * Determines whether a given UID is the owner of given VMCI context.
+ */
+bool vmci_is_context_owner(u32 context_id, kuid_t uid)
+{
+ bool is_owner = false;
+
+ if (vmci_host_code_active()) {
+ struct vmci_ctx *context = vmci_ctx_get(context_id);
+ if (context) {
+ if (context->cred)
+ is_owner = uid_eq(context->cred->uid, uid);
+ vmci_ctx_put(context);
+ }
+ }
+
+ return is_owner;
+}
+EXPORT_SYMBOL_GPL(vmci_is_context_owner);
diff --git a/drivers/misc/vmw_vmci/vmci_context.h b/drivers/misc/vmw_vmci/vmci_context.h
new file mode 100644
index 000000000000..24a88e68a1e6
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_context.h
@@ -0,0 +1,182 @@
+/*
+ * VMware VMCI driver (vmciContext.h)
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_CONTEXT_H_
+#define _VMCI_CONTEXT_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_datagram.h"
+
+/* Used to determine what checkpoint state to get and set. */
+enum {
+ VMCI_NOTIFICATION_CPT_STATE = 1,
+ VMCI_WELLKNOWN_CPT_STATE = 2,
+ VMCI_DG_OUT_STATE = 3,
+ VMCI_DG_IN_STATE = 4,
+ VMCI_DG_IN_SIZE_STATE = 5,
+ VMCI_DOORBELL_CPT_STATE = 6,
+};
+
+/* Host specific struct used for signalling */
+struct vmci_host {
+ wait_queue_head_t wait_queue;
+};
+
+struct vmci_handle_list {
+ struct list_head node;
+ struct vmci_handle handle;
+};
+
+struct vmci_ctx {
+ struct list_head list_item; /* For global VMCI list. */
+ u32 cid;
+ struct kref kref;
+ struct list_head datagram_queue; /* Head of per VM queue. */
+ u32 pending_datagrams;
+ size_t datagram_queue_size; /* Size of datagram queue in bytes. */
+
+ /*
+ * Version of the code that created
+ * this context; e.g., VMX.
+ */
+ int user_version;
+ spinlock_t lock; /* Locks the datagram queue and handle arrays. */
+
+ /*
+ * Queue pairs attached to this context. The array of
+ * handles for queue pairs is accessed from the QP API
+ * code, where it is protected by the QP lock. It is
+ * also accessed from the context cleanup path, which
+ * does not require a lock. VMCILock is not used to
+ * protect the QP array field.
+ */
+ struct vmci_handle_arr *queue_pair_array;
+
+ /* Doorbells created by context. */
+ struct vmci_handle_arr *doorbell_array;
+
+ /* Doorbells pending for context. */
+ struct vmci_handle_arr *pending_doorbell_array;
+
+ /* Contexts current context is subscribing to. */
+ struct list_head notifier_list;
+ unsigned int n_notifiers;
+
+ struct vmci_host host_context;
+ u32 priv_flags;
+
+ const struct cred *cred;
+ bool *notify; /* Notify flag pointer - hosted only. */
+ struct page *notify_page; /* Page backing the notify UVA. */
+};
+
+/* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */
+struct vmci_ctx_info {
+ u32 remote_cid;
+ int result;
+};
+
+/* VMCICptBufInfo: Used to set/get current context's checkpoint state. */
+struct vmci_ctx_chkpt_buf_info {
+ u64 cpt_buf;
+ u32 cpt_type;
+ u32 buf_size;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * VMCINotificationReceiveInfo: Used to receive pending notifications
+ * for doorbells and queue pairs.
+ */
+struct vmci_ctx_notify_recv_info {
+ u64 db_handle_buf_uva;
+ u64 db_handle_buf_size;
+ u64 qp_handle_buf_uva;
+ u64 qp_handle_buf_size;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * Utility function that checks whether two entities are allowed
+ * to interact. If one of them is restricted, the other one must
+ * be trusted.
+ */
+static inline bool vmci_deny_interaction(u32 part_one, u32 part_two)
+{
+ return ((part_one & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+ !(part_two & VMCI_PRIVILEGE_FLAG_TRUSTED)) ||
+ ((part_two & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+ !(part_one & VMCI_PRIVILEGE_FLAG_TRUSTED));
+}
+
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 flags,
+ uintptr_t event_hnd, int version,
+ const struct cred *cred);
+void vmci_ctx_destroy(struct vmci_ctx *context);
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context);
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg);
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+ size_t *max_size, struct vmci_datagram **dg);
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending);
+struct vmci_ctx *vmci_ctx_get(u32 cid);
+void vmci_ctx_put(struct vmci_ctx *context);
+bool vmci_ctx_exists(u32 cid);
+
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type,
+ u32 *num_cids, void **cpt_buf_ptr);
+int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type,
+ u32 num_cids, void *cpt_buf);
+
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle);
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle);
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle);
+
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context);
+void vmci_ctx_unset_notify(struct vmci_ctx *context);
+
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy_all(u32 context_id);
+int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle,
+ u32 src_priv_flags);
+
+int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr
+ **db_handle_array, struct vmci_handle_arr
+ **qp_handle_array);
+void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr
+ *db_handle_array, struct vmci_handle_arr
+ *qp_handle_array, bool success);
+
+static inline u32 vmci_ctx_get_id(struct vmci_ctx *context)
+{
+ if (!context)
+ return VMCI_INVALID_ID;
+ return context->cid;
+}
+
+#endif /* _VMCI_CONTEXT_H_ */
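
The vmci_deny_interaction() helper above encodes the isolation rule used
throughout the driver: a restricted endpoint may only interact with a peer
that is trusted. As a rough illustration only, the hypothetical
may_interact() below (not part of this patch) is simply the same expression
inverted:

/* Assumes the flag macros from linux/vmw_vmci_defs.h. */
static inline bool may_interact(u32 flags_a, u32 flags_b)
{
	/* A restricted side may only talk to a trusted peer. */
	if ((flags_a & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
	    !(flags_b & VMCI_PRIVILEGE_FLAG_TRUSTED))
		return false;
	if ((flags_b & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
	    !(flags_a & VMCI_PRIVILEGE_FLAG_TRUSTED))
		return false;
	return true;
}
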
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
new file mode 100644
index 000000000000..ed5c433cd493
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -0,0 +1,500 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bug.h>
+
+#include "vmci_datagram.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+#include "vmci_route.h"
+
+/*
+ * struct datagram_entry describes the datagram entity. It is used for datagram
+ * entities created only on the host.
+ */
+struct datagram_entry {
+ struct vmci_resource resource;
+ u32 flags;
+ bool run_delayed;
+ vmci_datagram_recv_cb recv_cb;
+ void *client_data;
+ u32 priv_flags;
+};
+
+struct delayed_datagram_info {
+ struct datagram_entry *entry;
+ struct vmci_datagram msg;
+ struct work_struct work;
+ bool in_dg_host_queue;
+};
+
+/* Number of in-flight host->host datagrams */
+static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0);
+
+/*
+ * Create a datagram entry given a handle pointer.
+ */
+static int dg_create_handle(u32 resource_id,
+ u32 flags,
+ u32 priv_flags,
+ vmci_datagram_recv_cb recv_cb,
+ void *client_data, struct vmci_handle *out_handle)
+{
+ int result;
+ u32 context_id;
+ struct vmci_handle handle;
+ struct datagram_entry *entry;
+
+ if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) {
+ context_id = VMCI_INVALID_ID;
+ } else {
+ context_id = vmci_get_context_id();
+ if (context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_NO_RESOURCES;
+ }
+
+ handle = vmci_make_handle(context_id, resource_id);
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_warn("Failed allocating memory for datagram entry\n");
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false;
+ entry->flags = flags;
+ entry->recv_cb = recv_cb;
+ entry->client_data = client_data;
+ entry->priv_flags = priv_flags;
+
+ /* Make datagram resource live. */
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_DATAGRAM,
+ handle);
+ if (result != VMCI_SUCCESS) {
+ pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+ handle.context, handle.resource, result);
+ kfree(entry);
+ return result;
+ }
+
+ *out_handle = vmci_resource_handle(&entry->resource);
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Internal utility function that retrieves the privilege flags of the
+ * sender of a datagram, given the sending context_id and source handle.
+ */
+static int vmci_datagram_get_priv_flags(u32 context_id,
+ struct vmci_handle handle,
+ u32 *priv_flags)
+{
+ if (context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (context_id == VMCI_HOST_CONTEXT_ID) {
+ struct datagram_entry *src_entry;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src_entry = container_of(resource, struct datagram_entry,
+ resource);
+ *priv_flags = src_entry->priv_flags;
+ vmci_resource_put(resource);
+ } else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID)
+ *priv_flags = VMCI_MAX_PRIVILEGE_FLAGS;
+ else
+ *priv_flags = vmci_context_get_priv_flags(context_id);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Calls the specified callback in a delayed context.
+ */
+static void dg_delayed_dispatch(struct work_struct *work)
+{
+ struct delayed_datagram_info *dg_info =
+ container_of(work, struct delayed_datagram_info, work);
+
+ dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg);
+
+ vmci_resource_put(&dg_info->entry->resource);
+
+ if (dg_info->in_dg_host_queue)
+ atomic_dec(&delayed_dg_host_queue_size);
+
+ kfree(dg_info);
+}
+
+/*
+ * Dispatch datagram as a host, to the host, or other vm context. This
+ * function cannot dispatch to hypervisor context handlers. This should
+ * have been handled before we get here by vmci_datagram_dispatch.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
+{
+ int retval;
+ size_t dg_size;
+ u32 src_priv_flags;
+
+ dg_size = VMCI_DG_SIZE(dg);
+
+ /* Host cannot send to the hypervisor. */
+ if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID)
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /* Check that source handle matches sending context. */
+ if (dg->src.context != context_id) {
+ pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n",
+ context_id, dg->src.context, dg->src.resource);
+ return VMCI_ERROR_NO_ACCESS;
+ }
+
+ /* Get hold of privileges of sending endpoint. */
+ retval = vmci_datagram_get_priv_flags(context_id, dg->src,
+ &src_priv_flags);
+ if (retval != VMCI_SUCCESS) {
+ pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n",
+ dg->src.context, dg->src.resource);
+ return retval;
+ }
+
+ /* Determine if we should route to host or guest destination. */
+ if (dg->dst.context == VMCI_HOST_CONTEXT_ID) {
+ /* Route to host datagram entry. */
+ struct datagram_entry *dst_entry;
+ struct vmci_resource *resource;
+
+ if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
+ dg->dst.resource == VMCI_EVENT_HANDLER) {
+ return vmci_event_dispatch(dg);
+ }
+
+ resource = vmci_resource_by_handle(dg->dst,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource) {
+ pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n",
+ dg->dst.context, dg->dst.resource);
+ return VMCI_ERROR_INVALID_RESOURCE;
+ }
+ dst_entry = container_of(resource, struct datagram_entry,
+ resource);
+ if (vmci_deny_interaction(src_priv_flags,
+ dst_entry->priv_flags)) {
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_ACCESS;
+ }
+
+ /*
+ * If a VMCI datagram destined for the host is also sent by the
+ * host, we always run it delayed. This ensures that no locks
+ * are held when the datagram callback runs.
+ */
+ if (dst_entry->run_delayed ||
+ dg->src.context == VMCI_HOST_CONTEXT_ID) {
+ struct delayed_datagram_info *dg_info;
+
+ if (atomic_add_return(1, &delayed_dg_host_queue_size)
+ == VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
+ atomic_dec(&delayed_dg_host_queue_size);
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ dg_info = kmalloc(sizeof(*dg_info) +
+ (size_t) dg->payload_size, GFP_ATOMIC);
+ if (!dg_info) {
+ atomic_dec(&delayed_dg_host_queue_size);
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ dg_info->in_dg_host_queue = true;
+ dg_info->entry = dst_entry;
+ memcpy(&dg_info->msg, dg, dg_size);
+
+ INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+ schedule_work(&dg_info->work);
+ retval = VMCI_SUCCESS;
+
+ } else {
+ retval = dst_entry->recv_cb(dst_entry->client_data, dg);
+ vmci_resource_put(resource);
+ if (retval < VMCI_SUCCESS)
+ return retval;
+ }
+ } else {
+ /* Route to destination VM context. */
+ struct vmci_datagram *new_dg;
+
+ if (context_id != dg->dst.context) {
+ if (vmci_deny_interaction(src_priv_flags,
+ vmci_context_get_priv_flags
+ (dg->dst.context))) {
+ return VMCI_ERROR_NO_ACCESS;
+ } else if (VMCI_CONTEXT_IS_VM(context_id)) {
+ /*
+ * If the sending context is a VM, it
+ * cannot reach another VM.
+ */
+
+ pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n",
+ context_id, dg->dst.context);
+ return VMCI_ERROR_DST_UNREACHABLE;
+ }
+ }
+
+ /* We make a copy to enqueue. */
+ new_dg = kmalloc(dg_size, GFP_KERNEL);
+ if (new_dg == NULL)
+ return VMCI_ERROR_NO_MEM;
+
+ memcpy(new_dg, dg, dg_size);
+ retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg);
+ if (retval < VMCI_SUCCESS) {
+ kfree(new_dg);
+ return retval;
+ }
+ }
+
+ /*
+ * We currently truncate the size to signed 32 bits. This doesn't
+ * matter for this handler as it only supports 4Kb messages.
+ */
+ return (int)dg_size;
+}
+
+/*
+ * Dispatch datagram as a guest, down through the VMX and potentially to
+ * the host.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+static int dg_dispatch_as_guest(struct vmci_datagram *dg)
+{
+ int retval;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(dg->src,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource)
+ return VMCI_ERROR_NO_HANDLE;
+
+ retval = vmci_send_datagram(dg);
+ vmci_resource_put(resource);
+ return retval;
+}
+
+/*
+ * Dispatch datagram. This will determine the routing for the datagram
+ * and dispatch it accordingly.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+int vmci_datagram_dispatch(u32 context_id,
+ struct vmci_datagram *dg, bool from_guest)
+{
+ int retval;
+ enum vmci_route route;
+
+ BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24);
+
+ if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) {
+ pr_devel("Payload (size=%llu bytes) too big to send\n",
+ (unsigned long long)dg->payload_size);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ retval = vmci_route(&dg->src, &dg->dst, from_guest, &route);
+ if (retval < VMCI_SUCCESS) {
+ pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n",
+ dg->src.context, dg->dst.context, retval);
+ return retval;
+ }
+
+ if (VMCI_ROUTE_AS_HOST == route) {
+ if (VMCI_INVALID_ID == context_id)
+ context_id = VMCI_HOST_CONTEXT_ID;
+ return dg_dispatch_as_host(context_id, dg);
+ }
+
+ if (VMCI_ROUTE_AS_GUEST == route)
+ return dg_dispatch_as_guest(dg);
+
+ pr_warn("Unknown route (%d) for datagram\n", route);
+ return VMCI_ERROR_DST_UNREACHABLE;
+}
+
+/*
+ * Invoke the handler for the given datagram. This is intended to be
+ * called only when acting as a guest and receiving a datagram from the
+ * virtual device.
+ */
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
+{
+ struct vmci_resource *resource;
+ struct datagram_entry *dst_entry;
+
+ resource = vmci_resource_by_handle(dg->dst,
+ VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource) {
+ pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n",
+ dg->dst.context, dg->dst.resource);
+ return VMCI_ERROR_NO_HANDLE;
+ }
+
+ dst_entry = container_of(resource, struct datagram_entry, resource);
+ if (dst_entry->run_delayed) {
+ struct delayed_datagram_info *dg_info;
+
+ dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size,
+ GFP_ATOMIC);
+ if (!dg_info) {
+ vmci_resource_put(resource);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ dg_info->in_dg_host_queue = false;
+ dg_info->entry = dst_entry;
+ memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+
+ INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+ schedule_work(&dg_info->work);
+ } else {
+ dst_entry->recv_cb(dst_entry->client_data, dg);
+ vmci_resource_put(resource);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_datagram_create_handle_priv() - Create host context datagram endpoint
+ * @resource_id: The resource ID.
+ * @flags: Datagram Flags.
+ * @priv_flags: Privilege Flags.
+ * @recv_cb: Callback when receiving datagrams.
+ * @client_data: Opaque pointer passed to the receive callback.
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to it.
+ */
+int vmci_datagram_create_handle_priv(u32 resource_id,
+ u32 flags,
+ u32 priv_flags,
+ vmci_datagram_recv_cb recv_cb,
+ void *client_data,
+ struct vmci_handle *out_handle)
+{
+ if (out_handle == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (recv_cb == NULL) {
+ pr_devel("Client callback needed when creating datagram\n");
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ return dg_create_handle(resource_id, flags, priv_flags, recv_cb,
+ client_data, out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv);
+
+/*
+ * vmci_datagram_create_handle() - Create host context datagram endpoint
+ * @resource_id: Resource ID.
+ * @flags: Datagram Flags.
+ * @recv_cb: Callback when receiving datagrams.
+ * @client_data: Opaque pointer passed to the receive callback.
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to
+ * it. Same as vmci_datagram_create_handle_priv without the privilege
+ * flags argument.
+ */
+int vmci_datagram_create_handle(u32 resource_id,
+ u32 flags,
+ vmci_datagram_recv_cb recv_cb,
+ void *client_data,
+ struct vmci_handle *out_handle)
+{
+ return vmci_datagram_create_handle_priv(
+ resource_id, flags,
+ VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+ recv_cb, client_data,
+ out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle);
+
+/*
+ * vmci_datagram_destroy_handle() - Destroys datagram handle
+ * @handle: vmci_handle to be destroyed and reaped.
+ *
+ * Use this function to destroy any datagram handles created by
+ * the vmci_datagram_create_handle{,_priv} functions.
+ */
+int vmci_datagram_destroy_handle(struct vmci_handle handle)
+{
+ struct datagram_entry *entry;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM);
+ if (!resource) {
+ pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ entry = container_of(resource, struct datagram_entry, resource);
+
+ vmci_resource_put(&entry->resource);
+ vmci_resource_remove(&entry->resource);
+ kfree(entry);
+
+ return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle);
+
+/*
+ * vmci_datagram_send() - Send a datagram
+ * @msg: The datagram to send.
+ *
+ * Sends the provided datagram on its merry way.
+ */
+int vmci_datagram_send(struct vmci_datagram *msg)
+{
+ if (msg == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_send);
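
Taken together, the exported calls above give host-side kernel clients a
simple endpoint model: create a handle with a receive callback, send with
vmci_datagram_send(), and tear the handle down when done. A minimal sketch
follows; the resource ID 0x42, the callback body and the example_* names are
invented for illustration and are not part of this patch.

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static struct vmci_handle example_handle;

/* Invoked for each datagram addressed to the handle below. */
static int example_recv_cb(void *client_data, struct vmci_datagram *dg)
{
	pr_info("VMCI datagram from 0x%x:0x%x, %llu payload bytes\n",
		dg->src.context, dg->src.resource,
		(unsigned long long)dg->payload_size);
	return VMCI_SUCCESS;
}

static int example_endpoint_init(void)
{
	/* 0x42 is an arbitrary resource ID; 0 means no special flags. */
	int rv = vmci_datagram_create_handle(0x42, 0, example_recv_cb,
					     NULL, &example_handle);
	return rv == VMCI_SUCCESS ? 0 : -ENODEV;
}

static void example_endpoint_exit(void)
{
	vmci_datagram_destroy_handle(example_handle);
}
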
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.h b/drivers/misc/vmw_vmci/vmci_datagram.h
new file mode 100644
index 000000000000..eb4aab7f64ec
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.h
@@ -0,0 +1,52 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_DATAGRAM_H_
+#define _VMCI_DATAGRAM_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include "vmci_context.h"
+
+#define VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE 256
+
+/*
+ * The struct vmci_datagram_queue_entry is a queue header for the in-kernel VMCI
+ * datagram queues. It is allocated in non-paged memory, as the
+ * content is accessed while holding a spinlock. The pending datagram
+ * itself may be allocated from paged memory. We shadow the size of
+ * the datagram in the non-paged queue entry as this size is used
+ * while holding the same spinlock as above.
+ */
+struct vmci_datagram_queue_entry {
+ struct list_head list_item; /* For queuing. */
+ size_t dg_size; /* Size of datagram. */
+ struct vmci_datagram *dg; /* Pending datagram. */
+};
+
+/* VMCIDatagramSendRecvInfo */
+struct vmci_datagram_snd_rcv_info {
+ u64 addr;
+ u32 len;
+ s32 result;
+};
+
+/* Datagram API for non-public use. */
+int vmci_datagram_dispatch(u32 context_id, struct vmci_datagram *dg,
+ bool from_guest);
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg);
+
+#endif /* _VMCI_DATAGRAM_H_ */
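
On the wire, a datagram is a struct vmci_datagram header immediately
followed by its payload, with payload_size counting only the payload; that
is why the messages built elsewhere in this patch use
sizeof(msg) - VMCI_DG_HEADERSIZE. A hedged sketch of a sender follows; the
my_msg layout, its value field and my_send() are invented for illustration.

struct my_msg {
	struct vmci_datagram hdr;	/* dst, src, payload_size */
	u32 value;			/* example payload */
};

static int my_send(struct vmci_handle dst, u32 value)
{
	struct my_msg msg;

	msg.hdr.dst = dst;
	msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	msg.hdr.payload_size = sizeof(msg) - VMCI_DG_HEADERSIZE;
	msg.value = value;

	/* Returns the number of bytes sent, or a VMCI error code. */
	return vmci_datagram_send(&msg.hdr);
}
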
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
new file mode 100644
index 000000000000..c3e8397f62ed
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -0,0 +1,604 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/completion.h>
+#include <linux/hash.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_resource.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+
+#define VMCI_DOORBELL_INDEX_BITS 6
+#define VMCI_DOORBELL_INDEX_TABLE_SIZE (1 << VMCI_DOORBELL_INDEX_BITS)
+#define VMCI_DOORBELL_HASH(_idx) hash_32(_idx, VMCI_DOORBELL_INDEX_BITS)
+
+/*
+ * struct dbell_entry describes a doorbell notification handle allocated by
+ * the host.
+ */
+struct dbell_entry {
+ struct vmci_resource resource;
+ struct hlist_node node;
+ struct work_struct work;
+ vmci_callback notify_cb;
+ void *client_data;
+ u32 idx;
+ u32 priv_flags;
+ bool run_delayed;
+ atomic_t active; /* Only used by guest personality */
+};
+
+/* The VMCI index table keeps track of currently registered doorbells. */
+struct dbell_index_table {
+ spinlock_t lock; /* Index table lock */
+ struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE];
+};
+
+static struct dbell_index_table vmci_doorbell_it = {
+ .lock = __SPIN_LOCK_UNLOCKED(vmci_doorbell_it.lock),
+};
+
+/*
+ * The max_notify_idx is one larger than the currently known bitmap index in
+ * use, and is used to determine how much of the bitmap needs to be scanned.
+ */
+static u32 max_notify_idx;
+
+/*
+ * The notify_idx_count is used for determining whether there are free entries
+ * within the bitmap (if notify_idx_count + 1 < max_notify_idx).
+ */
+static u32 notify_idx_count;
+
+/*
+ * The last_notify_idx_reserved is used to track the last index handed out - in
+ * the case where multiple handles share a notification index, we hand out
+ * indexes round robin based on last_notify_idx_reserved.
+ */
+static u32 last_notify_idx_reserved;
+
+/* This is a one-entry cache used by the index allocation. */
+static u32 last_notify_idx_released = PAGE_SIZE;
+
+
+/*
+ * Utility function that retrieves the privilege flags associated
+ * with a given doorbell handle. For guest endpoints, the
+ * privileges are determined by the context ID, but for host
+ * endpoints privileges are associated with the complete
+ * handle. Hypervisor endpoints are not yet supported.
+ */
+int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags)
+{
+ if (priv_flags == NULL || handle.context == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (handle.context == VMCI_HOST_CONTEXT_ID) {
+ struct dbell_entry *entry;
+ struct vmci_resource *resource;
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DOORBELL);
+ if (!resource)
+ return VMCI_ERROR_NOT_FOUND;
+
+ entry = container_of(resource, struct dbell_entry, resource);
+ *priv_flags = entry->priv_flags;
+ vmci_resource_put(resource);
+ } else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) {
+ /*
+ * Hypervisor endpoints for notifications are not
+ * supported (yet).
+ */
+ return VMCI_ERROR_INVALID_ARGS;
+ } else {
+ *priv_flags = vmci_context_get_priv_flags(handle.context);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Find doorbell entry by bitmap index.
+ */
+static struct dbell_entry *dbell_index_table_find(u32 idx)
+{
+ u32 bucket = VMCI_DOORBELL_HASH(idx);
+ struct dbell_entry *dbell;
+ struct hlist_node *node;
+
+ hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket],
+ node) {
+ if (idx == dbell->idx)
+ return dbell;
+ }
+
+ return NULL;
+}
+
+/*
+ * Add the given entry to the index table. This will take a reference to the
+ * entry's resource so that the entry is not deleted before it is removed from
+ * the table.
+ */
+static void dbell_index_table_add(struct dbell_entry *entry)
+{
+ u32 bucket;
+ u32 new_notify_idx;
+
+ vmci_resource_get(&entry->resource);
+
+ spin_lock_bh(&vmci_doorbell_it.lock);
+
+ /*
+ * Below we try to allocate an index in the notification
+ * bitmap with "not too much" sharing between resources. If we
+ * use less than the full bitmap, we either add to the end if
+ * there are no unused flags within the currently used area,
+ * or we search for unused ones. If we use the full bitmap, we
+ * allocate the index round robin.
+ */
+ if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) {
+ if (last_notify_idx_released < max_notify_idx &&
+ !dbell_index_table_find(last_notify_idx_released)) {
+ new_notify_idx = last_notify_idx_released;
+ last_notify_idx_released = PAGE_SIZE;
+ } else {
+ bool reused = false;
+ new_notify_idx = last_notify_idx_reserved;
+ if (notify_idx_count + 1 < max_notify_idx) {
+ do {
+ if (!dbell_index_table_find
+ (new_notify_idx)) {
+ reused = true;
+ break;
+ }
+ new_notify_idx = (new_notify_idx + 1) %
+ max_notify_idx;
+ } while (new_notify_idx !=
+ last_notify_idx_released);
+ }
+ if (!reused) {
+ new_notify_idx = max_notify_idx;
+ max_notify_idx++;
+ }
+ }
+ } else {
+ new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE;
+ }
+
+ last_notify_idx_reserved = new_notify_idx;
+ notify_idx_count++;
+
+ entry->idx = new_notify_idx;
+ bucket = VMCI_DOORBELL_HASH(entry->idx);
+ hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
+
+ spin_unlock_bh(&vmci_doorbell_it.lock);
+}
+
+/*
+ * Remove the given entry from the index table. This drops the reference
+ * to the entry's resource.
+ */
+static void dbell_index_table_remove(struct dbell_entry *entry)
+{
+ spin_lock_bh(&vmci_doorbell_it.lock);
+
+ hlist_del_init(&entry->node);
+
+ notify_idx_count--;
+ if (entry->idx == max_notify_idx - 1) {
+ /*
+ * If we delete an entry with the maximum known
+ * notification index, we take the opportunity to
+ * prune the current max. As there might be other
+ * unused indices immediately below, we lower the
+ * maximum until we hit an index in use.
+ */
+ while (max_notify_idx > 0 &&
+ !dbell_index_table_find(max_notify_idx - 1))
+ max_notify_idx--;
+ }
+
+ last_notify_idx_released = entry->idx;
+
+ spin_unlock_bh(&vmci_doorbell_it.lock);
+
+ vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Creates a link between the given doorbell handle and the given
+ * index in the bitmap in the device backend. A notification state
+ * is created in the hypervisor.
+ */
+static int dbell_link(struct vmci_handle handle, u32 notify_idx)
+{
+ struct vmci_doorbell_link_msg link_msg;
+
+ link_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_DOORBELL_LINK);
+ link_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ link_msg.hdr.payload_size = sizeof(link_msg) - VMCI_DG_HEADERSIZE;
+ link_msg.handle = handle;
+ link_msg.notify_idx = notify_idx;
+
+ return vmci_send_datagram(&link_msg.hdr);
+}
+
+/*
+ * Unlinks the given doorbell handle from an index in the bitmap in
+ * the device backend. The notification state is destroyed in the hypervisor.
+ */
+static int dbell_unlink(struct vmci_handle handle)
+{
+ struct vmci_doorbell_unlink_msg unlink_msg;
+
+ unlink_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_DOORBELL_UNLINK);
+ unlink_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ unlink_msg.hdr.payload_size = sizeof(unlink_msg) - VMCI_DG_HEADERSIZE;
+ unlink_msg.handle = handle;
+
+ return vmci_send_datagram(&unlink_msg.hdr);
+}
+
+/*
+ * Notify another guest or the host. We send a datagram down to the
+ * host via the hypervisor with the notification info.
+ */
+static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags)
+{
+ struct vmci_doorbell_notify_msg notify_msg;
+
+ notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_DOORBELL_NOTIFY);
+ notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE;
+ notify_msg.handle = handle;
+
+ return vmci_send_datagram(&notify_msg.hdr);
+}
+
+/*
+ * Calls the specified callback in a delayed context.
+ */
+static void dbell_delayed_dispatch(struct work_struct *work)
+{
+ struct dbell_entry *entry = container_of(work,
+ struct dbell_entry, work);
+
+ entry->notify_cb(entry->client_data);
+ vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Dispatches a doorbell notification to the host context.
+ */
+int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
+{
+ struct dbell_entry *entry;
+ struct vmci_resource *resource;
+
+ if (vmci_handle_is_invalid(handle)) {
+ pr_devel("Notifying an invalid doorbell (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DOORBELL);
+ if (!resource) {
+ pr_devel("Notifying an unknown doorbell (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ entry = container_of(resource, struct dbell_entry, resource);
+ if (entry->run_delayed) {
+ schedule_work(&entry->work);
+ } else {
+ entry->notify_cb(entry->client_data);
+ vmci_resource_put(resource);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Register the notification bitmap with the host.
+ */
+bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
+{
+ int result;
+ struct vmci_notify_bm_set_msg bitmap_set_msg;
+
+ bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_SET_NOTIFY_BITMAP);
+ bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
+ VMCI_DG_HEADERSIZE;
+ bitmap_set_msg.bitmap_ppn = bitmap_ppn;
+
+ result = vmci_send_datagram(&bitmap_set_msg.hdr);
+ if (result != VMCI_SUCCESS) {
+ pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n",
+ bitmap_ppn, result);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Executes or schedules the handlers for a given notify index.
+ */
+static void dbell_fire_entries(u32 notify_idx)
+{
+ u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
+ struct dbell_entry *dbell;
+ struct hlist_node *node;
+
+ spin_lock_bh(&vmci_doorbell_it.lock);
+
+ hlist_for_each_entry(dbell, node,
+ &vmci_doorbell_it.entries[bucket], node) {
+ if (dbell->idx == notify_idx &&
+ atomic_read(&dbell->active) == 1) {
+ if (dbell->run_delayed) {
+ vmci_resource_get(&dbell->resource);
+ schedule_work(&dbell->work);
+ } else {
+ dbell->notify_cb(dbell->client_data);
+ }
+ }
+ }
+
+ spin_unlock_bh(&vmci_doorbell_it.lock);
+}
+
+/*
+ * Scans the notification bitmap, collects pending notifications,
+ * resets the bitmap and invokes appropriate callbacks.
+ */
+void vmci_dbell_scan_notification_entries(u8 *bitmap)
+{
+ u32 idx;
+
+ for (idx = 0; idx < max_notify_idx; idx++) {
+ if (bitmap[idx] & 0x1) {
+ bitmap[idx] &= ~1;
+ dbell_fire_entries(idx);
+ }
+ }
+}
+
+/*
+ * vmci_doorbell_create() - Creates a doorbell
+ * @handle: A handle used to track the resource. Can be invalid.
+ * @flags: Flag that determines context of callback.
+ * @priv_flags: Privilege flags.
+ * @notify_cb: The callback to be invoked when the doorbell fires.
+ * @client_data: A parameter to be passed to the callback.
+ *
+ * Creates a doorbell with the given callback. If the handle is
+ * VMCI_INVALID_HANDLE, a free handle will be assigned, if
+ * possible. The callback can be run immediately (potentially with
+ * locks held - the default) or delayed (in a kernel thread) by
+ * specifying the flag VMCI_FLAG_DELAYED_CB. If delayed execution
+ * is selected, a given callback may not be run if the kernel is
+ * unable to allocate memory for the delayed execution (highly
+ * unlikely).
+ */
+int vmci_doorbell_create(struct vmci_handle *handle,
+ u32 flags,
+ u32 priv_flags,
+ vmci_callback notify_cb, void *client_data)
+{
+ struct dbell_entry *entry;
+ struct vmci_handle new_handle;
+ int result;
+
+ if (!handle || !notify_cb || flags & ~VMCI_FLAG_DELAYED_CB ||
+ priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry == NULL) {
+ pr_warn("Failed allocating memory for datagram entry\n");
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ if (vmci_handle_is_invalid(*handle)) {
+ u32 context_id = vmci_get_context_id();
+
+ /* Let resource code allocate a free ID for us */
+ new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
+ } else {
+ bool valid_context = false;
+
+ /*
+ * Validate the handle. We must do both of the checks below
+ * because we can be acting as both a host and a guest at the
+ * same time. We always allow the host context ID, since the
+ * host functionality is in practice always there with the
+ * unified driver.
+ */
+ if (handle->context == VMCI_HOST_CONTEXT_ID ||
+ (vmci_guest_code_active() &&
+ vmci_get_context_id() == handle->context)) {
+ valid_context = true;
+ }
+
+ if (!valid_context || handle->resource == VMCI_INVALID_ID) {
+ pr_devel("Invalid argument (handle=0x%x:0x%x)\n",
+ handle->context, handle->resource);
+ result = VMCI_ERROR_INVALID_ARGS;
+ goto free_mem;
+ }
+
+ new_handle = *handle;
+ }
+
+ entry->idx = 0;
+ INIT_HLIST_NODE(&entry->node);
+ entry->priv_flags = priv_flags;
+ INIT_WORK(&entry->work, dbell_delayed_dispatch);
+ entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB;
+ entry->notify_cb = notify_cb;
+ entry->client_data = client_data;
+ atomic_set(&entry->active, 0);
+
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_DOORBELL,
+ new_handle);
+ if (result != VMCI_SUCCESS) {
+ pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+ new_handle.context, new_handle.resource, result);
+ goto free_mem;
+ }
+
+ new_handle = vmci_resource_handle(&entry->resource);
+ if (vmci_guest_code_active()) {
+ dbell_index_table_add(entry);
+ result = dbell_link(new_handle, entry->idx);
+ if (VMCI_SUCCESS != result)
+ goto destroy_resource;
+
+ atomic_set(&entry->active, 1);
+ }
+
+ *handle = new_handle;
+
+ return result;
+
+ destroy_resource:
+ dbell_index_table_remove(entry);
+ vmci_resource_remove(&entry->resource);
+ free_mem:
+ kfree(entry);
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_create);
+
+/*
+ * vmci_doorbell_destroy() - Destroy a doorbell.
+ * @handle: The handle tracking the resource.
+ *
+ * Destroys a doorbell previously created with vmci_doorbell_create. This
+ * operation may block waiting for a callback to finish.
+ */
+int vmci_doorbell_destroy(struct vmci_handle handle)
+{
+ struct dbell_entry *entry;
+ struct vmci_resource *resource;
+
+ if (vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ resource = vmci_resource_by_handle(handle,
+ VMCI_RESOURCE_TYPE_DOORBELL);
+ if (!resource) {
+ pr_devel("Failed to destroy doorbell (handle=0x%x:0x%x)\n",
+ handle.context, handle.resource);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ entry = container_of(resource, struct dbell_entry, resource);
+
+ if (vmci_guest_code_active()) {
+ int result;
+
+ dbell_index_table_remove(entry);
+
+ result = dbell_unlink(handle);
+ if (VMCI_SUCCESS != result) {
+
+ /*
+ * The only reason this should fail would be
+ * an inconsistency between guest and
+ * hypervisor state, where the guest believes
+ * it has an active registration whereas the
+ * hypervisor doesn't. One case where this may
+ * happen is if a doorbell is unregistered
+ * following a hibernation at a time where the
+ * doorbell state hasn't been restored on the
+ * hypervisor side yet. Since the handle has
+ * now been removed in the guest, we just
+ * print a warning and return success.
+ */
+ pr_devel("Unlink of doorbell (handle=0x%x:0x%x) unknown by hypervisor (error=%d)\n",
+ handle.context, handle.resource, result);
+ }
+ }
+
+ /*
+ * Now remove the resource from the table. It might still be in use
+ * after this, in a callback or still on the delayed work queue.
+ */
+ vmci_resource_put(&entry->resource);
+ vmci_resource_remove(&entry->resource);
+
+ kfree(entry);
+
+ return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_destroy);
+
+/*
+ * vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes).
+ * @dst: The handle identifying the doorbell resource.
+ * @priv_flags: Privilege flags.
+ *
+ * Generates a notification on the doorbell identified by the
+ * handle. For host side generation of notifications, the caller
+ * can specify what the privilege of the calling side is.
+ */
+int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags)
+{
+ int retval;
+ enum vmci_route route;
+ struct vmci_handle src;
+
+ if (vmci_handle_is_invalid(dst) ||
+ (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src = VMCI_INVALID_HANDLE;
+ retval = vmci_route(&src, &dst, false, &route);
+ if (retval < VMCI_SUCCESS)
+ return retval;
+
+ if (VMCI_ROUTE_AS_HOST == route)
+ return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID,
+ dst, priv_flags);
+
+ if (VMCI_ROUTE_AS_GUEST == route)
+ return dbell_notify_as_guest(dst, priv_flags);
+
+ pr_warn("Unknown route (%d) for doorbell\n", route);
+ return VMCI_ERROR_DST_UNREACHABLE;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_notify);
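
For client code, the exported doorbell API above reduces to create, notify
and destroy. A minimal sketch follows; the callback body and the my_* names
are invented for illustration, and passing VMCI_INVALID_HANDLE lets the
resource code assign a free handle as described in vmci_doorbell_create().

static void my_doorbell_fired(void *client_data)
{
	pr_info("doorbell rang\n");
}

static int my_doorbell_example(void)
{
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	int rv;

	rv = vmci_doorbell_create(&handle, VMCI_FLAG_DELAYED_CB,
				  VMCI_NO_PRIVILEGE_FLAGS,
				  my_doorbell_fired, NULL);
	if (rv != VMCI_SUCCESS)
		return rv;

	/* Ring it once, then tear it down. */
	rv = vmci_doorbell_notify(handle, VMCI_NO_PRIVILEGE_FLAGS);
	vmci_doorbell_destroy(handle);
	return rv;
}
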
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h
new file mode 100644
index 000000000000..e4c0b17486a5
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.h
@@ -0,0 +1,51 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef VMCI_DOORBELL_H
+#define VMCI_DOORBELL_H
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_driver.h"
+
+/*
+ * VMCINotifyResourceInfo: Used to create and destroy doorbells, and
+ * generate a notification for a doorbell or queue pair.
+ */
+struct vmci_dbell_notify_resource_info {
+ struct vmci_handle handle;
+ u16 resource;
+ u16 action;
+ s32 result;
+};
+
+/*
+ * Structure used for checkpointing the doorbell mappings. It is
+ * written to the checkpoint as is, so changing this structure will
+ * break checkpoint compatibility.
+ */
+struct dbell_cpt_state {
+ struct vmci_handle handle;
+ u64 bitmap_idx;
+};
+
+int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle);
+int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags);
+
+bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn);
+void vmci_dbell_scan_notification_entries(u8 *bitmap);
+
+#endif /* VMCI_DOORBELL_H */
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
new file mode 100644
index 000000000000..7b3fce2da6c3
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -0,0 +1,117 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+static bool vmci_disable_host;
+module_param_named(disable_host, vmci_disable_host, bool, 0);
+MODULE_PARM_DESC(disable_host,
+ "Disable driver host personality (default=enabled)");
+
+static bool vmci_disable_guest;
+module_param_named(disable_guest, vmci_disable_guest, bool, 0);
+MODULE_PARM_DESC(disable_guest,
+ "Disable driver guest personality (default=enabled)");
+
+static bool vmci_guest_personality_initialized;
+static bool vmci_host_personality_initialized;
+
+/*
+ * vmci_get_context_id() - Gets the current context ID.
+ *
+ * Returns the current context ID: the guest context ID when the guest
+ * personality is active, otherwise the host context ID when the host
+ * personality is active, or VMCI_INVALID_ID when neither is.
+ */
+u32 vmci_get_context_id(void)
+{
+ if (vmci_guest_code_active())
+ return vmci_get_vm_context_id();
+ else if (vmci_host_code_active())
+ return VMCI_HOST_CONTEXT_ID;
+
+ return VMCI_INVALID_ID;
+}
+EXPORT_SYMBOL_GPL(vmci_get_context_id);
+
+static int __init vmci_drv_init(void)
+{
+ int vmci_err;
+ int error;
+
+ vmci_err = vmci_event_init();
+ if (vmci_err < VMCI_SUCCESS) {
+ pr_err("Failed to initialize VMCIEvent (result=%d)\n",
+ vmci_err);
+ return -EINVAL;
+ }
+
+ if (!vmci_disable_guest) {
+ error = vmci_guest_init();
+ if (error) {
+ pr_warn("Failed to initialize guest personality (err=%d)\n",
+ error);
+ } else {
+ vmci_guest_personality_initialized = true;
+ pr_info("Guest personality initialized and is %s\n",
+ vmci_guest_code_active() ?
+ "active" : "inactive");
+ }
+ }
+
+ if (!vmci_disable_host) {
+ error = vmci_host_init();
+ if (error) {
+ pr_warn("Unable to initialize host personality (err=%d)\n",
+ error);
+ } else {
+ vmci_host_personality_initialized = true;
+ pr_info("Initialized host personality\n");
+ }
+ }
+
+ if (!vmci_guest_personality_initialized &&
+ !vmci_host_personality_initialized) {
+ vmci_event_exit();
+ return -ENODEV;
+ }
+
+ return 0;
+}
+module_init(vmci_drv_init);
+
+static void __exit vmci_drv_exit(void)
+{
+ if (vmci_guest_personality_initialized)
+ vmci_guest_exit();
+
+ if (vmci_host_personality_initialized)
+ vmci_host_exit();
+
+ vmci_event_exit();
+}
+module_exit(vmci_drv_exit);
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
+MODULE_VERSION("1.0.0.0-k");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
new file mode 100644
index 000000000000..f69156a1f30c
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -0,0 +1,50 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_DRIVER_H_
+#define _VMCI_DRIVER_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/wait.h>
+
+#include "vmci_queue_pair.h"
+#include "vmci_context.h"
+
+enum vmci_obj_type {
+ VMCIOBJ_VMX_VM = 10,
+ VMCIOBJ_CONTEXT,
+ VMCIOBJ_SOCKET,
+ VMCIOBJ_NOT_SET,
+};
+
+/* For storing VMCI structures in file handles. */
+struct vmci_obj {
+ void *ptr;
+ enum vmci_obj_type type;
+};
+
+u32 vmci_get_context_id(void);
+int vmci_send_datagram(struct vmci_datagram *dg);
+
+int vmci_host_init(void);
+void vmci_host_exit(void);
+bool vmci_host_code_active(void);
+
+int vmci_guest_init(void);
+void vmci_guest_exit(void);
+bool vmci_guest_code_active(void);
+u32 vmci_get_vm_context_id(void);
+
+#endif /* _VMCI_DRIVER_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
new file mode 100644
index 000000000000..8449516d6ac6
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_event.c
@@ -0,0 +1,224 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define EVENT_MAGIC 0xEABE0000
+#define VMCI_EVENT_MAX_ATTEMPTS 10
+
+struct vmci_subscription {
+ u32 id;
+ u32 event;
+ vmci_event_cb callback;
+ void *callback_data;
+ struct list_head node; /* on one of subscriber lists */
+};
+
+static struct list_head subscriber_array[VMCI_EVENT_MAX];
+static DEFINE_MUTEX(subscriber_mutex);
+
+int __init vmci_event_init(void)
+{
+ int i;
+
+ for (i = 0; i < VMCI_EVENT_MAX; i++)
+ INIT_LIST_HEAD(&subscriber_array[i]);
+
+ return VMCI_SUCCESS;
+}
+
+void vmci_event_exit(void)
+{
+ int e;
+
+ /* We free all memory at exit. */
+ for (e = 0; e < VMCI_EVENT_MAX; e++) {
+ struct vmci_subscription *cur, *p2;
+ list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
+
+ /*
+ * We should never get here because all events
+ * should have been unregistered before we try
+ * to unload the driver module.
+ */
+ pr_warn("Unexpected free events occurring\n");
+ list_del(&cur->node);
+ kfree(cur);
+ }
+ }
+}
+
+/*
+ * Find entry. Assumes subscriber_mutex is held.
+ */
+static struct vmci_subscription *event_find(u32 sub_id)
+{
+ int e;
+
+ for (e = 0; e < VMCI_EVENT_MAX; e++) {
+ struct vmci_subscription *cur;
+ list_for_each_entry(cur, &subscriber_array[e], node) {
+ if (cur->id == sub_id)
+ return cur;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Actually delivers the events to the subscribers.
+ * The callback function for each subscriber is invoked.
+ */
+static void event_deliver(struct vmci_event_msg *event_msg)
+{
+ struct vmci_subscription *cur;
+ struct list_head *subscriber_list;
+
+ rcu_read_lock();
+ subscriber_list = &subscriber_array[event_msg->event_data.event];
+ list_for_each_entry_rcu(cur, subscriber_list, node) {
+ cur->callback(cur->id, &event_msg->event_data,
+ cur->callback_data);
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
+ * subscribers for given event.
+ */
+int vmci_event_dispatch(struct vmci_datagram *msg)
+{
+ struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
+
+ if (msg->payload_size < sizeof(u32) ||
+ msg->payload_size > sizeof(struct vmci_event_data_max))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (!VMCI_EVENT_VALID(event_msg->event_data.event))
+ return VMCI_ERROR_EVENT_UNKNOWN;
+
+ event_deliver(event_msg);
+ return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_event_subscribe() - Subscribe to a given event.
+ * @event: The event to subscribe to.
+ * @callback: The callback to invoke upon the event.
+ * @callback_data: Data to pass to the callback.
+ * @subscription_id: ID used to track subscription. Used with
+ * vmci_event_unsubscribe()
+ *
+ * Subscribes to the provided event. The specified callback will be
+ * invoked from an RCU read-side critical section and therefore must
+ * not sleep.
+ */
+int vmci_event_subscribe(u32 event,
+ vmci_event_cb callback,
+ void *callback_data,
+ u32 *new_subscription_id)
+{
+ struct vmci_subscription *sub;
+ int attempts;
+ int retval;
+ bool have_new_id = false;
+
+ if (!new_subscription_id) {
+ pr_devel("%s: Invalid subscription (NULL)\n", __func__);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (!VMCI_EVENT_VALID(event) || !callback) {
+ pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
+ __func__, event, callback, callback_data);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ sub = kzalloc(sizeof(*sub), GFP_KERNEL);
+ if (!sub)
+ return VMCI_ERROR_NO_MEM;
+
+ sub->id = VMCI_EVENT_MAX;
+ sub->event = event;
+ sub->callback = callback;
+ sub->callback_data = callback_data;
+ INIT_LIST_HEAD(&sub->node);
+
+ mutex_lock(&subscriber_mutex);
+
+ /* Creation of a new event is always allowed. */
+ for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
+ static u32 subscription_id;
+ /*
+ * We try to get an id a couple of times before
+ * claiming we are out of resources.
+ */
+
+ /* Test for duplicate id. */
+ if (!event_find(++subscription_id)) {
+ sub->id = subscription_id;
+ have_new_id = true;
+ break;
+ }
+ }
+
+ if (have_new_id) {
+ list_add_rcu(&sub->node, &subscriber_array[event]);
+ retval = VMCI_SUCCESS;
+ } else {
+ retval = VMCI_ERROR_NO_RESOURCES;
+ }
+
+ mutex_unlock(&subscriber_mutex);
+
+ *new_subscription_id = sub->id;
+ return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_event_subscribe);
+
+/*
+ * vmci_event_unsubscribe() - unsubscribe from an event.
+ * @sub_id: A subscription ID as provided by vmci_event_subscribe()
+ *
+ * Unsubscribes from the given event, removes the subscription from the
+ * list and frees it after an RCU grace period.
+ */
+int vmci_event_unsubscribe(u32 sub_id)
+{
+ struct vmci_subscription *s;
+
+ mutex_lock(&subscriber_mutex);
+ s = event_find(sub_id);
+ if (s)
+ list_del_rcu(&s->node);
+ mutex_unlock(&subscriber_mutex);
+
+ if (!s)
+ return VMCI_ERROR_NOT_FOUND;
+
+ synchronize_rcu();
+ kfree(s);
+
+ return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
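
A short sketch of the subscription API above; VMCI_EVENT_CTX_ID_UPDATE is
one of the event types from linux/vmw_vmci_defs.h, and the callback and
my_* names are invented for illustration. Because the callback is delivered
under rcu_read_lock(), it must not sleep.

static u32 my_sub_id = VMCI_INVALID_ID;

static void my_event_cb(u32 sub_id, const struct vmci_event_data *ed,
			void *client_data)
{
	pr_info("VMCI event %u fired for subscription %u\n",
		ed->event, sub_id);
}

static int my_event_example(void)
{
	int rv;

	rv = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE, my_event_cb,
				  NULL, &my_sub_id);
	if (rv != VMCI_SUCCESS)
		return rv;

	/* ... later, when no longer interested ... */
	return vmci_event_unsubscribe(my_sub_id);
}
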
diff --git a/drivers/misc/vmw_vmci/vmci_event.h b/drivers/misc/vmw_vmci/vmci_event.h
new file mode 100644
index 000000000000..7df9b1c0a96c
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_event.h
@@ -0,0 +1,25 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef __VMCI_EVENT_H__
+#define __VMCI_EVENT_H__
+
+#include <linux/vmw_vmci_api.h>
+
+int vmci_event_init(void);
+void vmci_event_exit(void);
+int vmci_event_dispatch(struct vmci_datagram *msg);
+
+#endif /*__VMCI_EVENT_H__ */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
new file mode 100644
index 000000000000..60c01999f489
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -0,0 +1,759 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define PCI_VENDOR_ID_VMWARE 0x15AD
+#define PCI_DEVICE_ID_VMWARE_VMCI 0x0740
+
+#define VMCI_UTIL_NUM_RESOURCES 1
+
+static bool vmci_disable_msi;
+module_param_named(disable_msi, vmci_disable_msi, bool, 0);
+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
+
+static bool vmci_disable_msix;
+module_param_named(disable_msix, vmci_disable_msix, bool, 0);
+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
+
+static u32 ctx_update_sub_id = VMCI_INVALID_ID;
+static u32 vm_context_id = VMCI_INVALID_ID;
+
+struct vmci_guest_device {
+ struct device *dev; /* PCI device we are attached to */
+ void __iomem *iobase;
+
+ unsigned int irq;
+ unsigned int intr_type;
+ bool exclusive_vectors;
+ struct msix_entry msix_entries[VMCI_MAX_INTRS];
+
+ struct tasklet_struct datagram_tasklet;
+ struct tasklet_struct bm_tasklet;
+
+ void *data_buffer;
+ void *notification_bitmap;
+};
+
+/* vmci_dev singleton device and supporting data */
+static struct vmci_guest_device *vmci_dev_g;
+static DEFINE_SPINLOCK(vmci_dev_spinlock);
+
+static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);
+
+bool vmci_guest_code_active(void)
+{
+ return atomic_read(&vmci_num_guest_devices) != 0;
+}
+
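+/*
+ * Return the context ID of this VM.  The hypervisor is queried with a
+ * VMCI_GET_CONTEXT_ID datagram on first use and the result is cached.
+ */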
+u32 vmci_get_vm_context_id(void)
+{
+ if (vm_context_id == VMCI_INVALID_ID) {
+ struct vmci_datagram get_cid_msg;
+ get_cid_msg.dst =
+ vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_GET_CONTEXT_ID);
+ get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
+ get_cid_msg.payload_size = 0;
+ vm_context_id = vmci_send_datagram(&get_cid_msg);
+ }
+ return vm_context_id;
+}
+
+/*
+ * VM to hypervisor call mechanism. We use the standard VMware naming
+ * convention since shared code is calling this function as well.
+ */
+int vmci_send_datagram(struct vmci_datagram *dg)
+{
+ unsigned long flags;
+ int result;
+
+ /* Check args. */
+ if (dg == NULL)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * Need to acquire spinlock on the device because the datagram
+ * data may be spread over multiple pages and the monitor may
+ * interleave device user rpc calls from multiple
+ * VCPUs. Acquiring the spinlock precludes that
+	 * possibility. Interrupts are disabled to avoid servicing
+	 * incoming datagrams during a "rep out" and possibly ending
+	 * up back in this function.
+ */
+ spin_lock_irqsave(&vmci_dev_spinlock, flags);
+
+ if (vmci_dev_g) {
+ iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
+ dg, VMCI_DG_SIZE(dg));
+ result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
+ } else {
+ result = VMCI_ERROR_UNAVAILABLE;
+ }
+
+ spin_unlock_irqrestore(&vmci_dev_spinlock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_send_datagram);
+
+/*
+ * Gets called with the new context ID when the context is updated or resumed.
+ */
+static void vmci_guest_cid_update(u32 sub_id,
+ const struct vmci_event_data *event_data,
+ void *client_data)
+{
+ const struct vmci_event_payld_ctx *ev_payload =
+ vmci_event_data_const_payload(event_data);
+
+ if (sub_id != ctx_update_sub_id) {
+ pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
+ return;
+ }
+
+ if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
+ pr_devel("Invalid event data\n");
+ return;
+ }
+
+ pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
+ vm_context_id, ev_payload->context_id, event_data->event);
+
+ vm_context_id = ev_payload->context_id;
+}
+
+/*
+ * Verify that the host supports the hypercalls we need. If it does not,
+ * try to find fallback hypercalls and use those instead. Returns
+ * true if required hypercalls (or fallback hypercalls) are
+ * supported by the host, false otherwise.
+ */
+static bool vmci_check_host_caps(struct pci_dev *pdev)
+{
+ bool result;
+ struct vmci_resource_query_msg *msg;
+ u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
+ VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
+ struct vmci_datagram *check_msg;
+
+ check_msg = kmalloc(msg_size, GFP_KERNEL);
+ if (!check_msg) {
+ dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
+ return false;
+ }
+
+ check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_RESOURCES_QUERY);
+ check_msg->src = VMCI_ANON_SRC_HANDLE;
+ check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
+ msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);
+
+ msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
+ msg->resources[0] = VMCI_GET_CONTEXT_ID;
+
+	/*
+	 * Check that hypercalls are supported.  A result of 0x01 means the
+	 * single queried resource (VMCI_GET_CONTEXT_ID) is supported.
+	 */
+ result = vmci_send_datagram(check_msg) == 0x01;
+ kfree(check_msg);
+
+ dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
+ __func__, result ? "PASSED" : "FAILED");
+
+ /* We need the vector. There are no fallbacks. */
+ return result;
+}
+
+/*
+ * Reads datagrams from the data in port and dispatches them. We
+ * always start reading datagrams into only the first page of the
+ * datagram buffer. If the datagrams don't fit into one page, we
+ * use the maximum datagram buffer size for the remainder of the
+ * invocation. This is a simple heuristic for not penalizing
+ * small datagrams.
+ *
+ * This function assumes that it has exclusive access to the data
+ * in port for the duration of the call.
+ */
+static void vmci_dispatch_dgs(unsigned long data)
+{
+ struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
+ u8 *dg_in_buffer = vmci_dev->data_buffer;
+ struct vmci_datagram *dg;
+ size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
+ size_t current_dg_in_buffer_size = PAGE_SIZE;
+ size_t remaining_bytes;
+
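+	/*
+	 * The initial page-sized read below relies on the data buffer
+	 * being at least one page.
+	 */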
+ BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
+
+ ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer, current_dg_in_buffer_size);
+ dg = (struct vmci_datagram *)dg_in_buffer;
+ remaining_bytes = current_dg_in_buffer_size;
+
+ while (dg->dst.resource != VMCI_INVALID_ID ||
+ remaining_bytes > PAGE_SIZE) {
+ unsigned dg_in_size;
+
+ /*
+ * When the input buffer spans multiple pages, a datagram can
+ * start on any page boundary in the buffer.
+ */
+ if (dg->dst.resource == VMCI_INVALID_ID) {
+ dg = (struct vmci_datagram *)roundup(
+ (uintptr_t)dg + 1, PAGE_SIZE);
+ remaining_bytes =
+ (size_t)(dg_in_buffer +
+ current_dg_in_buffer_size -
+ (u8 *)dg);
+ continue;
+ }
+
+ dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);
+
+ if (dg_in_size <= dg_in_buffer_size) {
+ int result;
+
+ /*
+			 * If the remaining bytes in the datagram
+			 * buffer don't contain the complete
+			 * datagram, we first make sure we have enough
+			 * room for it and then we read the remainder
+			 * of the datagram and possibly any following
+			 * datagrams.
+ */
+ if (dg_in_size > remaining_bytes) {
+ if (remaining_bytes !=
+ current_dg_in_buffer_size) {
+
+ /*
+					 * We move the partial
+					 * datagram to the front and
+					 * read the remainder of the
+					 * datagram and any following
+					 * datagrams into the
+					 * remaining bytes.
+ */
+ memmove(dg_in_buffer, dg_in_buffer +
+ current_dg_in_buffer_size -
+ remaining_bytes,
+ remaining_bytes);
+ dg = (struct vmci_datagram *)
+ dg_in_buffer;
+ }
+
+ if (current_dg_in_buffer_size !=
+ dg_in_buffer_size)
+ current_dg_in_buffer_size =
+ dg_in_buffer_size;
+
+ ioread8_rep(vmci_dev->iobase +
+ VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer +
+ remaining_bytes,
+ current_dg_in_buffer_size -
+ remaining_bytes);
+ }
+
+ /*
+ * We special case event datagrams from the
+ * hypervisor.
+ */
+ if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
+ dg->dst.resource == VMCI_EVENT_HANDLER) {
+ result = vmci_event_dispatch(dg);
+ } else {
+ result = vmci_datagram_invoke_guest_handler(dg);
+ }
+ if (result < VMCI_SUCCESS)
+ dev_dbg(vmci_dev->dev,
+ "Datagram with resource (ID=0x%x) failed (err=%d)\n",
+ dg->dst.resource, result);
+
+ /* On to the next datagram. */
+ dg = (struct vmci_datagram *)((u8 *)dg +
+ dg_in_size);
+ } else {
+ size_t bytes_to_skip;
+
+ /*
+ * Datagram doesn't fit in datagram buffer of maximal
+ * size. We drop it.
+ */
+ dev_dbg(vmci_dev->dev,
+ "Failed to receive datagram (size=%u bytes)\n",
+ dg_in_size);
+
+ bytes_to_skip = dg_in_size - remaining_bytes;
+ if (current_dg_in_buffer_size != dg_in_buffer_size)
+ current_dg_in_buffer_size = dg_in_buffer_size;
+
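+			/*
+			 * Drain the rest of the oversized datagram from
+			 * the device one full buffer at a time until we
+			 * have skipped past it.
+			 */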
+ for (;;) {
+ ioread8_rep(vmci_dev->iobase +
+ VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer,
+ current_dg_in_buffer_size);
+ if (bytes_to_skip <= current_dg_in_buffer_size)
+ break;
+
+ bytes_to_skip -= current_dg_in_buffer_size;
+ }
+ dg = (struct vmci_datagram *)(dg_in_buffer +
+ bytes_to_skip);
+ }
+
+ remaining_bytes =
+ (size_t) (dg_in_buffer + current_dg_in_buffer_size -
+ (u8 *)dg);
+
+ if (remaining_bytes < VMCI_DG_HEADERSIZE) {
+ /* Get the next batch of datagrams. */
+
+ ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+ vmci_dev->data_buffer,
+ current_dg_in_buffer_size);
+ dg = (struct vmci_datagram *)dg_in_buffer;
+ remaining_bytes = current_dg_in_buffer_size;
+ }
+ }
+}
+
+/*
+ * Scans the notification bitmap for raised flags, clears them
+ * and handles the notifications.
+ */
+static void vmci_process_bitmap(unsigned long data)
+{
+ struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
+
+ if (!dev->notification_bitmap) {
+ dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
+ return;
+ }
+
+ vmci_dbell_scan_notification_entries(dev->notification_bitmap);
+}
+
+/*
+ * Enable MSI-X. Try exclusive vectors first, then shared vectors.
+ */
+static int vmci_enable_msix(struct pci_dev *pdev,
+ struct vmci_guest_device *vmci_dev)
+{
+ int i;
+ int result;
+
+ for (i = 0; i < VMCI_MAX_INTRS; ++i) {
+ vmci_dev->msix_entries[i].entry = i;
+ vmci_dev->msix_entries[i].vector = i;
+ }
+
+ result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
+ if (result == 0)
+ vmci_dev->exclusive_vectors = true;
+ else if (result > 0)
+ result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);
+
+ return result;
+}
+
+/*
+ * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
+ * interrupt (vector VMCI_INTR_DATAGRAM).
+ */
+static irqreturn_t vmci_interrupt(int irq, void *_dev)
+{
+ struct vmci_guest_device *dev = _dev;
+
+ /*
+ * If we are using MSI-X with exclusive vectors then we simply schedule
+ * the datagram tasklet, since we know the interrupt was meant for us.
+ * Otherwise we must read the ICR to determine what to do.
+ */
+
+ if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
+ tasklet_schedule(&dev->datagram_tasklet);
+ } else {
+ unsigned int icr;
+
+ /* Acknowledge interrupt and determine what needs doing. */
+ icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
+ if (icr == 0 || icr == ~0)
+ return IRQ_NONE;
+
+ if (icr & VMCI_ICR_DATAGRAM) {
+ tasklet_schedule(&dev->datagram_tasklet);
+ icr &= ~VMCI_ICR_DATAGRAM;
+ }
+
+ if (icr & VMCI_ICR_NOTIFICATION) {
+ tasklet_schedule(&dev->bm_tasklet);
+ icr &= ~VMCI_ICR_NOTIFICATION;
+ }
+
+ if (icr != 0)
+ dev_warn(dev->dev,
+ "Ignoring unknown interrupt cause (%d)\n",
+ icr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
+ * which is for the notification bitmap. Will only get called if we are
+ * using MSI-X with exclusive vectors.
+ */
+static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
+{
+ struct vmci_guest_device *dev = _dev;
+
+ /* For MSI-X we can just assume it was meant for us. */
+ tasklet_schedule(&dev->bm_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Most of the initialization is done here, when the PCI device is probed.
+ */
+static int vmci_guest_probe_device(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct vmci_guest_device *vmci_dev;
+ void __iomem *iobase;
+ unsigned int capabilities;
+ unsigned long cmd;
+ int vmci_err;
+ int error;
+
+ dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");
+
+ error = pcim_enable_device(pdev);
+ if (error) {
+ dev_err(&pdev->dev,
+ "Failed to enable VMCI device: %d\n", error);
+ return error;
+ }
+
+ error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
+ return error;
+ }
+
+ iobase = pcim_iomap_table(pdev)[0];
+
+ dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
+ (unsigned long)iobase, pdev->irq);
+
+ vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
+ if (!vmci_dev) {
+ dev_err(&pdev->dev,
+ "Can't allocate memory for VMCI device\n");
+ return -ENOMEM;
+ }
+
+ vmci_dev->dev = &pdev->dev;
+ vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
+ vmci_dev->exclusive_vectors = false;
+ vmci_dev->iobase = iobase;
+
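+	/*
+	 * Datagram dispatch and notification-bitmap scanning are deferred
+	 * to tasklets so the interrupt handler itself stays short.
+	 */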
+ tasklet_init(&vmci_dev->datagram_tasklet,
+ vmci_dispatch_dgs, (unsigned long)vmci_dev);
+ tasklet_init(&vmci_dev->bm_tasklet,
+ vmci_process_bitmap, (unsigned long)vmci_dev);
+
+ vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
+ if (!vmci_dev->data_buffer) {
+ dev_err(&pdev->dev,
+ "Can't allocate memory for datagram buffer\n");
+ return -ENOMEM;
+ }
+
+ pci_set_master(pdev); /* To enable queue_pair functionality. */
+
+ /*
+ * Verify that the VMCI Device supports the capabilities that
+ * we need. If the device is missing capabilities that we would
+ * like to use, check for fallback capabilities and use those
+ * instead (so we can run a new VM on old hosts). Fail the load if
+ * a required capability is missing and there is no fallback.
+ *
+ * Right now, we need datagrams. There are no fallbacks.
+ */
+ capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
+ if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
+ dev_err(&pdev->dev, "Device does not support datagrams\n");
+ error = -ENXIO;
+ goto err_free_data_buffer;
+	}
+
+	/* Only advertise and use the capabilities we actually rely on. */
+	capabilities = VMCI_CAPS_DATAGRAM;
+
+ /*
+ * If the hardware supports notifications, we will use that as
+ * well.
+ */
+ if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+ vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
+ if (!vmci_dev->notification_bitmap) {
+ dev_warn(&pdev->dev,
+ "Unable to allocate notification bitmap\n");
+ } else {
+ memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
+ capabilities |= VMCI_CAPS_NOTIFICATIONS;
+ }
+ }
+
+ dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);
+
+ /* Let the host know which capabilities we intend to use. */
+ iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);
+
+ /* Set up global device so that we can start sending datagrams */
+ spin_lock_irq(&vmci_dev_spinlock);
+ vmci_dev_g = vmci_dev;
+ spin_unlock_irq(&vmci_dev_spinlock);
+
+ /*
+ * Register notification bitmap with device if that capability is
+ * used.
+ */
+ if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+ struct page *page =
+ vmalloc_to_page(vmci_dev->notification_bitmap);
+ unsigned long bitmap_ppn = page_to_pfn(page);
+ if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
+ dev_warn(&pdev->dev,
+ "VMCI device unable to register notification bitmap with PPN 0x%x\n",
+ (u32) bitmap_ppn);
+			error = -ENXIO;
+			goto err_remove_vmci_dev_g;
+ }
+ }
+
+ /* Check host capabilities. */
+	if (!vmci_check_host_caps(pdev)) {
+		error = -ENXIO;
+		goto err_remove_bitmap;
+	}
+
+ /* Enable device. */
+
+ /*
+ * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
+ * update the internal context id when needed.
+ */
+ vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
+ vmci_guest_cid_update, NULL,
+ &ctx_update_sub_id);
+ if (vmci_err < VMCI_SUCCESS)
+ dev_warn(&pdev->dev,
+ "Failed to subscribe to event (type=%d): %d\n",
+ VMCI_EVENT_CTX_ID_UPDATE, vmci_err);
+
+ /*
+ * Enable interrupts. Try MSI-X first, then MSI, and then fallback on
+ * legacy interrupts.
+ */
+ if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
+ vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
+ vmci_dev->irq = vmci_dev->msix_entries[0].vector;
+ } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
+ vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
+ vmci_dev->irq = pdev->irq;
+ } else {
+ vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
+ vmci_dev->irq = pdev->irq;
+ }
+
+ /*
+ * Request IRQ for legacy or MSI interrupts, or for first
+ * MSI-X vector.
+ */
+ error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, vmci_dev);
+ if (error) {
+ dev_err(&pdev->dev, "Irq %u in use: %d\n",
+ vmci_dev->irq, error);
+ goto err_disable_msi;
+ }
+
+ /*
+ * For MSI-X with exclusive vectors we need to request an
+ * interrupt for each vector so that we get a separate
+ * interrupt handler routine. This allows us to distinguish
+ * between the vectors.
+ */
+ if (vmci_dev->exclusive_vectors) {
+ error = request_irq(vmci_dev->msix_entries[1].vector,
+ vmci_interrupt_bm, 0, KBUILD_MODNAME,
+ vmci_dev);
+ if (error) {
+ dev_err(&pdev->dev,
+ "Failed to allocate irq %u: %d\n",
+ vmci_dev->msix_entries[1].vector, error);
+ goto err_free_irq;
+ }
+ }
+
+ dev_dbg(&pdev->dev, "Registered device\n");
+
+ atomic_inc(&vmci_num_guest_devices);
+
+ /* Enable specific interrupt bits. */
+ cmd = VMCI_IMR_DATAGRAM;
+ if (capabilities & VMCI_CAPS_NOTIFICATIONS)
+ cmd |= VMCI_IMR_NOTIFICATION;
+ iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
+
+ /* Enable interrupts. */
+ iowrite32(VMCI_CONTROL_INT_ENABLE,
+ vmci_dev->iobase + VMCI_CONTROL_ADDR);
+
+ pci_set_drvdata(pdev, vmci_dev);
+ return 0;
+
+err_free_irq:
+	free_irq(vmci_dev->irq, vmci_dev);
+ tasklet_kill(&vmci_dev->datagram_tasklet);
+ tasklet_kill(&vmci_dev->bm_tasklet);
+
+err_disable_msi:
+ if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
+ pci_disable_msix(pdev);
+ else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
+ pci_disable_msi(pdev);
+
+ vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
+ if (vmci_err < VMCI_SUCCESS)
+ dev_warn(&pdev->dev,
+ "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
+ VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
+
+err_remove_bitmap:
+ if (vmci_dev->notification_bitmap) {
+ iowrite32(VMCI_CONTROL_RESET,
+ vmci_dev->iobase + VMCI_CONTROL_ADDR);
+ vfree(vmci_dev->notification_bitmap);
+ }
+
+err_remove_vmci_dev_g:
+ spin_lock_irq(&vmci_dev_spinlock);
+ vmci_dev_g = NULL;
+ spin_unlock_irq(&vmci_dev_spinlock);
+
+err_free_data_buffer:
+ vfree(vmci_dev->data_buffer);
+
+ /* The rest are managed resources and will be freed by PCI core */
+ return error;
+}
+
+static void vmci_guest_remove_device(struct pci_dev *pdev)
+{
+ struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
+ int vmci_err;
+
+ dev_dbg(&pdev->dev, "Removing device\n");
+
+ atomic_dec(&vmci_num_guest_devices);
+
+ vmci_qp_guest_endpoints_exit();
+
+ vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
+ if (vmci_err < VMCI_SUCCESS)
+ dev_warn(&pdev->dev,
+ "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
+ VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
+
+ spin_lock_irq(&vmci_dev_spinlock);
+ vmci_dev_g = NULL;
+ spin_unlock_irq(&vmci_dev_spinlock);
+
+ dev_dbg(&pdev->dev, "Resetting vmci device\n");
+ iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);
+
+ /*
+ * Free IRQ and then disable MSI/MSI-X as appropriate. For
+ * MSI-X, we might have multiple vectors, each with their own
+ * IRQ, which we must free too.
+ */
+ free_irq(vmci_dev->irq, vmci_dev);
+ if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
+ if (vmci_dev->exclusive_vectors)
+ free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
+ pci_disable_msix(pdev);
+ } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
+ pci_disable_msi(pdev);
+ }
+
+ tasklet_kill(&vmci_dev->datagram_tasklet);
+ tasklet_kill(&vmci_dev->bm_tasklet);
+
+ if (vmci_dev->notification_bitmap) {
+ /*
+ * The device reset above cleared the bitmap state of the
+ * device, so we can safely free it here.
+ */
+
+ vfree(vmci_dev->notification_bitmap);
+ }
+
+ vfree(vmci_dev->data_buffer);
+
+ /* The rest are managed resources and will be freed by PCI core */
+}
+
+static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, vmci_ids);
+
+static struct pci_driver vmci_guest_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = vmci_ids,
+ .probe = vmci_guest_probe_device,
+ .remove = vmci_guest_remove_device,
+};
+
+int __init vmci_guest_init(void)
+{
+ return pci_register_driver(&vmci_guest_driver);
+}
+
+void __exit vmci_guest_exit(void)
+{
+ pci_unregister_driver(&vmci_guest_driver);
+}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
new file mode 100644
index 000000000000..344973a0fb0a
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
@@ -0,0 +1,142 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/slab.h>
+#include "vmci_handle_array.h"
+
+static size_t handle_arr_calc_size(size_t capacity)
+{
+ return sizeof(struct vmci_handle_arr) +
+ capacity * sizeof(struct vmci_handle);
+}
+
+struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
+{
+ struct vmci_handle_arr *array;
+
+ if (capacity == 0)
+ capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
+
+ array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
+ if (!array)
+ return NULL;
+
+ array->capacity = capacity;
+ array->size = 0;
+
+ return array;
+}
+
+void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
+{
+ kfree(array);
+}
+
+void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+ struct vmci_handle handle)
+{
+ struct vmci_handle_arr *array = *array_ptr;
+
+ if (unlikely(array->size >= array->capacity)) {
+ /* reallocate. */
+ struct vmci_handle_arr *new_array;
+ size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
+ size_t new_size = handle_arr_calc_size(new_capacity);
+
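+		/*
+		 * Grow the array; if the atomic allocation fails, the new
+		 * handle is silently dropped.
+		 */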
+ new_array = krealloc(array, new_size, GFP_ATOMIC);
+ if (!new_array)
+ return;
+
+ new_array->capacity = new_capacity;
+ *array_ptr = array = new_array;
+ }
+
+ array->entries[array->size] = handle;
+ array->size++;
+}
+
+/*
+ * Handle that was removed, VMCI_INVALID_HANDLE if entry not found.
+ */
+struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle)
+{
+ struct vmci_handle handle = VMCI_INVALID_HANDLE;
+ size_t i;
+
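+	/* Remove by swapping the last entry into the vacated slot. */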
+ for (i = 0; i < array->size; i++) {
+ if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
+ handle = array->entries[i];
+ array->size--;
+ array->entries[i] = array->entries[array->size];
+ array->entries[array->size] = VMCI_INVALID_HANDLE;
+ break;
+ }
+ }
+
+ return handle;
+}
+
+/*
+ * Handle that was removed, VMCI_INVALID_HANDLE if array was empty.
+ */
+struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
+{
+ struct vmci_handle handle = VMCI_INVALID_HANDLE;
+
+ if (array->size) {
+ array->size--;
+ handle = array->entries[array->size];
+ array->entries[array->size] = VMCI_INVALID_HANDLE;
+ }
+
+ return handle;
+}
+
+/*
+ * Handle at given index, VMCI_INVALID_HANDLE if invalid index.
+ */
+struct vmci_handle
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
+{
+ if (unlikely(index >= array->size))
+ return VMCI_INVALID_HANDLE;
+
+ return array->entries[index];
+}
+
+bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle)
+{
+ size_t i;
+
+ for (i = 0; i < array->size; i++)
+ if (vmci_handle_is_equal(array->entries[i], entry_handle))
+ return true;
+
+ return false;
+}
+
+/*
+ * NULL if the array is empty. Otherwise, a pointer to the array
+ * of VMCI handles in the handle array.
+ */
+struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array)
+{
+ if (array->size)
+ return array->entries;
+
+ return NULL;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
new file mode 100644
index 000000000000..b5f3a7f98cf1
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
@@ -0,0 +1,52 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_HANDLE_ARRAY_H_
+#define _VMCI_HANDLE_ARRAY_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
+#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */
+
+struct vmci_handle_arr {
+ size_t capacity;
+ size_t size;
+ struct vmci_handle entries[];
+};
+
+struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
+void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
+void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+ struct vmci_handle handle);
+struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+ struct vmci_handle
+ entry_handle);
+struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
+struct vmci_handle
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
+bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle);
+struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
+
+static inline size_t vmci_handle_arr_get_size(
+ const struct vmci_handle_arr *array)
+{
+ return array->size;
+}
+
+
+#endif /* _VMCI_HANDLE_ARRAY_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
new file mode 100644
index 000000000000..d4722b3dc8ec
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -0,0 +1,1043 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/moduleparam.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define VMCI_UTIL_NUM_RESOURCES 1
+
+enum {
+ VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
+ VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
+};
+
+enum {
+ VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
+ VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
+ VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
+};
+
+/*
+ * VMCI driver initialization. This block can also be used to
+ * pass initial group membership etc.
+ */
+struct vmci_init_blk {
+ u32 cid;
+ u32 flags;
+};
+
+/* VMCIqueue_pairAllocInfo_VMToVM */
+struct vmci_qp_alloc_info_vmvm {
+ struct vmci_handle handle;
+ u32 peer;
+ u32 flags;
+ u64 produce_size;
+ u64 consume_size;
+ u64 produce_page_file; /* User VA. */
+ u64 consume_page_file; /* User VA. */
+ u64 produce_page_file_size; /* Size of the file name array. */
+ u64 consume_page_file_size; /* Size of the file name array. */
+ s32 result;
+ u32 _pad;
+};
+
+/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
+struct vmci_set_notify_info {
+ u64 notify_uva;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * Per-instance host state
+ */
+struct vmci_host_dev {
+ struct vmci_ctx *context;
+ int user_version;
+ enum vmci_obj_type ct_type;
+ struct mutex lock; /* Mutex lock for vmci context access */
+};
+
+static struct vmci_ctx *host_context;
+static bool vmci_host_device_initialized;
+static atomic_t vmci_host_active_users = ATOMIC_INIT(0);
+
+/*
+ * Determines whether the VMCI host personality is
+ * available. Since the core functionality of the host driver is
+ * always present, all guests could possibly use the host
+ * personality. However, to minimize the deviation from the
+ * pre-unified driver state of affairs, we only consider the host
+ * device active if there is no active guest device or if there
+ * are VMX'en with active VMCI contexts using the host device.
+ */
+bool vmci_host_code_active(void)
+{
+ return vmci_host_device_initialized &&
+ (!vmci_guest_code_active() ||
+ atomic_read(&vmci_host_active_users) > 0);
+}
+
+/*
+ * Called on open of /dev/vmci.
+ */
+static int vmci_host_open(struct inode *inode, struct file *filp)
+{
+ struct vmci_host_dev *vmci_host_dev;
+
+ vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
+ if (vmci_host_dev == NULL)
+ return -ENOMEM;
+
+ vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+ mutex_init(&vmci_host_dev->lock);
+ filp->private_data = vmci_host_dev;
+
+ return 0;
+}
+
+/*
+ * Called on close of /dev/vmci, most often when the process
+ * exits.
+ */
+static int vmci_host_close(struct inode *inode, struct file *filp)
+{
+ struct vmci_host_dev *vmci_host_dev = filp->private_data;
+
+ if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+ vmci_ctx_destroy(vmci_host_dev->context);
+ vmci_host_dev->context = NULL;
+
+ /*
+ * The number of active contexts is used to track whether any
+ * VMX'en are using the host personality. It is incremented when
+ * a context is created through the IOCTL_VMCI_INIT_CONTEXT
+ * ioctl.
+ */
+ atomic_dec(&vmci_host_active_users);
+ }
+ vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+
+ kfree(vmci_host_dev);
+ filp->private_data = NULL;
+ return 0;
+}
+
+/*
+ * This is used to wake up the VMX when a VMCI call arrives, or
+ * to wake up select() or poll() at the next clock tick.
+ */
+static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
+{
+ struct vmci_host_dev *vmci_host_dev = filp->private_data;
+ struct vmci_ctx *context = vmci_host_dev->context;
+ unsigned int mask = 0;
+
+ if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+ /* Check for VMCI calls to this VM context. */
+ if (wait)
+ poll_wait(filp, &context->host_context.wait_queue,
+ wait);
+
+ spin_lock(&context->lock);
+ if (context->pending_datagrams > 0 ||
+ vmci_handle_arr_get_size(
+ context->pending_doorbell_array) > 0) {
+ mask = POLLIN;
+ }
+ spin_unlock(&context->lock);
+ }
+ return mask;
+}
+
+/*
+ * Copies the handles of a handle array into a user buffer, and
+ * returns the new length in user_buf_size. If the copy to the
+ * user buffer fails, the function still returns VMCI_SUCCESS,
+ * but retval != 0.
+ */
+static int drv_cp_harray_to_user(void __user *user_buf_uva,
+ u64 *user_buf_size,
+ struct vmci_handle_arr *handle_array,
+ int *retval)
+{
+ u32 array_size = 0;
+ struct vmci_handle *handles;
+
+ if (handle_array)
+ array_size = vmci_handle_arr_get_size(handle_array);
+
+ if (array_size * sizeof(*handles) > *user_buf_size)
+ return VMCI_ERROR_MORE_DATA;
+
+ *user_buf_size = array_size * sizeof(*handles);
+ if (*user_buf_size)
+ *retval = copy_to_user(user_buf_uva,
+ vmci_handle_arr_get_handles
+ (handle_array), *user_buf_size);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Sets up a given context for notify to work. Maps the notify
+ * boolean at the given user VA into kernel space.
+ */
+static int vmci_host_setup_notify(struct vmci_ctx *context,
+ unsigned long uva)
+{
+ struct page *page;
+ int retval;
+
+ if (context->notify_page) {
+ pr_devel("%s: Notify mechanism is already set up\n", __func__);
+ return VMCI_ERROR_DUPLICATE_ENTRY;
+ }
+
+ /*
+	 * We are using 'bool' internally, but let's make sure we are
+	 * explicit about the size.
+ */
+ BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
+ if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
+ return VMCI_ERROR_GENERIC;
+
+ /*
+ * Lock physical page backing a given user VA.
+ */
+ down_read(&current->mm->mmap_sem);
+ retval = get_user_pages(current, current->mm,
+				uva & PAGE_MASK,
+ 1, 1, 0, &page, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (retval != 1)
+ return VMCI_ERROR_GENERIC;
+
+ /*
+ * Map the locked page and set up notify pointer.
+ */
+	context->notify_page = page;
+	context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
+ vmci_ctx_check_signal_notify(context);
+
+ return VMCI_SUCCESS;
+}
+
+static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
+ unsigned int cmd, void __user *uptr)
+{
+ if (cmd == IOCTL_VMCI_VERSION2) {
+ int __user *vptr = uptr;
+ if (get_user(vmci_host_dev->user_version, vptr))
+ return -EFAULT;
+ }
+
+ /*
+ * The basic logic here is:
+ *
+ * If the user sends in a version of 0 tell it our version.
+ * If the user didn't send in a version, tell it our version.
+ * If the user sent in an old version, tell it -its- version.
+	 * If the user sent in a newer version, tell it our version.
+ *
+ * The rationale behind telling the caller its version is that
+	 * Workstation 6.5 required that the VMX and the VMCI kernel module
+	 * were version synced. All new VMX users will be programmed to
+ * handle the VMCI kernel module version.
+ */
+
+ if (vmci_host_dev->user_version > 0 &&
+ vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
+ return vmci_host_dev->user_version;
+ }
+
+ return VMCI_VERSION;
+}
+
+#define vmci_ioctl_err(fmt, ...) \
+ pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
+
+static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_init_blk init_block;
+ const struct cred *cred;
+ int retval;
+
+ if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
+ vmci_ioctl_err("error reading init block\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&vmci_host_dev->lock);
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
+ vmci_ioctl_err("received VMCI init on initialized handle\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+ vmci_ioctl_err("unsupported VMCI restriction flag\n");
+ retval = -EINVAL;
+ goto out;
+ }
+
+ cred = get_current_cred();
+ vmci_host_dev->context = vmci_ctx_create(init_block.cid,
+ init_block.flags, 0,
+ vmci_host_dev->user_version,
+ cred);
+ put_cred(cred);
+ if (IS_ERR(vmci_host_dev->context)) {
+ retval = PTR_ERR(vmci_host_dev->context);
+ vmci_ioctl_err("error initializing context\n");
+ goto out;
+ }
+
+ /*
+ * Copy cid to userlevel, we do this to allow the VMX
+ * to enforce its policy on cid generation.
+ */
+ init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
+ if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
+ vmci_ctx_destroy(vmci_host_dev->context);
+ vmci_host_dev->context = NULL;
+ vmci_ioctl_err("error writing init block\n");
+ retval = -EFAULT;
+ goto out;
+ }
+
+ vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
+ atomic_inc(&vmci_host_active_users);
+
+ retval = 0;
+
+out:
+ mutex_unlock(&vmci_host_dev->lock);
+ return retval;
+}
+
+static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_datagram_snd_rcv_info send_info;
+ struct vmci_datagram *dg = NULL;
+ u32 cid;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&send_info, uptr, sizeof(send_info)))
+ return -EFAULT;
+
+ if (send_info.len > VMCI_MAX_DG_SIZE) {
+ vmci_ioctl_err("datagram is too big (size=%d)\n",
+ send_info.len);
+ return -EINVAL;
+ }
+
+ if (send_info.len < sizeof(*dg)) {
+ vmci_ioctl_err("datagram is too small (size=%d)\n",
+ send_info.len);
+ return -EINVAL;
+ }
+
+ dg = kmalloc(send_info.len, GFP_KERNEL);
+ if (!dg) {
+ vmci_ioctl_err(
+ "cannot allocate memory to dispatch datagram\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
+ send_info.len)) {
+ vmci_ioctl_err("error getting datagram\n");
+ kfree(dg);
+ return -EFAULT;
+ }
+
+ pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
+ dg->dst.context, dg->dst.resource,
+ dg->src.context, dg->src.resource,
+ (unsigned long long)dg->payload_size);
+
+ /* Get source context id. */
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ send_info.result = vmci_datagram_dispatch(cid, dg, true);
+ kfree(dg);
+
+ return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_datagram_snd_rcv_info recv_info;
+ struct vmci_datagram *dg = NULL;
+ int retval;
+ size_t size;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
+ return -EFAULT;
+
+ size = recv_info.len;
+ recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
+ &size, &dg);
+
+ if (recv_info.result >= VMCI_SUCCESS) {
+ void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
+ retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
+ kfree(dg);
+ if (retval != 0)
+ return -EFAULT;
+ }
+
+ return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_handle handle;
+ int vmci_status;
+ int __user *retptr;
+ u32 cid;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+
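+	/*
+	 * Old-style VMX'en (user_version < VMCI_VERSION_NOVMVM) pass a
+	 * VM-to-VM allocation block without a page store; newer VMX'en
+	 * supply the PPNs of the queue pair memory directly.
+	 */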
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+ struct vmci_qp_alloc_info_vmvm alloc_info;
+ struct vmci_qp_alloc_info_vmvm __user *info = uptr;
+
+ if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
+ return -EFAULT;
+
+ handle = alloc_info.handle;
+ retptr = &info->result;
+
+ vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
+ alloc_info.peer,
+ alloc_info.flags,
+ VMCI_NO_PRIVILEGE_FLAGS,
+ alloc_info.produce_size,
+ alloc_info.consume_size,
+ NULL,
+ vmci_host_dev->context);
+
+ if (vmci_status == VMCI_SUCCESS)
+ vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
+ } else {
+ struct vmci_qp_alloc_info alloc_info;
+ struct vmci_qp_alloc_info __user *info = uptr;
+ struct vmci_qp_page_store page_store;
+
+ if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
+ return -EFAULT;
+
+ handle = alloc_info.handle;
+ retptr = &info->result;
+
+ page_store.pages = alloc_info.ppn_va;
+ page_store.len = alloc_info.num_ppns;
+
+ vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
+ alloc_info.peer,
+ alloc_info.flags,
+ VMCI_NO_PRIVILEGE_FLAGS,
+ alloc_info.produce_size,
+ alloc_info.consume_size,
+ &page_store,
+ vmci_host_dev->context);
+ }
+
+ if (put_user(vmci_status, retptr)) {
+ if (vmci_status >= VMCI_SUCCESS) {
+ vmci_status = vmci_qp_broker_detach(handle,
+ vmci_host_dev->context);
+ }
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_qp_set_va_info set_va_info;
+ struct vmci_qp_set_va_info __user *info = uptr;
+ s32 result;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+		vmci_ioctl_err("not allowed for this VMX version\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
+ return -EFAULT;
+
+ if (set_va_info.va) {
+ /*
+ * VMX is passing down a new VA for the queue
+ * pair mapping.
+ */
+ result = vmci_qp_broker_map(set_va_info.handle,
+ vmci_host_dev->context,
+ set_va_info.va);
+ } else {
+ /*
+ * The queue pair is about to be unmapped by
+ * the VMX.
+ */
+ result = vmci_qp_broker_unmap(set_va_info.handle,
+ vmci_host_dev->context, 0);
+ }
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_qp_page_file_info page_file_info;
+ struct vmci_qp_page_file_info __user *info = uptr;
+ s32 result;
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
+ vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
+ vmci_ioctl_err("not supported on this VMX (version=%d)\n",
+ vmci_host_dev->user_version);
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
+ return -EFAULT;
+
+ /*
+ * Communicate success pre-emptively to the caller. Note that the
+ * basic premise is that it is incumbent upon the caller not to look at
+ * the info.result field until after the ioctl() returns. And then,
+ * only if the ioctl() result indicates no error. We send up the
+	 * SUCCESS status before calling SetPageStore() because failing
+ * to copy up the result code means unwinding the SetPageStore().
+ *
+ * It turns out the logic to unwind a SetPageStore() opens a can of
+ * worms. For example, if a host had created the queue_pair and a
+ * guest attaches and SetPageStore() is successful but writing success
+ * fails, then ... the host has to be stopped from writing (anymore)
+ * data into the queue_pair. That means an additional test in the
+ * VMCI_Enqueue() code path. Ugh.
+ */
+
+ if (put_user(VMCI_SUCCESS, &info->result)) {
+ /*
+ * In this case, we can't write a result field of the
+ * caller's info block. So, we don't even try to
+ * SetPageStore().
+ */
+ return -EFAULT;
+ }
+
+ result = vmci_qp_broker_set_page_store(page_file_info.handle,
+ page_file_info.produce_va,
+ page_file_info.consume_va,
+ vmci_host_dev->context);
+ if (result < VMCI_SUCCESS) {
+ if (put_user(result, &info->result)) {
+ /*
+ * Note that in this case the SetPageStore()
+ * call failed but we were unable to
+ * communicate that to the caller (because the
+ * copy_to_user() call failed). So, if we
+ * simply return an error (in this case
+ * -EFAULT) then the caller will know that the
+ * SetPageStore failed even though we couldn't
+ * put the result code in the result field and
+ * indicate exactly why it failed.
+ *
+ * That says nothing about the issue where we
+ * were once able to write to the caller's info
+ * memory and now can't. Something more
+ * serious is probably going on than the fact
+ * that SetPageStore() didn't work.
+ */
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_qp_dtch_info detach_info;
+ struct vmci_qp_dtch_info __user *info = uptr;
+ s32 result;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
+ return -EFAULT;
+
+ result = vmci_qp_broker_detach(detach_info.handle,
+ vmci_host_dev->context);
+ if (result == VMCI_SUCCESS &&
+ vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+ result = VMCI_SUCCESS_LAST_DETACH;
+ }
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_info ar_info;
+ struct vmci_ctx_info __user *info = uptr;
+ s32 result;
+ u32 cid;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ result = vmci_ctx_add_notification(cid, ar_info.remote_cid);
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_info ar_info;
+ struct vmci_ctx_info __user *info = uptr;
+ u32 cid;
+ int result;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ result = vmci_ctx_remove_notification(cid,
+ ar_info.remote_cid);
+
+ return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_chkpt_buf_info get_info;
+ u32 cid;
+ void *cpt_buf;
+ int retval;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&get_info, uptr, sizeof(get_info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
+ &get_info.buf_size, &cpt_buf);
+ if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
+ void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
+ retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
+ kfree(cpt_buf);
+
+ if (retval)
+ return -EFAULT;
+ }
+
+ return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_chkpt_buf_info set_info;
+ u32 cid;
+ void *cpt_buf;
+ int retval;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&set_info, uptr, sizeof(set_info)))
+ return -EFAULT;
+
+ cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
+ if (!cpt_buf) {
+ vmci_ioctl_err(
+ "cannot allocate memory to set cpt state (type=%d)\n",
+ set_info.cpt_type);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
+ set_info.buf_size)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+ set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
+ set_info.buf_size, cpt_buf);
+
+ retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;
+
+out:
+ kfree(cpt_buf);
+ return retval;
+}
+
+static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ u32 __user *u32ptr = uptr;
+
+ return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_set_notify_info notify_info;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
+ return -EFAULT;
+
+ if (notify_info.notify_uva) {
+ notify_info.result =
+ vmci_host_setup_notify(vmci_host_dev->context,
+ notify_info.notify_uva);
+ } else {
+ vmci_ctx_unset_notify(vmci_host_dev->context);
+ notify_info.result = VMCI_SUCCESS;
+ }
+
+ return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
+ -EFAULT : 0;
+}
+
+static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_dbell_notify_resource_info info;
+ u32 cid;
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
+ vmci_ioctl_err("invalid for current VMX versions\n");
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&info, uptr, sizeof(info)))
+ return -EFAULT;
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+ switch (info.action) {
+ case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
+ if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
+ u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
+ info.result = vmci_ctx_notify_dbell(cid, info.handle,
+ flags);
+ } else {
+ info.result = VMCI_ERROR_UNAVAILABLE;
+ }
+ break;
+
+ case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
+ info.result = vmci_ctx_dbell_create(cid, info.handle);
+ break;
+
+ case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
+ info.result = vmci_ctx_dbell_destroy(cid, info.handle);
+ break;
+
+ default:
+ vmci_ioctl_err("got unknown action (action=%d)\n",
+ info.action);
+ info.result = VMCI_ERROR_INVALID_ARGS;
+ }
+
+ return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
+ const char *ioctl_name,
+ void __user *uptr)
+{
+ struct vmci_ctx_notify_recv_info info;
+ struct vmci_handle_arr *db_handle_array;
+ struct vmci_handle_arr *qp_handle_array;
+ void __user *ubuf;
+ u32 cid;
+ int retval = 0;
+
+ if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+ vmci_ioctl_err("only valid for contexts\n");
+ return -EINVAL;
+ }
+
+ if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
+ vmci_ioctl_err("not supported for the current vmx version\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&info, uptr, sizeof(info)))
+ return -EFAULT;
+
+ if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
+ (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
+ return -EINVAL;
+ }
+
+ cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+ info.result = vmci_ctx_rcv_notifications_get(cid,
+ &db_handle_array, &qp_handle_array);
+ if (info.result != VMCI_SUCCESS)
+ return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
+
+ ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
+ info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
+ db_handle_array, &retval);
+ if (info.result == VMCI_SUCCESS && !retval) {
+ ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
+ info.result = drv_cp_harray_to_user(ubuf,
+ &info.qp_handle_buf_size,
+ qp_handle_array, &retval);
+ }
+
+ if (!retval && copy_to_user(uptr, &info, sizeof(info)))
+ retval = -EFAULT;
+
+ vmci_ctx_rcv_notifications_release(cid,
+ db_handle_array, qp_handle_array,
+ info.result == VMCI_SUCCESS && !retval);
+
+ return retval;
+}
+
+static long vmci_host_unlocked_ioctl(struct file *filp,
+ unsigned int iocmd, unsigned long ioarg)
+{
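+/*
+ * Dispatch helper: expands to a call of the matching vmci_host_do_<fn>()
+ * handler, passing the stringified ioctl name for use in log messages.
+ */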
+#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do { \
+ char *name = __stringify(IOCTL_VMCI_ ## ioctl_name); \
+ return vmci_host_do_ ## ioctl_fn( \
+ vmci_host_dev, name, uptr); \
+ } while (0)
+
+ struct vmci_host_dev *vmci_host_dev = filp->private_data;
+ void __user *uptr = (void __user *)ioarg;
+
+ switch (iocmd) {
+ case IOCTL_VMCI_INIT_CONTEXT:
+ VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
+ case IOCTL_VMCI_DATAGRAM_SEND:
+ VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
+ case IOCTL_VMCI_DATAGRAM_RECEIVE:
+ VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
+ case IOCTL_VMCI_QUEUEPAIR_ALLOC:
+ VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
+ case IOCTL_VMCI_QUEUEPAIR_SETVA:
+ VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
+ case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
+ VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
+ case IOCTL_VMCI_QUEUEPAIR_DETACH:
+ VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
+ case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
+ VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
+ case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
+ VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
+ case IOCTL_VMCI_CTX_GET_CPT_STATE:
+ VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
+ case IOCTL_VMCI_CTX_SET_CPT_STATE:
+ VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
+ case IOCTL_VMCI_GET_CONTEXT_ID:
+ VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
+ case IOCTL_VMCI_SET_NOTIFY:
+ VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
+ case IOCTL_VMCI_NOTIFY_RESOURCE:
+ VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
+ case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
+ VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);
+
+ case IOCTL_VMCI_VERSION:
+ case IOCTL_VMCI_VERSION2:
+ return vmci_host_get_version(vmci_host_dev, iocmd, uptr);
+
+ default:
+ pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
+ return -EINVAL;
+ }
+
+#undef VMCI_DO_IOCTL
+}
+
+static const struct file_operations vmuser_fops = {
+ .owner = THIS_MODULE,
+ .open = vmci_host_open,
+ .release = vmci_host_close,
+ .poll = vmci_host_poll,
+ .unlocked_ioctl = vmci_host_unlocked_ioctl,
+ .compat_ioctl = vmci_host_unlocked_ioctl,
+};
+
+static struct miscdevice vmci_host_miscdev = {
+ .name = "vmci",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &vmuser_fops,
+};
+
+int __init vmci_host_init(void)
+{
+ int error;
+
+ host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
+ VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+ -1, VMCI_VERSION, NULL);
+ if (IS_ERR(host_context)) {
+ error = PTR_ERR(host_context);
+		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
+ error);
+ return error;
+ }
+
+ error = misc_register(&vmci_host_miscdev);
+ if (error) {
+ pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
+ vmci_host_miscdev.name,
+ MISC_MAJOR, vmci_host_miscdev.minor,
+ error);
+ pr_warn("Unable to initialize host personality\n");
+ vmci_ctx_destroy(host_context);
+ return error;
+ }
+
+ pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
+ vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);
+
+ vmci_host_device_initialized = true;
+ return 0;
+}
+
+void __exit vmci_host_exit(void)
+{
+ int error;
+
+ vmci_host_device_initialized = false;
+
+ error = misc_deregister(&vmci_host_miscdev);
+ if (error)
+ pr_warn("Error unregistering character device: %d\n", error);
+
+ vmci_ctx_destroy(host_context);
+ vmci_qp_broker_exit();
+
+ pr_debug("VMCI host driver module unloaded\n");
+}
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
new file mode 100644
index 000000000000..d94245dbd765
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -0,0 +1,3425 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/wait.h>
+#include <linux/vmalloc.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+#include "vmci_route.h"
+
+/*
+ * In the following, we will distinguish between two kinds of VMX processes -
+ * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
+ * VMCI page files in the VMX and supporting VM to VM communication and the
+ * newer ones that use the guest memory directly. We will in the following
+ * refer to the older VMX versions as old-style VMX'en, and the newer ones as
+ * new-style VMX'en.
+ *
+ * The state transition diagram is as follows (the VMCIQPB_ prefix has been
+ * removed for readability) - see below for more details on the transitions:
+ *
+ * -------------- NEW -------------
+ * | |
+ * \_/ \_/
+ * CREATED_NO_MEM <-----------------> CREATED_MEM
+ * | | |
+ * | o-----------------------o |
+ * | | |
+ * \_/ \_/ \_/
+ * ATTACHED_NO_MEM <----------------> ATTACHED_MEM
+ * | | |
+ * | o----------------------o |
+ * | | |
+ * \_/ \_/ \_/
+ * SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
+ * | |
+ * | |
+ * -------------> gone <-------------
+ *
+ * In more detail. When a VMCI queue pair is first created, it will be in the
+ * VMCIQPB_NEW state. It will then move into one of the following states:
+ *
+ * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
+ *
+ *  - the create was performed by a host endpoint, in which case there is
+ * no backing memory yet.
+ *
+ * - the create was initiated by an old-style VMX, that uses
+ * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
+ * a later point in time. This state can be distinguished from the one
+ * above by the context ID of the creator. A host side is not allowed to
+ * attach until the page store has been set.
+ *
+ * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
+ * is created by a VMX using the queue pair device backend that
+ * sets the UVAs of the queue pair immediately and stores the
+ * information for later attachers. At this point, it is ready for
+ * the host side to attach to it.
+ *
+ * Once the queue pair is in one of the created states (with the exception of
+ * the case mentioned for older VMX'en above), it is possible to attach to the
+ * queue pair. Again we have two new states possible:
+ *
+ * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
+ * paths:
+ *
+ * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
+ * pair, and attaches to a queue pair previously created by the host side.
+ *
+ * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
+ * already created by a guest.
+ *
+ * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
+ * vmci_qp_broker_set_page_store (see below).
+ *
+ * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
+ * VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will
+ * bring the queue pair into this state. Once vmci_qp_broker_set_page_store
+ * is called to register the user memory, the VMCIQPB_ATTACH_MEM state
+ * will be entered.
+ *
+ * From the attached queue pair, the queue pair can enter the shutdown states
+ * when either side of the queue pair detaches. If the guest side detaches
+ * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
+ * the content of the queue pair will no longer be available. If the host
+ * side detaches first, the queue pair will either enter the
+ * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
+ * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
+ * (e.g., the host detaches while a guest is stunned).
+ *
+ * New-style VMX'en will also unmap guest memory, if the guest is
+ * quiesced, e.g., during a snapshot operation. In that case, the guest
+ * memory will no longer be available, and the queue pair will transition from
+ * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
+ * in which case the queue pair will transition from the *_NO_MEM state at that
+ * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
+ * since the peer may have either attached or detached in the meantime. The
+ * values are laid out such that ++ on a state will move from a *_NO_MEM to a
+ * *_MEM state, and vice versa.
+ */
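+
+/*
+ * One illustrative walk-through of the diagram above (not exhaustive):
+ * a new-style guest creates the pair with backing memory (NEW ->
+ * CREATED_MEM), the host attaches (-> ATTACHED_MEM), the guest is
+ * quiesced for a snapshot and its memory is unmapped (-> ATTACHED_NO_MEM),
+ * the guest resumes and the memory is mapped again (-> ATTACHED_MEM),
+ * the guest detaches first (-> SHUTDOWN_NO_MEM), and finally the host
+ * detaches and the pair is gone.
+ */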
+
+/*
+ * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
+ * types are passed around to enqueue and dequeue routines. Note that
+ * often the functions passed are simply wrappers around memcpy
+ * itself.
+ *
+ * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
+ * there's an unused last parameter for the hosted side. In
+ * ESX, that parameter holds a buffer type.
+ */
+typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
+ u64 queue_offset, const void *src,
+ size_t src_offset, size_t size);
+typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
+ const struct vmci_queue *queue,
+ u64 queue_offset, size_t size);
+
+/* The Kernel specific component of the struct vmci_queue structure. */
+struct vmci_queue_kern_if {
+ struct page **page;
+ struct page **header_page;
+ void *va;
+ struct mutex __mutex; /* Protects the queue. */
+ struct mutex *mutex; /* Shared by producer and consumer queues. */
+ bool host;
+ size_t num_pages;
+ bool mapped;
+};
+
+/*
+ * This structure is opaque to the clients.
+ */
+struct vmci_qp {
+ struct vmci_handle handle;
+ struct vmci_queue *produce_q;
+ struct vmci_queue *consume_q;
+ u64 produce_q_size;
+ u64 consume_q_size;
+ u32 peer;
+ u32 flags;
+ u32 priv_flags;
+ bool guest_endpoint;
+ unsigned int blocked;
+ unsigned int generation;
+ wait_queue_head_t event;
+};
+
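+/*
+ * Broker states. Each *_NO_MEM value is immediately followed by its
+ * *_MEM counterpart, so incrementing or decrementing a state toggles
+ * only the memory-backing part (see the state diagram above).
+ */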
+enum qp_broker_state {
+ VMCIQPB_NEW,
+ VMCIQPB_CREATED_NO_MEM,
+ VMCIQPB_CREATED_MEM,
+ VMCIQPB_ATTACHED_NO_MEM,
+ VMCIQPB_ATTACHED_MEM,
+ VMCIQPB_SHUTDOWN_NO_MEM,
+ VMCIQPB_SHUTDOWN_MEM,
+ VMCIQPB_GONE
+};
+
+#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
+ _qpb->state == VMCIQPB_ATTACHED_MEM || \
+ _qpb->state == VMCIQPB_SHUTDOWN_MEM)
+
+/*
+ * In the queue pair broker, we always use the guest point of view for
+ * the produce and consume queue values and references, e.g., the
+ * produce queue size stored is the guest's produce queue size. The
+ * host endpoint will need to swap these around. The only exception is
+ * the local queue pairs on the host, in which case the host endpoint
+ * that creates the queue pair will have the right orientation, and
+ * the attaching host endpoint will need to swap.
+ */
+struct qp_entry {
+ struct list_head list_item;
+ struct vmci_handle handle;
+ u32 peer;
+ u32 flags;
+ u64 produce_size;
+ u64 consume_size;
+ u32 ref_count;
+};
+
+struct qp_broker_entry {
+ struct vmci_resource resource;
+ struct qp_entry qp;
+ u32 create_id;
+ u32 attach_id;
+ enum qp_broker_state state;
+ bool require_trusted_attach;
+ bool created_by_trusted;
+ bool vmci_page_files; /* Created by VMX using VMCI page files */
+ struct vmci_queue *produce_q;
+ struct vmci_queue *consume_q;
+ struct vmci_queue_header saved_produce_q;
+ struct vmci_queue_header saved_consume_q;
+ vmci_event_release_cb wakeup_cb;
+ void *client_data;
+ void *local_mem; /* Kernel memory for local queue pair */
+};
+
+struct qp_guest_endpoint {
+ struct vmci_resource resource;
+ struct qp_entry qp;
+ u64 num_ppns;
+ void *produce_q;
+ void *consume_q;
+ struct ppn_set ppn_set;
+};
+
+struct qp_list {
+ struct list_head head;
+ struct mutex mutex; /* Protect queue list. */
+};
+
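+/*
+ * Two separate lists are kept: host-side broker entries and guest-side
+ * endpoints, each protected by its own mutex.
+ */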
+static struct qp_list qp_broker_list = {
+ .head = LIST_HEAD_INIT(qp_broker_list.head),
+ .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
+};
+
+static struct qp_list qp_guest_endpoints = {
+ .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
+ .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
+};
+
+#define INVALID_VMCI_GUEST_MEM_ID 0
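+/* Data pages for both queues plus one header page each (hence the "+ 2"). */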
+#define QPE_NUM_PAGES(_QPE) ((u32) \
+ (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
+ DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
+
+
+/*
+ * Frees kernel VA space for a given queue and its queue header, and
+ * frees physical data pages.
+ */
+static void qp_free_queue(void *q, u64 size)
+{
+ struct vmci_queue *queue = q;
+
+ if (queue) {
+ u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
+
+ if (queue->kernel_if->mapped) {
+ vunmap(queue->kernel_if->va);
+ queue->kernel_if->va = NULL;
+ }
+
+ while (i)
+ __free_page(queue->kernel_if->page[--i]);
+
+ vfree(queue->q_header);
+ }
+}
+
+/*
+ * Allocates kernel VA space of specified size, plus space for the
+ * queue structure/kernel interface and the queue header. Allocates
+ * physical pages for the queue data pages.
+ *
+ * PAGE m: struct vmci_queue_header (struct vmci_queue->q_header)
+ * PAGE m+1: struct vmci_queue
+ * PAGE m+1+q: struct vmci_queue_kern_if (struct vmci_queue->kernel_if)
+ * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[])
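+ *
+ * For example (assuming 4 KiB pages), a 64 KiB queue needs 16 data pages;
+ * the vmalloc()'ed region then holds the header page, the queue and
+ * kernel_if structures and a 16-entry page pointer array, while the data
+ * pages themselves come from alloc_pages() below.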
+ */
+static void *qp_alloc_queue(u64 size, u32 flags)
+{
+ u64 i;
+ struct vmci_queue *queue;
+ struct vmci_queue_header *q_header;
+ const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ const uint queue_size =
+ PAGE_SIZE +
+ sizeof(*queue) + sizeof(*(queue->kernel_if)) +
+ num_data_pages * sizeof(*(queue->kernel_if->page));
+
+ q_header = vmalloc(queue_size);
+ if (!q_header)
+ return NULL;
+
+ queue = (void *)q_header + PAGE_SIZE;
+ queue->q_header = q_header;
+ queue->saved_header = NULL;
+ queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
+ queue->kernel_if->header_page = NULL; /* Unused in guest. */
+ queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
+ queue->kernel_if->host = false;
+ queue->kernel_if->va = NULL;
+ queue->kernel_if->mapped = false;
+
+ for (i = 0; i < num_data_pages; i++) {
+ queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
+ if (!queue->kernel_if->page[i])
+ goto fail;
+ }
+
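+	/*
+	 * If the flags mark the queue pair as pinned, map all data pages
+	 * into one contiguous kernel VA range up front; otherwise the copy
+	 * helpers kmap() individual pages on demand.
+	 */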
+ if (vmci_qp_pinned(flags)) {
+ queue->kernel_if->va =
+ vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
+ PAGE_KERNEL);
+ if (!queue->kernel_if->va)
+ goto fail;
+
+ queue->kernel_if->mapped = true;
+ }
+
+ return (void *)queue;
+
+ fail:
+ qp_free_queue(queue, i * PAGE_SIZE);
+ return NULL;
+}
+
+/*
+ * Copies from a given buffer or iovector to a VMCI Queue. Uses
+ * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * by traversing the offset -> page translation structure for the queue.
+ * Assumes that offset + size does not wrap around in the queue.
+ */
+static int __qp_memcpy_to_queue(struct vmci_queue *queue,
+ u64 queue_offset,
+ const void *src,
+ size_t size,
+ bool is_iovec)
+{
+ struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
+ size_t bytes_copied = 0;
+
+ while (bytes_copied < size) {
+ u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
+ size_t page_offset =
+ (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
+ void *va;
+ size_t to_copy;
+
+ if (!kernel_if->mapped)
+ va = kmap(kernel_if->page[page_index]);
+ else
+ va = (void *)((u8 *)kernel_if->va +
+ (page_index * PAGE_SIZE));
+
+ if (size - bytes_copied > PAGE_SIZE - page_offset)
+ /* Enough payload to fill up from this page. */
+ to_copy = PAGE_SIZE - page_offset;
+ else
+ to_copy = size - bytes_copied;
+
+ if (is_iovec) {
+ struct iovec *iov = (struct iovec *)src;
+ int err;
+
+ /* The iovec will track bytes_copied internally. */
+ err = memcpy_fromiovec((u8 *)va + page_offset,
+ iov, to_copy);
+ if (err != 0) {
+ kunmap(kernel_if->page[page_index]);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+ } else {
+ memcpy((u8 *)va + page_offset,
+ (u8 *)src + bytes_copied, to_copy);
+ }
+
+ bytes_copied += to_copy;
+ if (!kernel_if->mapped)
+ kunmap(kernel_if->page[page_index]);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Copies to a given buffer or iovector from a VMCI Queue. Uses
+ * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * by traversing the offset -> page translation structure for the queue.
+ * Assumes that offset + size does not wrap around in the queue.
+ */
+static int __qp_memcpy_from_queue(void *dest,
+ const struct vmci_queue *queue,
+ u64 queue_offset,
+ size_t size,
+ bool is_iovec)
+{
+ struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
+ size_t bytes_copied = 0;
+
+ while (bytes_copied < size) {
+ u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
+ size_t page_offset =
+ (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
+ void *va;
+ size_t to_copy;
+
+ if (!kernel_if->mapped)
+ va = kmap(kernel_if->page[page_index]);
+ else
+ va = (void *)((u8 *)kernel_if->va +
+ (page_index * PAGE_SIZE));
+
+ if (size - bytes_copied > PAGE_SIZE - page_offset)
+ /* Enough payload to fill up this page. */
+ to_copy = PAGE_SIZE - page_offset;
+ else
+ to_copy = size - bytes_copied;
+
+ if (is_iovec) {
+ struct iovec *iov = (struct iovec *)dest;
+ int err;
+
+ /* The iovec will track bytes_copied internally. */
+ err = memcpy_toiovec(iov, (u8 *)va + page_offset,
+ to_copy);
+ if (err != 0) {
+ kunmap(kernel_if->page[page_index]);
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+ } else {
+ memcpy((u8 *)dest + bytes_copied,
+ (u8 *)va + page_offset, to_copy);
+ }
+
+ bytes_copied += to_copy;
+ if (!kernel_if->mapped)
+ kunmap(kernel_if->page[page_index]);
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Allocates two lists of PPNs --- one for the pages in the produce queue,
+ * and the other for the pages in the consume queue. Initializes the lists
+ * of PPNs with the page frame numbers of the KVA for the two queues (and
+ * the queue headers).
+ */
+static int qp_alloc_ppn_set(void *prod_q,
+ u64 num_produce_pages,
+ void *cons_q,
+ u64 num_consume_pages, struct ppn_set *ppn_set)
+{
+ u32 *produce_ppns;
+ u32 *consume_ppns;
+ struct vmci_queue *produce_q = prod_q;
+ struct vmci_queue *consume_q = cons_q;
+ u64 i;
+
+ if (!produce_q || !num_produce_pages || !consume_q ||
+ !num_consume_pages || !ppn_set)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (ppn_set->initialized)
+ return VMCI_ERROR_ALREADY_EXISTS;
+
+ produce_ppns =
+ kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
+ if (!produce_ppns)
+ return VMCI_ERROR_NO_MEM;
+
+ consume_ppns =
+ kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
+ if (!consume_ppns) {
+ kfree(produce_ppns);
+ return VMCI_ERROR_NO_MEM;
+ }
+
+ produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header));
+ for (i = 1; i < num_produce_pages; i++) {
+ unsigned long pfn;
+
+ produce_ppns[i] =
+ page_to_pfn(produce_q->kernel_if->page[i - 1]);
+ pfn = produce_ppns[i];
+
+ /* Fail allocation if PFN isn't supported by hypervisor. */
+ if (sizeof(pfn) > sizeof(*produce_ppns)
+ && pfn != produce_ppns[i])
+ goto ppn_error;
+ }
+
+ consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header));
+ for (i = 1; i < num_consume_pages; i++) {
+ unsigned long pfn;
+
+ consume_ppns[i] =
+ page_to_pfn(consume_q->kernel_if->page[i - 1]);
+ pfn = consume_ppns[i];
+
+ /* Fail allocation if PFN isn't supported by hypervisor. */
+ if (sizeof(pfn) > sizeof(*consume_ppns)
+ && pfn != consume_ppns[i])
+ goto ppn_error;
+ }
+
+ ppn_set->num_produce_pages = num_produce_pages;
+ ppn_set->num_consume_pages = num_consume_pages;
+ ppn_set->produce_ppns = produce_ppns;
+ ppn_set->consume_ppns = consume_ppns;
+ ppn_set->initialized = true;
+ return VMCI_SUCCESS;
+
+ ppn_error:
+ kfree(produce_ppns);
+ kfree(consume_ppns);
+ return VMCI_ERROR_INVALID_ARGS;
+}
+
+/*
+ * Frees the two lists of PPNs for a queue pair.
+ */
+static void qp_free_ppn_set(struct ppn_set *ppn_set)
+{
+ if (ppn_set->initialized) {
+ /* Do not call these functions on NULL inputs. */
+ kfree(ppn_set->produce_ppns);
+ kfree(ppn_set->consume_ppns);
+ }
+ memset(ppn_set, 0, sizeof(*ppn_set));
+}
+
+/*
+ * Populates the list of PPNs in the hypercall structure with the PPNs
+ * of the produce queue and the consume queue.
+ */
+static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
+{
+ memcpy(call_buf, ppn_set->produce_ppns,
+ ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
+ memcpy(call_buf +
+ ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
+ ppn_set->consume_ppns,
+ ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
+
+ return VMCI_SUCCESS;
+}
+
+static int qp_memcpy_to_queue(struct vmci_queue *queue,
+ u64 queue_offset,
+ const void *src, size_t src_offset, size_t size)
+{
+ return __qp_memcpy_to_queue(queue, queue_offset,
+ (u8 *)src + src_offset, size, false);
+}
+
+static int qp_memcpy_from_queue(void *dest,
+ size_t dest_offset,
+ const struct vmci_queue *queue,
+ u64 queue_offset, size_t size)
+{
+ return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
+ queue, queue_offset, size, false);
+}
+
+/*
+ * Copies from a given iovec to a VMCI Queue.
+ */
+static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
+ u64 queue_offset,
+ const void *src,
+ size_t src_offset, size_t size)
+{
+
+ /*
+ * We ignore src_offset because src is really a struct iovec * and will
+ * maintain offset internally.
+ */
+ return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
+}
+
+/*
+ * Copies to a given iovec from a VMCI Queue.
+ */
+static int qp_memcpy_from_queue_iov(void *dest,
+ size_t dest_offset,
+ const struct vmci_queue *queue,
+ u64 queue_offset, size_t size)
+{
+ /*
+ * We ignore dest_offset because dest is really a struct iovec * and
+ * will maintain offset internally.
+ */
+ return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
+}
+
+/*
+ * Allocates kernel VA space of specified size plus space for the queue
+ * and kernel interface. This is different from the guest queue allocator,
+ * because we do not allocate our own queue header/data pages here but
+ * share those of the guest.
+ */
+static struct vmci_queue *qp_host_alloc_queue(u64 size)
+{
+ struct vmci_queue *queue;
+ const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
+ const size_t queue_page_size =
+ num_pages * sizeof(*queue->kernel_if->page);
+
+ queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
+ if (queue) {
+ queue->q_header = NULL;
+ queue->saved_header = NULL;
+ queue->kernel_if =
+ (struct vmci_queue_kern_if *)((u8 *)queue +
+ sizeof(*queue));
+ queue->kernel_if->host = true;
+ queue->kernel_if->mutex = NULL;
+ queue->kernel_if->num_pages = num_pages;
+ queue->kernel_if->header_page =
+ (struct page **)((u8 *)queue + queue_size);
+ queue->kernel_if->page = &queue->kernel_if->header_page[1];
+ queue->kernel_if->va = NULL;
+ queue->kernel_if->mapped = false;
+ }
+
+ return queue;
+}
+
+/*
+ * Frees kernel memory for a given queue (header plus translation
+ * structure).
+ */
+static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
+{
+ kfree(queue);
+}
+
+/*
+ * Initialize the mutex for the pair of queues. This mutex is used to
+ * protect the q_header and the buffer from changing out from under any
+ * users of either queue. Of course, it's only any good if the mutexes
+ * are actually acquired. Queue structure must lie on non-paged memory
+ * or we cannot guarantee access to the mutex.
+ */
+static void qp_init_queue_mutex(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ /*
+ * Only the host queue has shared state - the guest queues do not
+ * need to synchronize access using a queue mutex.
+ */
+
+ if (produce_q->kernel_if->host) {
+ produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
+ consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
+ mutex_init(produce_q->kernel_if->mutex);
+ }
+}
+
+/*
+ * Cleans up the mutex for the pair of queues.
+ */
+static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ if (produce_q->kernel_if->host) {
+ produce_q->kernel_if->mutex = NULL;
+ consume_q->kernel_if->mutex = NULL;
+ }
+}
+
+/*
+ * Acquire the mutex for the queue. Note that the produce_q and
+ * the consume_q share a mutex. So, only one of the two needs to
+ * be passed in to this routine. Either will work just fine.
+ */
+static void qp_acquire_queue_mutex(struct vmci_queue *queue)
+{
+ if (queue->kernel_if->host)
+ mutex_lock(queue->kernel_if->mutex);
+}
+
+/*
+ * Release the mutex for the queue. Note that the produce_q and
+ * the consume_q share a mutex. So, only one of the two needs to
+ * be passed in to this routine. Either will work just fine.
+ */
+static void qp_release_queue_mutex(struct vmci_queue *queue)
+{
+ if (queue->kernel_if->host)
+ mutex_unlock(queue->kernel_if->mutex);
+}
+
+/*
+ * Helper function to release pages previously obtained using
+ * get_user_pages().
+ */
+static void qp_release_pages(struct page **pages,
+ u64 num_pages, bool dirty)
+{
+ int i;
+
+ for (i = 0; i < num_pages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ page_cache_release(pages[i]);
+ pages[i] = NULL;
+ }
+}
+
+/*
+ * Lock the user pages referenced by the {produce,consume}Buffer
+ * struct into memory and populate the {produce,consume}Pages
+ * arrays in the attach structure with them.
+ */
+static int qp_host_get_user_memory(u64 produce_uva,
+ u64 consume_uva,
+ struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ int retval;
+ int err = VMCI_SUCCESS;
+
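+	/*
+	 * Pin the produce and consume queue pages for write access. On a
+	 * partial failure, release whatever was pinned before bailing out.
+	 */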
+ down_write(&current->mm->mmap_sem);
+ retval = get_user_pages(current,
+ current->mm,
+ (uintptr_t) produce_uva,
+ produce_q->kernel_if->num_pages,
+ 1, 0, produce_q->kernel_if->header_page, NULL);
+ if (retval < produce_q->kernel_if->num_pages) {
+		pr_warn("get_user_pages(produce) failed (retval=%d)\n", retval);
+ qp_release_pages(produce_q->kernel_if->header_page, retval,
+ false);
+ err = VMCI_ERROR_NO_MEM;
+ goto out;
+ }
+
+ retval = get_user_pages(current,
+ current->mm,
+ (uintptr_t) consume_uva,
+ consume_q->kernel_if->num_pages,
+ 1, 0, consume_q->kernel_if->header_page, NULL);
+ if (retval < consume_q->kernel_if->num_pages) {
+		pr_warn("get_user_pages(consume) failed (retval=%d)\n", retval);
+ qp_release_pages(consume_q->kernel_if->header_page, retval,
+ false);
+ qp_release_pages(produce_q->kernel_if->header_page,
+ produce_q->kernel_if->num_pages, false);
+ err = VMCI_ERROR_NO_MEM;
+ }
+
+ out:
+ up_write(&current->mm->mmap_sem);
+
+ return err;
+}
+
+/*
+ * Registers the specification of the user pages used for backing a queue
+ * pair. Enough information to map in pages is stored in the OS specific
+ * part of the struct vmci_queue structure.
+ */
+static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
+ struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ u64 produce_uva;
+ u64 consume_uva;
+
+ /*
+	 * The new style and the old style mapping only differ in
+	 * whether we get a single UVA range or two, so we split the
+ * single UVA range at the appropriate spot.
+ */
+ produce_uva = page_store->pages;
+ consume_uva = page_store->pages +
+ produce_q->kernel_if->num_pages * PAGE_SIZE;
+ return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
+ consume_q);
+}
+
+/*
+ * Releases and removes the references to user pages stored in the attach
+ * struct. Pages are released from the page cache and may become
+ * swappable again.
+ */
+static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ qp_release_pages(produce_q->kernel_if->header_page,
+ produce_q->kernel_if->num_pages, true);
+ memset(produce_q->kernel_if->header_page, 0,
+ sizeof(*produce_q->kernel_if->header_page) *
+ produce_q->kernel_if->num_pages);
+ qp_release_pages(consume_q->kernel_if->header_page,
+ consume_q->kernel_if->num_pages, true);
+ memset(consume_q->kernel_if->header_page, 0,
+ sizeof(*consume_q->kernel_if->header_page) *
+ consume_q->kernel_if->num_pages);
+}
+
+/*
+ * Once qp_host_register_user_memory has been performed on a
+ * queue, the queue pair headers can be mapped into the
+ * kernel. Once mapped, they must be unmapped with
+ * qp_host_unmap_queues prior to calling
+ * qp_host_unregister_user_memory.
+ * Pages are pinned.
+ */
+static int qp_host_map_queues(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
+ int result;
+
+ if (!produce_q->q_header || !consume_q->q_header) {
+ struct page *headers[2];
+
+ if (produce_q->q_header != consume_q->q_header)
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+
+ if (produce_q->kernel_if->header_page == NULL ||
+ *produce_q->kernel_if->header_page == NULL)
+ return VMCI_ERROR_UNAVAILABLE;
+
+ headers[0] = *produce_q->kernel_if->header_page;
+ headers[1] = *consume_q->kernel_if->header_page;
+
+ produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
+ if (produce_q->q_header != NULL) {
+ consume_q->q_header =
+ (struct vmci_queue_header *)((u8 *)
+ produce_q->q_header +
+ PAGE_SIZE);
+ result = VMCI_SUCCESS;
+ } else {
+ pr_warn("vmap failed\n");
+ result = VMCI_ERROR_NO_MEM;
+ }
+ } else {
+ result = VMCI_SUCCESS;
+ }
+
+ return result;
+}
+
+/*
+ * Unmaps previously mapped queue pair headers from the kernel.
+ * Pages are unpinned.
+ */
+static int qp_host_unmap_queues(u32 gid,
+ struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q)
+{
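+	/*
+	 * The two headers were mapped with a single two-page vmap() in
+	 * qp_host_map_queues(), so unmap whichever pointer is the base of
+	 * that mapping.
+	 */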
+ if (produce_q->q_header) {
+ if (produce_q->q_header < consume_q->q_header)
+ vunmap(produce_q->q_header);
+ else
+ vunmap(consume_q->q_header);
+
+ produce_q->q_header = NULL;
+ consume_q->q_header = NULL;
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle. Assumes
+ * that the list is locked.
+ */
+static struct qp_entry *qp_list_find(struct qp_list *qp_list,
+ struct vmci_handle handle)
+{
+ struct qp_entry *entry;
+
+ if (vmci_handle_is_invalid(handle))
+ return NULL;
+
+ list_for_each_entry(entry, &qp_list->head, list_item) {
+ if (vmci_handle_is_equal(entry->handle, handle))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle.
+ */
+static struct qp_guest_endpoint *
+qp_guest_handle_to_entry(struct vmci_handle handle)
+{
+ struct qp_guest_endpoint *entry;
+ struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
+
+ entry = qp ? container_of(
+ qp, struct qp_guest_endpoint, qp) : NULL;
+ return entry;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle.
+ */
+static struct qp_broker_entry *
+qp_broker_handle_to_entry(struct vmci_handle handle)
+{
+ struct qp_broker_entry *entry;
+ struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
+
+ entry = qp ? container_of(
+ qp, struct qp_broker_entry, qp) : NULL;
+ return entry;
+}
+
+/*
+ * Dispatches a queue pair event message directly into the local event
+ * queue.
+ */
+static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
+{
+ u32 context_id = vmci_get_context_id();
+ struct vmci_event_qp ev;
+
+ ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
+ ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID);
+ ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+ ev.msg.event_data.event =
+ attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
+ ev.payload.peer_id = context_id;
+ ev.payload.handle = handle;
+
+ return vmci_event_dispatch(&ev.msg.hdr);
+}
+
+/*
+ * Allocates and initializes a qp_guest_endpoint structure.
+ * Allocates a queue_pair rid (and handle) iff the given entry has
+ * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
+ * are reserved handles. Assumes that the QP list mutex is held
+ * by the caller.
+ */
+static struct qp_guest_endpoint *
+qp_guest_endpoint_create(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u64 produce_size,
+ u64 consume_size,
+ void *produce_q,
+ void *consume_q)
+{
+ int result;
+ struct qp_guest_endpoint *entry;
+ /* One page each for the queue headers. */
+ const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
+ DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
+
+ if (vmci_handle_is_invalid(handle)) {
+ u32 context_id = vmci_get_context_id();
+
+ handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry) {
+ entry->qp.peer = peer;
+ entry->qp.flags = flags;
+ entry->qp.produce_size = produce_size;
+ entry->qp.consume_size = consume_size;
+ entry->qp.ref_count = 0;
+ entry->num_ppns = num_ppns;
+ entry->produce_q = produce_q;
+ entry->consume_q = consume_q;
+ INIT_LIST_HEAD(&entry->qp.list_item);
+
+ /* Add resource obj */
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_QPAIR_GUEST,
+ handle);
+ entry->qp.handle = vmci_resource_handle(&entry->resource);
+ if ((result != VMCI_SUCCESS) ||
+ qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
+ pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
+ handle.context, handle.resource, result);
+ kfree(entry);
+ entry = NULL;
+ }
+ }
+ return entry;
+}
+
+/*
+ * Frees a qp_guest_endpoint structure.
+ */
+static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
+{
+ qp_free_ppn_set(&entry->ppn_set);
+ qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
+ qp_free_queue(entry->produce_q, entry->qp.produce_size);
+ qp_free_queue(entry->consume_q, entry->qp.consume_size);
+ /* Unlink from resource hash table and free callback */
+ vmci_resource_remove(&entry->resource);
+
+ kfree(entry);
+}
+
+/*
+ * Helper to make a VMCI_QUEUEPAIR_ALLOC hypercall when the driver is
+ * supporting a guest device.
+ */
+static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
+{
+ struct vmci_qp_alloc_msg *alloc_msg;
+ size_t msg_size;
+ int result;
+
+ if (!entry || entry->num_ppns <= 2)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ msg_size = sizeof(*alloc_msg) +
+ (size_t) entry->num_ppns * sizeof(u32);
+ alloc_msg = kmalloc(msg_size, GFP_KERNEL);
+ if (!alloc_msg)
+ return VMCI_ERROR_NO_MEM;
+
+ alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_QUEUEPAIR_ALLOC);
+ alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
+ alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
+ alloc_msg->handle = entry->qp.handle;
+ alloc_msg->peer = entry->qp.peer;
+ alloc_msg->flags = entry->qp.flags;
+ alloc_msg->produce_size = entry->qp.produce_size;
+ alloc_msg->consume_size = entry->qp.consume_size;
+ alloc_msg->num_ppns = entry->num_ppns;
+
+ result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
+ &entry->ppn_set);
+ if (result == VMCI_SUCCESS)
+ result = vmci_send_datagram(&alloc_msg->hdr);
+
+ kfree(alloc_msg);
+
+ return result;
+}
+
+/*
+ * Helper to make a VMCI_QUEUEPAIR_DETACH hypercall when the driver is
+ * supporting a guest device.
+ */
+static int qp_detatch_hypercall(struct vmci_handle handle)
+{
+ struct vmci_qp_detach_msg detach_msg;
+
+ detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_QUEUEPAIR_DETACH);
+ detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+ detach_msg.hdr.payload_size = sizeof(handle);
+ detach_msg.handle = handle;
+
+ return vmci_send_datagram(&detach_msg.hdr);
+}
+
+/*
+ * Adds the given entry to the list. Assumes that the list is locked.
+ */
+static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
+{
+ if (entry)
+ list_add(&entry->list_item, &qp_list->head);
+}
+
+/*
+ * Removes the given entry from the list. Assumes that the list is locked.
+ */
+static void qp_list_remove_entry(struct qp_list *qp_list,
+ struct qp_entry *entry)
+{
+ if (entry)
+ list_del(&entry->list_item);
+}
+
+/*
+ * Helper for VMCI queue_pair detach interface. Frees the physical
+ * pages for the queue pair.
+ */
+static int qp_detatch_guest_work(struct vmci_handle handle)
+{
+ int result;
+ struct qp_guest_endpoint *entry;
+ u32 ref_count = ~0; /* To avoid compiler warning below */
+
+ mutex_lock(&qp_guest_endpoints.mutex);
+
+ entry = qp_guest_handle_to_entry(handle);
+ if (!entry) {
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ return VMCI_ERROR_NOT_FOUND;
+ }
+
+ if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+ result = VMCI_SUCCESS;
+
+ if (entry->qp.ref_count > 1) {
+ result = qp_notify_peer_local(false, handle);
+ /*
+ * We can fail to notify a local queuepair
+ * because we can't allocate. We still want
+ * to release the entry if that happens, so
+ * don't bail out yet.
+ */
+ }
+ } else {
+ result = qp_detatch_hypercall(handle);
+ if (result < VMCI_SUCCESS) {
+ /*
+ * We failed to notify a non-local queuepair.
+ * That other queuepair might still be
+ * accessing the shared memory, so don't
+			 * release the entry yet. It will get cleaned
+			 * up by the queue pair exit code if necessary
+ * (assuming we are going away, otherwise why
+ * did this fail?).
+ */
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ return result;
+ }
+ }
+
+ /*
+ * If we get here then we either failed to notify a local queuepair, or
+ * we succeeded in all cases. Release the entry if required.
+ */
+
+ entry->qp.ref_count--;
+ if (entry->qp.ref_count == 0)
+ qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
+
+ /* If we didn't remove the entry, this could change once we unlock. */
+ if (entry)
+ ref_count = entry->qp.ref_count;
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+
+ if (ref_count == 0)
+ qp_guest_endpoint_destroy(entry);
+
+ return result;
+}
+
+/*
+ * This function handles the actual allocation of a VMCI queue
+ * pair guest endpoint. Allocates physical pages for the queue
+ * pair. It makes OS dependent calls through generic wrappers.
+ */
+static int qp_alloc_guest_work(struct vmci_handle *handle,
+ struct vmci_queue **produce_q,
+ u64 produce_size,
+ struct vmci_queue **consume_q,
+ u64 consume_size,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags)
+{
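+	/* One extra page per queue for its header. */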
+ const u64 num_produce_pages =
+ DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
+ const u64 num_consume_pages =
+ DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
+ void *my_produce_q = NULL;
+ void *my_consume_q = NULL;
+ int result;
+ struct qp_guest_endpoint *queue_pair_entry = NULL;
+
+ if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
+ return VMCI_ERROR_NO_ACCESS;
+
+ mutex_lock(&qp_guest_endpoints.mutex);
+
+ queue_pair_entry = qp_guest_handle_to_entry(*handle);
+ if (queue_pair_entry) {
+ if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+ /* Local attach case. */
+ if (queue_pair_entry->qp.ref_count > 1) {
+ pr_devel("Error attempting to attach more than once\n");
+ result = VMCI_ERROR_UNAVAILABLE;
+ goto error_keep_entry;
+ }
+
+ if (queue_pair_entry->qp.produce_size != consume_size ||
+ queue_pair_entry->qp.consume_size !=
+ produce_size ||
+ queue_pair_entry->qp.flags !=
+ (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
+ pr_devel("Error mismatched queue pair in local attach\n");
+ result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
+ goto error_keep_entry;
+ }
+
+ /*
+ * Do a local attach. We swap the consume and
+ * produce queues for the attacher and deliver
+ * an attach event.
+ */
+ result = qp_notify_peer_local(true, *handle);
+ if (result < VMCI_SUCCESS)
+ goto error_keep_entry;
+
+ my_produce_q = queue_pair_entry->consume_q;
+ my_consume_q = queue_pair_entry->produce_q;
+ goto out;
+ }
+
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ goto error_keep_entry;
+ }
+
+ my_produce_q = qp_alloc_queue(produce_size, flags);
+ if (!my_produce_q) {
+ pr_warn("Error allocating pages for produce queue\n");
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ my_consume_q = qp_alloc_queue(consume_size, flags);
+ if (!my_consume_q) {
+ pr_warn("Error allocating pages for consume queue\n");
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
+ produce_size, consume_size,
+ my_produce_q, my_consume_q);
+ if (!queue_pair_entry) {
+ pr_warn("Error allocating memory in %s\n", __func__);
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
+ num_consume_pages,
+ &queue_pair_entry->ppn_set);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("qp_alloc_ppn_set failed\n");
+ goto error;
+ }
+
+ /*
+ * It's only necessary to notify the host if this queue pair will be
+ * attached to from another context.
+ */
+ if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+ /* Local create case. */
+ u32 context_id = vmci_get_context_id();
+
+ /*
+ * Enforce similar checks on local queue pairs as we
+ * do for regular ones. The handle's context must
+ * match the creator or attacher context id (here they
+ * are both the current context id) and the
+ * attach-only flag cannot exist during create. We
+ * also ensure specified peer is this context or an
+ * invalid one.
+ */
+ if (queue_pair_entry->qp.handle.context != context_id ||
+ (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
+ queue_pair_entry->qp.peer != context_id)) {
+ result = VMCI_ERROR_NO_ACCESS;
+ goto error;
+ }
+
+ if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
+ result = VMCI_ERROR_NOT_FOUND;
+ goto error;
+ }
+ } else {
+ result = qp_alloc_hypercall(queue_pair_entry);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("qp_alloc_hypercall result = %d\n", result);
+ goto error;
+ }
+ }
+
+ qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
+ (struct vmci_queue *)my_consume_q);
+
+ qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
+
+ out:
+ queue_pair_entry->qp.ref_count++;
+ *handle = queue_pair_entry->qp.handle;
+ *produce_q = (struct vmci_queue *)my_produce_q;
+ *consume_q = (struct vmci_queue *)my_consume_q;
+
+ /*
+ * We should initialize the queue pair header pages on a local
+ * queue pair create. For non-local queue pairs, the
+ * hypervisor initializes the header pages in the create step.
+ */
+ if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
+ queue_pair_entry->qp.ref_count == 1) {
+ vmci_q_header_init((*produce_q)->q_header, *handle);
+ vmci_q_header_init((*consume_q)->q_header, *handle);
+ }
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+
+ return VMCI_SUCCESS;
+
+ error:
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ if (queue_pair_entry) {
+ /* The queues will be freed inside the destroy routine. */
+ qp_guest_endpoint_destroy(queue_pair_entry);
+ } else {
+ qp_free_queue(my_produce_q, produce_size);
+ qp_free_queue(my_consume_q, consume_size);
+ }
+ return result;
+
+ error_keep_entry:
+ /* This path should only be used when an existing entry was found. */
+ mutex_unlock(&qp_guest_endpoints.mutex);
+ return result;
+}
+
+/*
+ * The first endpoint issuing a queue pair allocation will create the state
+ * of the queue pair in the queue pair broker.
+ *
+ * If the creator is a guest, it will associate a VMX virtual address range
+ * with the queue pair as specified by the page_store. For compatibility with
+ * older VMX'en that use a separate step to set the VMX virtual
+ * address range, the virtual address range can be registered later using
+ * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
+ * used.
+ *
+ * If the creator is the host, a page_store of NULL should be used as well,
+ * since the host is not able to supply a page store for the queue pair.
+ *
+ * For older VMX and host callers, the queue pair will be created in the
+ * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
+ * created in the VMCIQPB_CREATED_MEM state.
+ */
+static int qp_broker_create(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data, struct qp_broker_entry **ent)
+{
+ struct qp_broker_entry *entry = NULL;
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = flags & VMCI_QPFLAG_LOCAL;
+ int result;
+ u64 guest_produce_size;
+ u64 guest_consume_size;
+
+ /* Do not create if the caller asked not to. */
+ if (flags & VMCI_QPFLAG_ATTACH_ONLY)
+ return VMCI_ERROR_NOT_FOUND;
+
+ /*
+ * Creator's context ID should match handle's context ID or the creator
+ * must allow the context in handle's context ID as the "peer".
+ */
+ if (handle.context != context_id && handle.context != peer)
+ return VMCI_ERROR_NO_ACCESS;
+
+ if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /*
+ * Creator's context ID for local queue pairs should match the
+ * peer, if a peer is specified.
+ */
+ if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
+ return VMCI_ERROR_NO_ACCESS;
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return VMCI_ERROR_NO_MEM;
+
+ if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
+ /*
+ * The queue pair broker entry stores values from the guest
+ * point of view, so a creating host side endpoint should swap
+ * produce and consume values -- unless it is a local queue
+ * pair, in which case no swapping is necessary, since the local
+ * attacher will swap queues.
+ */
+
+ guest_produce_size = consume_size;
+ guest_consume_size = produce_size;
+ } else {
+ guest_produce_size = produce_size;
+ guest_consume_size = consume_size;
+ }
+
+ entry->qp.handle = handle;
+ entry->qp.peer = peer;
+ entry->qp.flags = flags;
+ entry->qp.produce_size = guest_produce_size;
+ entry->qp.consume_size = guest_consume_size;
+ entry->qp.ref_count = 1;
+ entry->create_id = context_id;
+ entry->attach_id = VMCI_INVALID_ID;
+ entry->state = VMCIQPB_NEW;
+ entry->require_trusted_attach =
+ !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
+ entry->created_by_trusted =
+ !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
+ entry->vmci_page_files = false;
+ entry->wakeup_cb = wakeup_cb;
+ entry->client_data = client_data;
+ entry->produce_q = qp_host_alloc_queue(guest_produce_size);
+ if (entry->produce_q == NULL) {
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+ entry->consume_q = qp_host_alloc_queue(guest_consume_size);
+ if (entry->consume_q == NULL) {
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+
+ qp_init_queue_mutex(entry->produce_q, entry->consume_q);
+
+ INIT_LIST_HEAD(&entry->qp.list_item);
+
+ if (is_local) {
+ u8 *tmp;
+
+ entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
+ PAGE_SIZE, GFP_KERNEL);
+ if (entry->local_mem == NULL) {
+ result = VMCI_ERROR_NO_MEM;
+ goto error;
+ }
+ entry->state = VMCIQPB_CREATED_MEM;
+ entry->produce_q->q_header = entry->local_mem;
+ tmp = (u8 *)entry->local_mem + PAGE_SIZE *
+ (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
+ entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
+ } else if (page_store) {
+ /*
+ * The VMX already initialized the queue pair headers, so no
+ * need for the kernel side to do that.
+ */
+ result = qp_host_register_user_memory(page_store,
+ entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ goto error;
+
+ entry->state = VMCIQPB_CREATED_MEM;
+ } else {
+ /*
+ * A create without a page_store may be either a host
+ * side create (in which case we are waiting for the
+ * guest side to supply the memory) or an old style
+ * queue pair create (in which case we will expect a
+ * set page store call as the next step).
+ */
+ entry->state = VMCIQPB_CREATED_NO_MEM;
+ }
+
+ qp_list_add_entry(&qp_broker_list, &entry->qp);
+ if (ent != NULL)
+ *ent = entry;
+
+ /* Add to resource obj */
+ result = vmci_resource_add(&entry->resource,
+ VMCI_RESOURCE_TYPE_QPAIR_HOST,
+ handle);
+ if (result != VMCI_SUCCESS) {
+ pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
+ handle.context, handle.resource, result);
+ goto error;
+ }
+
+ entry->qp.handle = vmci_resource_handle(&entry->resource);
+ if (is_local) {
+ vmci_q_header_init(entry->produce_q->q_header,
+ entry->qp.handle);
+ vmci_q_header_init(entry->consume_q->q_header,
+ entry->qp.handle);
+ }
+
+ vmci_ctx_qp_create(context, entry->qp.handle);
+
+ return VMCI_SUCCESS;
+
+ error:
+ if (entry != NULL) {
+ qp_host_free_queue(entry->produce_q, guest_produce_size);
+ qp_host_free_queue(entry->consume_q, guest_consume_size);
+ kfree(entry);
+ }
+
+ return result;
+}
+
+/*
+ * Enqueues an event datagram to notify the peer VM attached to
+ * the given queue pair handle about an attach/detach event by the
+ * given VM. Returns the payload size of the datagram enqueued on
+ * success, error code otherwise.
+ */
+static int qp_notify_peer(bool attach,
+ struct vmci_handle handle,
+ u32 my_id,
+ u32 peer_id)
+{
+ int rv;
+ struct vmci_event_qp ev;
+
+ if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
+ peer_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
+ * number of pending events from the hypervisor to a given VM
+ * otherwise a rogue VM could do an arbitrary number of attach
+ * and detach operations causing memory pressure in the host
+ * kernel.
+ */
+
+ ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
+ ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+ VMCI_CONTEXT_RESOURCE_ID);
+ ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+ ev.msg.event_data.event = attach ?
+ VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
+ ev.payload.handle = handle;
+ ev.payload.peer_id = my_id;
+
+ rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
+ &ev.msg.hdr, false);
+ if (rv < VMCI_SUCCESS)
+ pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
+ attach ? "ATTACH" : "DETACH", peer_id);
+
+ return rv;
+}
+
+/*
+ * The second endpoint issuing a queue pair allocation will attach to
+ * the queue pair registered with the queue pair broker.
+ *
+ * If the attacher is a guest, it will associate a VMX virtual address
+ * range with the queue pair as specified by the page_store. At this
+ * point, the already attached host endpoint may start using the queue
+ * pair, and an attach event is sent to it. For compatibility with
+ * older VMX'en that use a separate step to set the VMX virtual
+ * address range, the virtual address range can be registered later
+ * using vmci_qp_broker_set_page_store. In that case, a page_store of
+ * NULL should be used, and the attach event will be generated once
+ * the actual page store has been set.
+ *
+ * If the attacher is the host, a page_store of NULL should be used as
+ * well, since the page store information is already set by the guest.
+ *
+ * For new VMX and host callers, the queue pair will be moved to the
+ * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
+ * moved to the VMCIQPB_ATTACHED_NO_MEM state.
+ */
+static int qp_broker_attach(struct qp_broker_entry *entry,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data,
+ struct qp_broker_entry **ent)
+{
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = flags & VMCI_QPFLAG_LOCAL;
+ int result;
+
+ if (entry->state != VMCIQPB_CREATED_NO_MEM &&
+ entry->state != VMCIQPB_CREATED_MEM)
+ return VMCI_ERROR_UNAVAILABLE;
+
+ if (is_local) {
+ if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
+ context_id != entry->create_id) {
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+ } else if (context_id == entry->create_id ||
+ context_id == entry->attach_id) {
+ return VMCI_ERROR_ALREADY_EXISTS;
+ }
+
+ if (VMCI_CONTEXT_IS_VM(context_id) &&
+ VMCI_CONTEXT_IS_VM(entry->create_id))
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /*
+ * If we are attaching from a restricted context then the queuepair
+ * must have been created by a trusted endpoint.
+ */
+ if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+ !entry->created_by_trusted)
+ return VMCI_ERROR_NO_ACCESS;
+
+ /*
+ * If we are attaching to a queuepair that was created by a restricted
+ * context then we must be trusted.
+ */
+ if (entry->require_trusted_attach &&
+ (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
+ return VMCI_ERROR_NO_ACCESS;
+
+ /*
+ * If the creator specifies VMCI_INVALID_ID in "peer" field, access
+ * control check is not performed.
+ */
+ if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
+ return VMCI_ERROR_NO_ACCESS;
+
+ if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
+ /*
+ * Do not attach if the caller doesn't support Host Queue Pairs
+ * and a host created this queue pair.
+ */
+
+ if (!vmci_ctx_supports_host_qp(context))
+ return VMCI_ERROR_INVALID_RESOURCE;
+
+ } else if (context_id == VMCI_HOST_CONTEXT_ID) {
+ struct vmci_ctx *create_context;
+ bool supports_host_qp;
+
+ /*
+ * Do not attach a host to a user created queue pair if that
+ * user doesn't support host queue pair end points.
+ */
+
+ create_context = vmci_ctx_get(entry->create_id);
+ supports_host_qp = vmci_ctx_supports_host_qp(create_context);
+ vmci_ctx_put(create_context);
+
+ if (!supports_host_qp)
+ return VMCI_ERROR_INVALID_RESOURCE;
+ }
+
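+	/*
+	 * Creator and attacher flags must match, ignoring the asymmetric
+	 * flag on the creator side and the asymmetric-peer flag on the
+	 * attacher side.
+	 */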
+ if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ /*
+ * The queue pair broker entry stores values from the guest
+ * point of view, so an attaching guest should match the values
+ * stored in the entry.
+ */
+
+ if (entry->qp.produce_size != produce_size ||
+ entry->qp.consume_size != consume_size) {
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+ }
+ } else if (entry->qp.produce_size != consume_size ||
+ entry->qp.consume_size != produce_size) {
+ return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+ }
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ /*
+ * If a guest attached to a queue pair, it will supply
+ * the backing memory. If this is a pre NOVMVM vmx,
+ * the backing memory will be supplied by calling
+ * vmci_qp_broker_set_page_store() following the
+ * return of the vmci_qp_broker_alloc() call. If it is
+ * a vmx of version NOVMVM or later, the page store
+ * must be supplied as part of the
+		 * vmci_qp_broker_alloc call. Under all circumstances,
+		 * the initially created queue pair must not have any
+		 * memory associated with it already.
+ */
+
+ if (entry->state != VMCIQPB_CREATED_NO_MEM)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (page_store != NULL) {
+ /*
+ * Patch up host state to point to guest
+ * supplied memory. The VMX already
+ * initialized the queue pair headers, so no
+ * need for the kernel side to do that.
+ */
+
+ result = qp_host_register_user_memory(page_store,
+ entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ /*
+ * Preemptively load in the headers if non-blocking to
+ * prevent blocking later.
+ */
+ if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
+ result = qp_host_map_queues(entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS) {
+ qp_host_unregister_user_memory(
+ entry->produce_q,
+ entry->consume_q);
+ return result;
+ }
+ }
+
+ entry->state = VMCIQPB_ATTACHED_MEM;
+ } else {
+ entry->state = VMCIQPB_ATTACHED_NO_MEM;
+ }
+ } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
+ /*
+ * The host side is attempting to attach to a queue
+ * pair that doesn't have any memory associated with
+ * it. This must be a pre NOVMVM vmx that hasn't set
+ * the page store information yet, or a quiesced VM.
+ */
+
+ return VMCI_ERROR_UNAVAILABLE;
+ } else {
+ /*
+ * For non-blocking queue pairs, we cannot rely on
+ * enqueue/dequeue to map in the pages on the
+ * host-side, since it may block, so we make an
+ * attempt here.
+ */
+
+ if (flags & VMCI_QPFLAG_NONBLOCK) {
+ result =
+ qp_host_map_queues(entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ entry->qp.flags |= flags &
+ (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
+ }
+
+ /* The host side has successfully attached to a queue pair. */
+ entry->state = VMCIQPB_ATTACHED_MEM;
+ }
+
+ if (entry->state == VMCIQPB_ATTACHED_MEM) {
+ result =
+ qp_notify_peer(true, entry->qp.handle, context_id,
+ entry->create_id);
+ if (result < VMCI_SUCCESS)
+ pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
+ entry->create_id, entry->qp.handle.context,
+ entry->qp.handle.resource);
+ }
+
+ entry->attach_id = context_id;
+ entry->qp.ref_count++;
+ if (wakeup_cb) {
+ entry->wakeup_cb = wakeup_cb;
+ entry->client_data = client_data;
+ }
+
+ /*
+ * When attaching to local queue pairs, the context already has
+ * an entry tracking the queue pair, so don't add another one.
+ */
+ if (!is_local)
+ vmci_ctx_qp_create(context, entry->qp.handle);
+
+ if (ent != NULL)
+ *ent = entry;
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Queue pair alloc for use when setting up queue pair endpoints
+ * on the host.
+ */
+static int qp_broker_alloc(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data,
+ struct qp_broker_entry **ent,
+ bool *swap)
+{
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool create;
+ struct qp_broker_entry *entry = NULL;
+ bool is_local = flags & VMCI_QPFLAG_LOCAL;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) ||
+ (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
+ !(produce_size || consume_size) ||
+ !context || context_id == VMCI_INVALID_ID ||
+ handle.context == VMCI_INVALID_ID) {
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * In the initial argument check, we ensure that non-vmkernel hosts
+ * are not allowed to create local queue pairs.
+ */
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!is_local && vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ mutex_unlock(&qp_broker_list.mutex);
+ return VMCI_ERROR_ALREADY_EXISTS;
+ }
+
+ if (handle.resource != VMCI_INVALID_ID)
+ entry = qp_broker_handle_to_entry(handle);
+
+ if (!entry) {
+ create = true;
+ result =
+ qp_broker_create(handle, peer, flags, priv_flags,
+ produce_size, consume_size, page_store,
+ context, wakeup_cb, client_data, ent);
+ } else {
+ create = false;
+ result =
+ qp_broker_attach(entry, peer, flags, priv_flags,
+ produce_size, consume_size, page_store,
+ context, wakeup_cb, client_data, ent);
+ }
+
+ mutex_unlock(&qp_broker_list.mutex);
+
+ if (swap)
+ *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
+ !(create && is_local);
+
+ return result;
+}
+
+/*
+ * This function implements the kernel API for allocating a queue
+ * pair.
+ */
+static int qp_alloc_host_work(struct vmci_handle *handle,
+ struct vmci_queue **produce_q,
+ u64 produce_size,
+ struct vmci_queue **consume_q,
+ u64 consume_size,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data)
+{
+ struct vmci_handle new_handle;
+ struct vmci_ctx *context;
+ struct qp_broker_entry *entry;
+ int result;
+ bool swap;
+
+ if (vmci_handle_is_invalid(*handle)) {
+ new_handle = vmci_make_handle(
+ VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
+ } else
+ new_handle = *handle;
+
+ context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+ entry = NULL;
+ result =
+ qp_broker_alloc(new_handle, peer, flags, priv_flags,
+ produce_size, consume_size, NULL, context,
+ wakeup_cb, client_data, &entry, &swap);
+ if (result == VMCI_SUCCESS) {
+ if (swap) {
+ /*
+ * If this is a local queue pair, the attacher
+ * will swap around produce and consume
+ * queues.
+ */
+
+ *produce_q = entry->consume_q;
+ *consume_q = entry->produce_q;
+ } else {
+ *produce_q = entry->produce_q;
+ *consume_q = entry->consume_q;
+ }
+
+ *handle = vmci_resource_handle(&entry->resource);
+ } else {
+ *handle = VMCI_INVALID_HANDLE;
+ pr_devel("queue pair broker failed to alloc (result=%d)\n",
+ result);
+ }
+ vmci_ctx_put(context);
+ return result;
+}
+
+/*
+ * Allocates a VMCI queue_pair. Only checks validity of input
+ * arguments. The real work is done in the host or guest
+ * specific function.
+ */
+int vmci_qp_alloc(struct vmci_handle *handle,
+ struct vmci_queue **produce_q,
+ u64 produce_size,
+ struct vmci_queue **consume_q,
+ u64 consume_size,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ bool guest_endpoint,
+ vmci_event_release_cb wakeup_cb,
+ void *client_data)
+{
+ if (!handle || !produce_q || !consume_q ||
+ (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (guest_endpoint) {
+ return qp_alloc_guest_work(handle, produce_q,
+ produce_size, consume_q,
+ consume_size, peer,
+ flags, priv_flags);
+ } else {
+ return qp_alloc_host_work(handle, produce_q,
+ produce_size, consume_q,
+ consume_size, peer, flags,
+ priv_flags, wakeup_cb, client_data);
+ }
+}
+
+/*
+ * This function implements the host kernel API for detaching from
+ * a queue pair.
+ */
+static int qp_detatch_host_work(struct vmci_handle handle)
+{
+ int result;
+ struct vmci_ctx *context;
+
+ context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+
+ result = vmci_qp_broker_detach(handle, context);
+
+ vmci_ctx_put(context);
+ return result;
+}
+
+/*
+ * Detaches from a VMCI queue_pair. Only checks validity of input argument.
+ * Real work is done in the host or guest specific function.
+ */
+static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
+{
+ if (vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ if (guest_endpoint)
+ return qp_detatch_guest_work(handle);
+ else
+ return qp_detatch_host_work(handle);
+}
+
+/*
+ * Returns the entry from the head of the list. Assumes that the list is
+ * locked.
+ */
+static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
+{
+ if (!list_empty(&qp_list->head)) {
+ struct qp_entry *entry =
+ list_first_entry(&qp_list->head, struct qp_entry,
+ list_item);
+ return entry;
+ }
+
+ return NULL;
+}
+
+void vmci_qp_broker_exit(void)
+{
+ struct qp_entry *entry;
+ struct qp_broker_entry *be;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ while ((entry = qp_list_get_head(&qp_broker_list))) {
+ be = (struct qp_broker_entry *)entry;
+
+ qp_list_remove_entry(&qp_broker_list, entry);
+ kfree(be);
+ }
+
+ mutex_unlock(&qp_broker_list.mutex);
+}
+
+/*
+ * Requests that a queue pair be allocated with the VMCI queue
+ * pair broker. Allocates a queue pair entry if one does not
+ * exist. Attaches to one if it exists, and retrieves the page
+ * files backing that queue_pair. Assumes that the queue pair
+ * broker lock is held.
+ */
+int vmci_qp_broker_alloc(struct vmci_handle handle,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags,
+ u64 produce_size,
+ u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context)
+{
+ return qp_broker_alloc(handle, peer, flags, priv_flags,
+ produce_size, consume_size,
+ page_store, context, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
+ * step to add the UVAs of the VMX mapping of the queue pair. This function
+ * provides backwards compatibility with such VMX'en, and takes care of
+ * registering the page store for a queue pair previously allocated by the
+ * VMX during create or attach. This function will move the queue pair state
+ * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
+ * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
+ * attached state with memory, the queue pair is ready to be used by the
+ * host peer, and an attached event will be generated.
+ *
+ * Assumes that the queue pair broker lock is held.
+ *
+ * This function is only used by the hosted platform, since there is no
+ * issue with backwards compatibility for vmkernel.
+ */
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+ u64 produce_uva,
+ u64 consume_uva,
+ struct vmci_ctx *context)
+{
+ struct qp_broker_entry *entry;
+ int result;
+ const u32 context_id = vmci_ctx_get_id(context);
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * We only support guest to host queue pairs, so the VMX must
+ * supply UVAs for the mapped page files.
+ */
+
+ if (produce_uva == 0 || consume_uva == 0)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ /*
+ * If I'm the owner then I can set the page store.
+ *
+ * Or, if a host created the queue_pair and I'm the attached peer
+ * then I can set the page store.
+ */
+ if (entry->create_id != context_id &&
+ (entry->create_id != VMCI_HOST_CONTEXT_ID ||
+ entry->attach_id != context_id)) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
+ goto out;
+ }
+
+ if (entry->state != VMCIQPB_CREATED_NO_MEM &&
+ entry->state != VMCIQPB_ATTACHED_NO_MEM) {
+ result = VMCI_ERROR_UNAVAILABLE;
+ goto out;
+ }
+
+ result = qp_host_get_user_memory(produce_uva, consume_uva,
+ entry->produce_q, entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ goto out;
+
+ result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+ if (result < VMCI_SUCCESS) {
+ qp_host_unregister_user_memory(entry->produce_q,
+ entry->consume_q);
+ goto out;
+ }
+
+ if (entry->state == VMCIQPB_CREATED_NO_MEM)
+ entry->state = VMCIQPB_CREATED_MEM;
+ else
+ entry->state = VMCIQPB_ATTACHED_MEM;
+
+ entry->vmci_page_files = true;
+
+ if (entry->state == VMCIQPB_ATTACHED_MEM) {
+ result =
+ qp_notify_peer(true, handle, context_id, entry->create_id);
+ if (result < VMCI_SUCCESS) {
+ pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
+ entry->create_id, entry->qp.handle.context,
+ entry->qp.handle.resource);
+ }
+ }
+
+ result = VMCI_SUCCESS;
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
+
+/*
+ * Resets saved queue headers for the given QP broker
+ * entry. Should be used when guest memory becomes available
+ * again, or the guest detaches.
+ */
+static void qp_reset_saved_headers(struct qp_broker_entry *entry)
+{
+ entry->produce_q->saved_header = NULL;
+ entry->consume_q->saved_header = NULL;
+}
+
+/*
+ * The main entry point for detaching from a queue pair registered with the
+ * queue pair broker. If more than one endpoint is attached to the queue
+ * pair, the first endpoint will mainly decrement a reference count and
+ * generate a notification to its peer. The last endpoint will clean up
+ * the queue pair state registered with the broker.
+ *
+ * When a guest endpoint detaches, it will unmap and unregister the guest
+ * memory backing the queue pair. If the host is still attached, it will
+ * no longer be able to access the queue pair content.
+ *
+ * If the queue pair is already in a state where there is no memory
+ * registered for the queue pair (any *_NO_MEM state), it will transition to
+ * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen if a guest
+ * endpoint is the first of the two endpoints to detach. If the host endpoint
+ * is the first of the two to detach, the queue pair will move to the
+ * VMCIQPB_SHUTDOWN_MEM state.
+ */
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
+{
+ struct qp_broker_entry *entry;
+ const u32 context_id = vmci_ctx_get_id(context);
+ u32 peer_id;
+ bool is_local = false;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID) {
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ if (context_id != entry->create_id && context_id != entry->attach_id) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ goto out;
+ }
+
+ if (context_id == entry->create_id) {
+ peer_id = entry->attach_id;
+ entry->create_id = VMCI_INVALID_ID;
+ } else {
+ peer_id = entry->create_id;
+ entry->attach_id = VMCI_INVALID_ID;
+ }
+ entry->qp.ref_count--;
+
+ is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ bool headers_mapped;
+
+ /*
+		 * Pre-NOVMVM VMX'en may detach from a queue pair
+ * before setting the page store, and in that case
+ * there is no user memory to detach from. Also, more
+ * recent VMX'en may detach from a queue pair in the
+ * quiesced state.
+ */
+
+ qp_acquire_queue_mutex(entry->produce_q);
+ headers_mapped = entry->produce_q->q_header ||
+ entry->consume_q->q_header;
+ if (QPBROKERSTATE_HAS_MEM(entry)) {
+ result =
+ qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
+ entry->produce_q,
+ entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
+ handle.context, handle.resource,
+ result);
+
+			qp_host_unregister_user_memory(entry->produce_q,
+						       entry->consume_q);
+
+ }
+
+ if (!headers_mapped)
+ qp_reset_saved_headers(entry);
+
+ qp_release_queue_mutex(entry->produce_q);
+
+ if (!headers_mapped && entry->wakeup_cb)
+ entry->wakeup_cb(entry->client_data);
+
+ } else {
+ if (entry->wakeup_cb) {
+ entry->wakeup_cb = NULL;
+ entry->client_data = NULL;
+ }
+ }
+
+ if (entry->qp.ref_count == 0) {
+ qp_list_remove_entry(&qp_broker_list, &entry->qp);
+
+ if (is_local)
+ kfree(entry->local_mem);
+
+ qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
+ qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
+ qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
+ /* Unlink from resource hash table and free callback */
+ vmci_resource_remove(&entry->resource);
+
+ kfree(entry);
+
+ vmci_ctx_qp_destroy(context, handle);
+ } else {
+ qp_notify_peer(false, handle, context_id, peer_id);
+ if (context_id == VMCI_HOST_CONTEXT_ID &&
+ QPBROKERSTATE_HAS_MEM(entry)) {
+ entry->state = VMCIQPB_SHUTDOWN_MEM;
+ } else {
+ entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
+ }
+
+ if (!is_local)
+ vmci_ctx_qp_destroy(context, handle);
+
+ }
+ result = VMCI_SUCCESS;
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
+
+/*
+ * Establishes the necessary mappings for a queue pair given a
+ * reference to the queue pair guest memory. This is usually
+ * called when a guest is unquiesced and the VMX is allowed to
+ * map guest memory once again.
+ */
+int vmci_qp_broker_map(struct vmci_handle handle,
+ struct vmci_ctx *context,
+ u64 guest_mem)
+{
+ struct qp_broker_entry *entry;
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = false;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+ pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ if (context_id != entry->create_id && context_id != entry->attach_id) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ goto out;
+ }
+
+ is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+ result = VMCI_SUCCESS;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ struct vmci_qp_page_store page_store;
+
+ page_store.pages = guest_mem;
+ page_store.len = QPE_NUM_PAGES(entry->qp);
+
+ qp_acquire_queue_mutex(entry->produce_q);
+ qp_reset_saved_headers(entry);
+ result =
+ qp_host_register_user_memory(&page_store,
+ entry->produce_q,
+ entry->consume_q);
+ qp_release_queue_mutex(entry->produce_q);
+ if (result == VMCI_SUCCESS) {
+ /* Move state from *_NO_MEM to *_MEM */
+
+ entry->state++;
+
+ if (entry->wakeup_cb)
+ entry->wakeup_cb(entry->client_data);
+ }
+ }
+
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
+
+/*
+ * Saves a snapshot of the queue headers for the given QP broker
+ * entry. Should be used when guest memory is unmapped.
+ * Results:
+ * VMCI_SUCCESS on success, appropriate error code if guest memory
+ * can't be accessed.
+ */
+static int qp_save_headers(struct qp_broker_entry *entry)
+{
+ int result;
+
+ if (entry->produce_q->saved_header != NULL &&
+ entry->consume_q->saved_header != NULL) {
+ /*
+ * If the headers have already been saved, we don't need to do
+ * it again, and we don't want to map in the headers
+ * unnecessarily.
+ */
+
+ return VMCI_SUCCESS;
+ }
+
+ if (NULL == entry->produce_q->q_header ||
+ NULL == entry->consume_q->q_header) {
+ result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+ if (result < VMCI_SUCCESS)
+ return result;
+ }
+
+ memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
+ sizeof(entry->saved_produce_q));
+ entry->produce_q->saved_header = &entry->saved_produce_q;
+ memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
+ sizeof(entry->saved_consume_q));
+ entry->consume_q->saved_header = &entry->saved_consume_q;
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Removes all references to the guest memory of a given queue pair, and
+ * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
+ * called when a VM is being quiesced where access to guest memory should
+ * be avoided.
+ */
+int vmci_qp_broker_unmap(struct vmci_handle handle,
+ struct vmci_ctx *context,
+ u32 gid)
+{
+ struct qp_broker_entry *entry;
+ const u32 context_id = vmci_ctx_get_id(context);
+ bool is_local = false;
+ int result;
+
+ if (vmci_handle_is_invalid(handle) || !context ||
+ context_id == VMCI_INVALID_ID)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ mutex_lock(&qp_broker_list.mutex);
+
+ if (!vmci_ctx_qp_exists(context, handle)) {
+ pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ entry = qp_broker_handle_to_entry(handle);
+ if (!entry) {
+ pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+ context_id, handle.context, handle.resource);
+ result = VMCI_ERROR_NOT_FOUND;
+ goto out;
+ }
+
+ if (context_id != entry->create_id && context_id != entry->attach_id) {
+ result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ goto out;
+ }
+
+ is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+
+ if (context_id != VMCI_HOST_CONTEXT_ID) {
+ qp_acquire_queue_mutex(entry->produce_q);
+ result = qp_save_headers(entry);
+ if (result < VMCI_SUCCESS)
+ pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
+ handle.context, handle.resource, result);
+
+ qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
+
+ /*
+ * On hosted, when we unmap queue pairs, the VMX will also
+ * unmap the guest memory, so we invalidate the previously
+ * registered memory. If the queue pair is mapped again at a
+ * later point in time, we will need to reregister the user
+ * memory with a possibly new user VA.
+ */
+ qp_host_unregister_user_memory(entry->produce_q,
+ entry->consume_q);
+
+ /*
+ * Move state from *_MEM to *_NO_MEM.
+ */
+ entry->state--;
+
+ qp_release_queue_mutex(entry->produce_q);
+ }
+
+ result = VMCI_SUCCESS;
+
+ out:
+ mutex_unlock(&qp_broker_list.mutex);
+ return result;
+}
+
+/*
+ * Destroys all guest queue pair endpoints. If active guest queue
+ * pairs still exist, hypercalls to attempt detach from these
+ * queue pairs will be made. Any failure to detach is silently
+ * ignored.
+ */
+void vmci_qp_guest_endpoints_exit(void)
+{
+ struct qp_entry *entry;
+ struct qp_guest_endpoint *ep;
+
+ mutex_lock(&qp_guest_endpoints.mutex);
+
+ while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
+ ep = (struct qp_guest_endpoint *)entry;
+
+ /* Don't make a hypercall for local queue_pairs. */
+ if (!(entry->flags & VMCI_QPFLAG_LOCAL))
+ qp_detatch_hypercall(entry->handle);
+
+ /* We cannot fail the exit, so let's reset ref_count. */
+ entry->ref_count = 0;
+ qp_list_remove_entry(&qp_guest_endpoints, entry);
+
+ qp_guest_endpoint_destroy(ep);
+ }
+
+ mutex_unlock(&qp_guest_endpoints.mutex);
+}
+
+/*
+ * Helper routine that will lock the queue pair before subsequent
+ * operations.
+ * Note: Non-blocking on the host side is currently only implemented in ESX.
+ * Since non-blocking isn't yet implemented on the host personality we
+ * have no reason to acquire a spin lock. So to avoid the use of an
+ * unnecessary lock only acquire the mutex if we can block.
+ * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK. Therefore
+ * we can use the same locking function for access to both the queue
+ *	 and the queue headers as it is the same logic. Assert this behavior.
+ */
+static void qp_lock(const struct vmci_qp *qpair)
+{
+ if (vmci_can_block(qpair->flags))
+ qp_acquire_queue_mutex(qpair->produce_q);
+}
+
+/*
+ * Helper routine that unlocks the queue pair after calling
+ * qp_lock. Respects non-blocking and pinning flags.
+ */
+static void qp_unlock(const struct vmci_qp *qpair)
+{
+ if (vmci_can_block(qpair->flags))
+ qp_release_queue_mutex(qpair->produce_q);
+}
+
+/*
+ * The queue headers may not be mapped at all times. If a queue is
+ * currently not mapped, it will be attempted to do so.
+ */
+static int qp_map_queue_headers(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q,
+ bool can_block)
+{
+ int result;
+
+ if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
+ if (can_block)
+ result = qp_host_map_queues(produce_q, consume_q);
+ else
+ result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
+
+ if (result < VMCI_SUCCESS)
+ return (produce_q->saved_header &&
+ consume_q->saved_header) ?
+ VMCI_ERROR_QUEUEPAIR_NOT_READY :
+ VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+ }
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Helper routine that will retrieve the produce and consume
+ * headers of a given queue pair. If the guest memory of the
+ * queue pair is currently not available, the saved queue headers
+ * will be returned, if these are available.
+ */
+static int qp_get_queue_headers(const struct vmci_qp *qpair,
+ struct vmci_queue_header **produce_q_header,
+ struct vmci_queue_header **consume_q_header)
+{
+ int result;
+
+ result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
+ vmci_can_block(qpair->flags));
+ if (result == VMCI_SUCCESS) {
+ *produce_q_header = qpair->produce_q->q_header;
+ *consume_q_header = qpair->consume_q->q_header;
+ } else if (qpair->produce_q->saved_header &&
+ qpair->consume_q->saved_header) {
+ *produce_q_header = qpair->produce_q->saved_header;
+ *consume_q_header = qpair->consume_q->saved_header;
+ result = VMCI_SUCCESS;
+ }
+
+ return result;
+}
+
+/*
+ * Callback from VMCI queue pair broker indicating that a queue
+ * pair that was previously not ready, now either is ready or
+ * gone forever.
+ */
+static int qp_wakeup_cb(void *client_data)
+{
+ struct vmci_qp *qpair = (struct vmci_qp *)client_data;
+
+ qp_lock(qpair);
+ while (qpair->blocked > 0) {
+ qpair->blocked--;
+ qpair->generation++;
+ wake_up(&qpair->event);
+ }
+ qp_unlock(qpair);
+
+ return VMCI_SUCCESS;
+}
+
+/*
+ * Makes the calling thread wait for the queue pair to become
+ * ready for host side access. Returns true when thread is
+ * woken up after queue pair state change, false otherwise.
+ */
+static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
+{
+ unsigned int generation;
+
+ if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
+ return false;
+
+ qpair->blocked++;
+ generation = qpair->generation;
+ qp_unlock(qpair);
+ wait_event(qpair->event, generation != qpair->generation);
+ qp_lock(qpair);
+
+ return true;
+}
+
+/*
+ * Enqueues a given buffer to the produce queue using the provided
+ * function. As many bytes as possible (space available in the queue)
+ * are enqueued. Assumes the queue->mutex has been acquired. Returns
+ * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
+ * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
+ * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
+ * an error occurred when accessing the buffer,
+ * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
+ * available. Otherwise, the number of bytes written to the queue is
+ * returned. Updates the tail pointer of the produce queue.
+ */
+static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q,
+ const u64 produce_q_size,
+ const void *buf,
+ size_t buf_size,
+ vmci_memcpy_to_queue_func memcpy_to_queue,
+ bool can_block)
+{
+ s64 free_space;
+ u64 tail;
+ size_t written;
+ ssize_t result;
+
+ result = qp_map_queue_headers(produce_q, consume_q, can_block);
+ if (unlikely(result != VMCI_SUCCESS))
+ return result;
+
+ free_space = vmci_q_header_free_space(produce_q->q_header,
+ consume_q->q_header,
+ produce_q_size);
+ if (free_space == 0)
+ return VMCI_ERROR_QUEUEPAIR_NOSPACE;
+
+ if (free_space < VMCI_SUCCESS)
+ return (ssize_t) free_space;
+
+ written = (size_t) (free_space > buf_size ? buf_size : free_space);
+ tail = vmci_q_header_producer_tail(produce_q->q_header);
+ if (likely(tail + written < produce_q_size)) {
+ result = memcpy_to_queue(produce_q, tail, buf, 0, written);
+ } else {
+ /* Tail pointer wraps around. */
+
+ const size_t tmp = (size_t) (produce_q_size - tail);
+
+ result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
+ if (result >= VMCI_SUCCESS)
+ result = memcpy_to_queue(produce_q, 0, buf, tmp,
+ written - tmp);
+ }
+
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ vmci_q_header_add_producer_tail(produce_q->q_header, written,
+ produce_q_size);
+ return written;
+}
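+
+/*
+ * Worked example (illustrative only, not part of the driver): with a
+ * hypothetical produce_q_size of 8, tail of 6 and written of 5, the
+ * wrap-around branch above copies tmp = 8 - 6 = 2 bytes at offset 6
+ * and the remaining 5 - 2 = 3 bytes at offset 0, after which
+ * vmci_q_header_add_producer_tail() advances the tail to
+ * (6 + 5) % 8 = 3.
+ */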
+
+/*
+ * Dequeues data (if available) from the given consume queue. Writes data
+ * to the user provided buffer using the provided function.
+ * Assumes the queue->mutex has been acquired.
+ * Results:
+ * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
+ * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
+ * (as defined by the queue size).
+ * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
+ * Otherwise the number of bytes dequeued is returned.
+ * Side effects:
+ * Updates the head pointer of the consume queue.
+ */
+static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
+ struct vmci_queue *consume_q,
+ const u64 consume_q_size,
+ void *buf,
+ size_t buf_size,
+ vmci_memcpy_from_queue_func memcpy_from_queue,
+ bool update_consumer,
+ bool can_block)
+{
+ s64 buf_ready;
+ u64 head;
+ size_t read;
+ ssize_t result;
+
+ result = qp_map_queue_headers(produce_q, consume_q, can_block);
+ if (unlikely(result != VMCI_SUCCESS))
+ return result;
+
+ buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
+ produce_q->q_header,
+ consume_q_size);
+ if (buf_ready == 0)
+ return VMCI_ERROR_QUEUEPAIR_NODATA;
+
+ if (buf_ready < VMCI_SUCCESS)
+ return (ssize_t) buf_ready;
+
+ read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
+ head = vmci_q_header_consumer_head(produce_q->q_header);
+ if (likely(head + read < consume_q_size)) {
+ result = memcpy_from_queue(buf, 0, consume_q, head, read);
+ } else {
+ /* Head pointer wraps around. */
+
+ const size_t tmp = (size_t) (consume_q_size - head);
+
+ result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
+ if (result >= VMCI_SUCCESS)
+ result = memcpy_from_queue(buf, tmp, consume_q, 0,
+ read - tmp);
+
+ }
+
+ if (result < VMCI_SUCCESS)
+ return result;
+
+ if (update_consumer)
+ vmci_q_header_add_consumer_head(produce_q->q_header,
+ read, consume_q_size);
+
+ return read;
+}
+
+/*
+ * vmci_qpair_alloc() - Allocates a queue pair.
+ * @qpair: Pointer for the new vmci_qp struct.
+ * @handle: Handle to track the resource.
+ * @produce_qsize: Desired size of the producer queue.
+ * @consume_qsize: Desired size of the consumer queue.
+ * @peer: ContextID of the peer.
+ * @flags: VMCI flags.
+ * @priv_flags: VMCI privilege flags.
+ *
+ * This is the client interface for allocating the memory for a
+ * vmci_qp structure and then attaching to the underlying
+ * queue. If an error occurs allocating the memory for the
+ * vmci_qp structure, no attempt is made to attach. If an
+ * error occurs attaching, then the structure is freed.
+ */
+int vmci_qpair_alloc(struct vmci_qp **qpair,
+ struct vmci_handle *handle,
+ u64 produce_qsize,
+ u64 consume_qsize,
+ u32 peer,
+ u32 flags,
+ u32 priv_flags)
+{
+ struct vmci_qp *my_qpair;
+ int retval;
+ struct vmci_handle src = VMCI_INVALID_HANDLE;
+ struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
+ enum vmci_route route;
+ vmci_event_release_cb wakeup_cb;
+ void *client_data;
+
+ /*
+ * Restrict the size of a queuepair. The device already
+ * enforces a limit on the total amount of memory that can be
+ * allocated to queuepairs for a guest. However, we try to
+ * allocate this memory before we make the queuepair
+ * allocation hypercall. On Linux, we allocate each page
+ * separately, which means rather than fail, the guest will
+ * thrash while it tries to allocate, and will become
+ * increasingly unresponsive to the point where it appears to
+ * be hung. So we place a limit on the size of an individual
+ * queuepair here, and leave the device to enforce the
+ * restriction on total queuepair memory. (Note that this
+ * doesn't prevent all cases; a user with only this much
+ * physical memory could still get into trouble.) The error
+ * used by the device is NO_RESOURCES, so use that here too.
+ */
+
+ if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
+ produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
+ return VMCI_ERROR_NO_RESOURCES;
+
+ retval = vmci_route(&src, &dst, false, &route);
+ if (retval < VMCI_SUCCESS)
+ route = vmci_guest_code_active() ?
+ VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
+
+ /* If NONBLOCK or PINNED is set, we better be the guest personality. */
+ if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
+ VMCI_ROUTE_AS_GUEST != route) {
+		pr_devel("Not guest personality w/ NONBLOCK OR PINNED set\n");
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ /*
+ * Limit the size of pinned QPs and check sanity.
+ *
+	 * Pinning pages implies non-blocking mode. Mutexes aren't acquired
+	 * when the NONBLOCK flag is set in qpair code; and also should not be
+	 * acquired when the PINNED flag is set. Since pinning pages
+ * implies we want speed, it makes no sense not to have NONBLOCK
+ * set if PINNED is set. Hence enforce this implication.
+ */
+ if (vmci_qp_pinned(flags)) {
+ if (vmci_can_block(flags)) {
+			pr_err("Attempted to enable pinning w/o non-blocking\n");
+ return VMCI_ERROR_INVALID_ARGS;
+ }
+
+ if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
+ return VMCI_ERROR_NO_RESOURCES;
+ }
+
+ my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
+ if (!my_qpair)
+ return VMCI_ERROR_NO_MEM;
+
+ my_qpair->produce_q_size = produce_qsize;
+ my_qpair->consume_q_size = consume_qsize;
+ my_qpair->peer = peer;
+ my_qpair->flags = flags;
+ my_qpair->priv_flags = priv_flags;
+
+ wakeup_cb = NULL;
+ client_data = NULL;
+
+ if (VMCI_ROUTE_AS_HOST == route) {
+ my_qpair->guest_endpoint = false;
+ if (!(flags & VMCI_QPFLAG_LOCAL)) {
+ my_qpair->blocked = 0;
+ my_qpair->generation = 0;
+ init_waitqueue_head(&my_qpair->event);
+ wakeup_cb = qp_wakeup_cb;
+ client_data = (void *)my_qpair;
+ }
+ } else {
+ my_qpair->guest_endpoint = true;
+ }
+
+ retval = vmci_qp_alloc(handle,
+ &my_qpair->produce_q,
+ my_qpair->produce_q_size,
+ &my_qpair->consume_q,
+ my_qpair->consume_q_size,
+ my_qpair->peer,
+ my_qpair->flags,
+ my_qpair->priv_flags,
+ my_qpair->guest_endpoint,
+ wakeup_cb, client_data);
+
+ if (retval < VMCI_SUCCESS) {
+ kfree(my_qpair);
+ return retval;
+ }
+
+ *qpair = my_qpair;
+ my_qpair->handle = *handle;
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
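+
+/*
+ * Minimal usage sketch (illustrative only, not part of the driver). The
+ * queue sizes and the host peer are arbitrary example values, and
+ * VMCI_NO_PRIVILEGE_FLAGS is assumed to be the zero priv_flags value
+ * from vmw_vmci_defs.h:
+ *
+ *	struct vmci_qp *qpair;
+ *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
+ *	int rv;
+ *
+ *	rv = vmci_qpair_alloc(&qpair, &handle, 4096, 4096,
+ *			      VMCI_HOST_CONTEXT_ID, 0,
+ *			      VMCI_NO_PRIVILEGE_FLAGS);
+ *	if (rv < VMCI_SUCCESS)
+ *		return rv;
+ *	...
+ *	vmci_qpair_detach(&qpair);
+ */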
+
+/*
+ * vmci_qpair_detach() - Detaches the client from a queue pair.
+ * @qpair: Reference of a pointer to the qpair struct.
+ *
+ * This is the client interface for detaching from a VMCIQPair.
+ * Note that this routine will free the memory allocated for the
+ * vmci_qp structure too.
+ */
+int vmci_qpair_detach(struct vmci_qp **qpair)
+{
+ int result;
+ struct vmci_qp *old_qpair;
+
+ if (!qpair || !(*qpair))
+ return VMCI_ERROR_INVALID_ARGS;
+
+ old_qpair = *qpair;
+ result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
+
+ /*
+ * The guest can fail to detach for a number of reasons, and
+ * if it does so, it will cleanup the entry (if there is one).
+ * The host can fail too, but it won't cleanup the entry
+ * immediately, it will do that later when the context is
+ * freed. Either way, we need to release the qpair struct
+ * here; there isn't much the caller can do, and we don't want
+ * to leak.
+ */
+
+ memset(old_qpair, 0, sizeof(*old_qpair));
+ old_qpair->handle = VMCI_INVALID_HANDLE;
+ old_qpair->peer = VMCI_INVALID_ID;
+ kfree(old_qpair);
+ *qpair = NULL;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_detach);
+
+/*
+ * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
+ * @qpair: Pointer to the queue pair struct.
+ * @producer_tail: Reference used for storing producer tail index.
+ * @consumer_head: Reference used for storing the consumer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of the view of the caller as the producer.
+ */
+int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
+ u64 *producer_tail,
+ u64 *consumer_head)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ int result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ vmci_q_header_get_pointers(produce_q_header, consume_q_header,
+ producer_tail, consumer_head);
+ qp_unlock(qpair);
+
+ if (result == VMCI_SUCCESS &&
+ ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
+ (consumer_head && *consumer_head >= qpair->produce_q_size)))
+ return VMCI_ERROR_INVALID_SIZE;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
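+
+/*
+ * Worked example (illustrative only, assuming the usual ring-buffer
+ * convention of the queue headers): for the producer both indexes
+ * refer to the produce queue, so with produce_q_size S the bytes in
+ * flight are (producer_tail - consumer_head + S) % S, e.g. tail = 2,
+ * head = 6 and S = 8 give 4 bytes not yet consumed. Indexes at or
+ * beyond S are rejected above as VMCI_ERROR_INVALID_SIZE.
+ */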
+
+/*
+ * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
+ * @qpair: Pointer to the queue pair struct.
+ * @consumer_tail: Reference used for storing consumer tail index.
+ * @producer_head: Reference used for storing the producer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of the view of the caller as the consumer.
+ */
+int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
+ u64 *consumer_tail,
+ u64 *producer_head)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ int result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ vmci_q_header_get_pointers(consume_q_header, produce_q_header,
+ consumer_tail, producer_head);
+ qp_unlock(qpair);
+
+ if (result == VMCI_SUCCESS &&
+ ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
+ (producer_head && *producer_head >= qpair->consume_q_size)))
+ return VMCI_ERROR_INVALID_SIZE;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
+
+/*
+ * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of the view of the caller as
+ * the producer, which is the common case. Returns < 0 on error, otherwise
+ * the number of free bytes into which data can be enqueued.
+ */
+s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_free_space(produce_q_header,
+ consume_q_header,
+ qpair->produce_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
+
+/*
+ * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of the view of the caller as
+ * the consumer, which is not the common case. Returns < 0 on error, otherwise
+ * the number of free bytes into which data can be enqueued.
+ */
+s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_free_space(consume_q_header,
+ produce_q_header,
+ qpair->consume_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
+
+/*
+ * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
+ * producer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of the view of the
+ * caller as the producer which is not the common case. Returns < 0 if err,
+ * else available bytes that may be read.
+ */
+s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_buf_ready(produce_q_header,
+ consume_q_header,
+ qpair->produce_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
+
+/*
+ * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
+ * consumer queue.
+ * @qpair: Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of the view of the
+ * caller as the consumer which is the normal case. Returns < 0 if err,
+ * else available bytes that may be read.
+ */
+s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
+{
+ struct vmci_queue_header *produce_q_header;
+ struct vmci_queue_header *consume_q_header;
+ s64 result;
+
+ if (!qpair)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+ result =
+ qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+ if (result == VMCI_SUCCESS)
+ result = vmci_q_header_buf_ready(consume_q_header,
+ produce_q_header,
+ qpair->consume_q_size);
+ else
+ result = 0;
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
+
+/*
+ * vmci_qpair_enqueue() - Throw data on the queue.
+ * @qpair: Pointer to the queue pair struct.
+ * @buf: Pointer to buffer containing data
+ * @buf_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for enqueueing data into the queue.
+ * Returns number of bytes enqueued or < 0 on error.
+ */
+ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
+ const void *buf,
+ size_t buf_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !buf)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_enqueue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->produce_q_size,
+ buf, buf_size,
+ qp_memcpy_to_queue,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
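+
+/*
+ * Usage sketch (illustrative only): pushing a whole buffer while the
+ * peer drains it. The msleep() back-off is only an example; a real
+ * client would typically wait for a peer notification instead.
+ *
+ *	while (len > 0) {
+ *		ssize_t n = vmci_qpair_enqueue(qpair, p, len, 0);
+ *
+ *		if (n == VMCI_ERROR_QUEUEPAIR_NOSPACE) {
+ *			msleep(1);
+ *			continue;
+ *		}
+ *		if (n < 0)
+ *			return n;
+ *		p += n;
+ *		len -= n;
+ *	}
+ */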
+
+/*
+ * vmci_qpair_dequeue() - Get data from the queue.
+ * @qpair: Pointer to the queue pair struct.
+ * @buf: Pointer to buffer for the data
+ * @buf_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for dequeueing data from the queue.
+ * Returns number of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
+ void *buf,
+ size_t buf_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !buf)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ buf, buf_size,
+ qp_memcpy_from_queue, true,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
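+
+/*
+ * Usage sketch (illustrative only): the receive side mirrors the
+ * enqueue example above. VMCI_ERROR_QUEUEPAIR_NODATA only means the
+ * queue is currently empty and is not a fatal error:
+ *
+ *	ssize_t n = vmci_qpair_dequeue(qpair, buf, sizeof(buf), 0);
+ *
+ *	if (n == VMCI_ERROR_QUEUEPAIR_NODATA)
+ *		n = 0;
+ *	else if (n < 0)
+ *		return n;
+ */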
+
+/*
+ * vmci_qpair_peek() - Peek at the data in the queue.
+ * @qpair: Pointer to the queue pair struct.
+ * @buf: Pointer to buffer for the data
+ * @buf_size: Length of buffer.
+ * @buf_type: Buffer type (Unused on Linux).
+ *
+ * This is the client interface for peeking into a queue. (I.e.,
+ * copy data from the queue without updating the head pointer.)
+ * Returns number of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
+ void *buf,
+ size_t buf_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !buf)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ buf, buf_size,
+ qp_memcpy_from_queue, false,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_peek);
+
+/*
+ * vmci_qpair_enquev() - Throw data on the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @iov: Pointer to buffer containing data
+ * @iov_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for enqueueing data into the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes enqueued or < 0 on error.
+ */
+ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
+ void *iov,
+ size_t iov_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !iov)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_enqueue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->produce_q_size,
+ iov, iov_size,
+ qp_memcpy_to_queue_iov,
+ vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
+
+/*
+ * vmci_qpair_dequev() - Get data from the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @iov: Pointer to buffer for the data
+ * @iov_size: Length of buffer.
+ * @buf_type: Buffer type (Unused).
+ *
+ * This is the client interface for dequeueing data from the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
+ void *iov,
+ size_t iov_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !iov)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ iov, iov_size,
+ qp_memcpy_from_queue_iov,
+ true, vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
+
+/*
+ * vmci_qpair_peekv() - Peek at the data in the queue using iov.
+ * @qpair: Pointer to the queue pair struct.
+ * @iov: Pointer to buffer for the data
+ * @iov_size: Length of buffer.
+ * @buf_type: Buffer type (Unused on Linux).
+ *
+ * This is the client interface for peeking into a queue. (I.e.,
+ * copy data from the queue without updating the head pointer.)
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes peeked or < 0 on error.
+ */
+ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
+ void *iov,
+ size_t iov_size,
+ int buf_type)
+{
+ ssize_t result;
+
+ if (!qpair || !iov)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ qp_lock(qpair);
+
+ do {
+ result = qp_dequeue_locked(qpair->produce_q,
+ qpair->consume_q,
+ qpair->consume_q_size,
+ iov, iov_size,
+ qp_memcpy_from_queue_iov,
+ false, vmci_can_block(qpair->flags));
+
+ if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+ !qp_wait_for_ready_queue(qpair))
+ result = VMCI_ERROR_WOULD_BLOCK;
+
+ } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+ qp_unlock(qpair);
+ return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
new file mode 100644
index 000000000000..58c6959f6b6d
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -0,0 +1,191 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_QUEUE_PAIR_H_
+#define _VMCI_QUEUE_PAIR_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_context.h"
+
+/* Callback needed for correctly waiting on events. */
+typedef int (*vmci_event_release_cb) (void *client_data);
+
+/* Guest device port I/O. */
+struct ppn_set {
+ u64 num_produce_pages;
+ u64 num_consume_pages;
+ u32 *produce_ppns;
+ u32 *consume_ppns;
+ bool initialized;
+};
+
+/* VMCIqueue_pairAllocInfo */
+struct vmci_qp_alloc_info {
+ struct vmci_handle handle;
+ u32 peer;
+ u32 flags;
+ u64 produce_size;
+ u64 consume_size;
+ u64 ppn_va; /* Start VA of queue pair PPNs. */
+ u64 num_ppns;
+ s32 result;
+ u32 version;
+};
+
+/* VMCIqueue_pairSetVAInfo */
+struct vmci_qp_set_va_info {
+ struct vmci_handle handle;
+ u64 va; /* Start VA of queue pair PPNs. */
+ u64 num_ppns;
+ u32 version;
+ s32 result;
+};
+
+/*
+ * For backwards compatibility, here is a version of the
+ * VMCIqueue_pairPageFileInfo struct before host support for end points
+ * was added.
+ * Note that the current version of that structure requires VMX to
+ * pass down the VA of the mapped file. Before host support was added
+ * there was nothing of the sort. So, when the driver sees the ioctl
+ * with a parameter that is the sizeof
+ * VMCIqueue_pairPageFileInfo_NoHostQP then it can infer that the version
+ * of VMX running can't attach to host end points because it doesn't
+ * provide the VA of the mapped files.
+ *
+ * The Linux driver doesn't get an indication of the size of the
+ * structure passed down from user space. So, to fix a long standing
+ * but unfiled bug, the _pad field has been renamed to version.
+ * Existing versions of VMX always initialize the PageFileInfo
+ * structure so that _pad, er, version is set to 0.
+ *
+ * A version value of 1 indicates that the size of the structure has
+ * been increased to include two UVA's: produce_uva and consume_uva.
+ * These UVA's are of the mmap()'d queue contents backing files.
+ *
+ * In addition, if when VMX is sending down the
+ * VMCIqueue_pairPageFileInfo structure it gets an error then it will
+ * try again with the _NoHostQP version of the file to see if an older
+ * VMCI kernel module is running.
+ */
+
+/* VMCIqueue_pairPageFileInfo */
+struct vmci_qp_page_file_info {
+ struct vmci_handle handle;
+ u64 produce_page_file; /* User VA. */
+ u64 consume_page_file; /* User VA. */
+ u64 produce_page_file_size; /* Size of the file name array. */
+ u64 consume_page_file_size; /* Size of the file name array. */
+ s32 result;
+ u32 version; /* Was _pad. */
+ u64 produce_va; /* User VA of the mapped file. */
+ u64 consume_va; /* User VA of the mapped file. */
+};
+
+/* vmci queuepair detach info */
+struct vmci_qp_dtch_info {
+ struct vmci_handle handle;
+ s32 result;
+ u32 _pad;
+};
+
+/*
+ * struct vmci_qp_page_store describes how the memory of a given queue pair
+ * is backed. When the queue pair is between the host and a guest, the
+ * page store consists of references to the guest pages. On vmkernel,
+ * this is a list of PPNs, and on hosted, it is a user VA where the
+ * queue pair is mapped into the VMX address space.
+ */
+struct vmci_qp_page_store {
+ /* Reference to pages backing the queue pair. */
+ u64 pages;
+	/* Length of pageList/virtual address range (in pages). */
+ u32 len;
+};
+
+/*
+ * This data type contains the information about a queue.
+ * There are two queues (hence, queue pairs) per transaction model between a
+ * pair of end points, A & B. One queue is used by end point A to transmit
+ * commands and responses to B. The other queue is used by B to transmit
+ * commands and responses.
+ *
+ * struct vmci_queue_kern_if is a per-OS defined Queue structure. It contains
+ * either a direct pointer to the linear address of the buffer contents or a
+ * pointer to structures which help the OS locate those data pages. See
+ * vmciKernelIf.c for each platform for its definition.
+ */
+struct vmci_queue {
+ struct vmci_queue_header *q_header;
+ struct vmci_queue_header *saved_header;
+ struct vmci_queue_kern_if *kernel_if;
+};
+
+/*
+ * Utility function that checks whether the fields of the page
+ * store contain valid values.
+ * Result:
+ * true if the page store is well-formed, false otherwise.
+ */
+static inline bool
+VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
+{
+ return page_store->len >= 2;
+}
+
+/*
+ * Helper function to check if the non-blocking flag
+ * is set for a given queue pair.
+ */
+static inline bool vmci_can_block(u32 flags)
+{
+ return !(flags & VMCI_QPFLAG_NONBLOCK);
+}
+
+/*
+ * Helper function to check if the queue pair is pinned
+ * into memory.
+ */
+static inline bool vmci_qp_pinned(u32 flags)
+{
+ return flags & VMCI_QPFLAG_PINNED;
+}
+
+void vmci_qp_broker_exit(void);
+int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
+ u32 flags, u32 priv_flags,
+ u64 produce_size, u64 consume_size,
+ struct vmci_qp_page_store *page_store,
+ struct vmci_ctx *context);
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+ u64 produce_uva, u64 consume_uva,
+ struct vmci_ctx *context);
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context);
+
+void vmci_qp_guest_endpoints_exit(void);
+
+int vmci_qp_alloc(struct vmci_handle *handle,
+ struct vmci_queue **produce_q, u64 produce_size,
+ struct vmci_queue **consume_q, u64 consume_size,
+ u32 peer, u32 flags, u32 priv_flags,
+ bool guest_endpoint, vmci_event_release_cb wakeup_cb,
+ void *client_data);
+int vmci_qp_broker_map(struct vmci_handle handle,
+ struct vmci_ctx *context, u64 guest_mem);
+int vmci_qp_broker_unmap(struct vmci_handle handle,
+ struct vmci_ctx *context, u32 gid);
+
+#endif /* _VMCI_QUEUE_PAIR_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
new file mode 100644
index 000000000000..a196f84a4fd2
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -0,0 +1,229 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/rculist.h>
+
+#include "vmci_resource.h"
+#include "vmci_driver.h"
+
+
+#define VMCI_RESOURCE_HASH_BITS 7
+#define VMCI_RESOURCE_HASH_BUCKETS (1 << VMCI_RESOURCE_HASH_BITS)
+
+struct vmci_hash_table {
+ spinlock_t lock;
+ struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS];
+};
+
+static struct vmci_hash_table vmci_resource_table = {
+ .lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock),
+};
+
+static unsigned int vmci_resource_hash(struct vmci_handle handle)
+{
+ return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS);
+}
+
+/*
+ * Gets a resource (if one exists) matching given handle from the hash table.
+ */
+static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
+ enum vmci_resource_type type)
+{
+ struct vmci_resource *r, *resource = NULL;
+ struct hlist_node *node;
+ unsigned int idx = vmci_resource_hash(handle);
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(r, node,
+ &vmci_resource_table.entries[idx], node) {
+ u32 cid = r->handle.context;
+ u32 rid = r->handle.resource;
+
+ if (r->type == type &&
+ rid == handle.resource &&
+ (cid == handle.context || cid == VMCI_INVALID_ID)) {
+ resource = r;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return resource;
+}
+
+/*
+ * Find an unused resource ID and return it. The first
+ * VMCI_RESERVED_RESOURCE_ID_MAX are reserved so we start from
+ * its value + 1.
+ * Returns VMCI resource id on success, VMCI_INVALID_ID on failure.
+ */
+static u32 vmci_resource_find_id(u32 context_id,
+ enum vmci_resource_type resource_type)
+{
+ static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+ u32 old_rid = resource_id;
+ u32 current_rid;
+
+ /*
+ * Generate a unique resource ID. Keep on trying until we wrap around
+ * in the RID space.
+ */
+ do {
+ struct vmci_handle handle;
+
+ current_rid = resource_id;
+ resource_id++;
+ if (unlikely(resource_id == VMCI_INVALID_ID)) {
+ /* Skip the reserved rids. */
+ resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+ }
+
+ handle = vmci_make_handle(context_id, current_rid);
+ if (!vmci_resource_lookup(handle, resource_type))
+ return current_rid;
+ } while (resource_id != old_rid);
+
+ return VMCI_INVALID_ID;
+}
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+ enum vmci_resource_type resource_type,
+ struct vmci_handle handle)
+
+{
+ unsigned int idx;
+ int result;
+
+ spin_lock(&vmci_resource_table.lock);
+
+ if (handle.resource == VMCI_INVALID_ID) {
+ handle.resource = vmci_resource_find_id(handle.context,
+ resource_type);
+ if (handle.resource == VMCI_INVALID_ID) {
+ result = VMCI_ERROR_NO_HANDLE;
+ goto out;
+ }
+ } else if (vmci_resource_lookup(handle, resource_type)) {
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ goto out;
+ }
+
+ resource->handle = handle;
+ resource->type = resource_type;
+ INIT_HLIST_NODE(&resource->node);
+ kref_init(&resource->kref);
+ init_completion(&resource->done);
+
+ idx = vmci_resource_hash(resource->handle);
+ hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);
+
+ result = VMCI_SUCCESS;
+
+out:
+ spin_unlock(&vmci_resource_table.lock);
+ return result;
+}
+
+void vmci_resource_remove(struct vmci_resource *resource)
+{
+ struct vmci_handle handle = resource->handle;
+ unsigned int idx = vmci_resource_hash(handle);
+ struct vmci_resource *r;
+ struct hlist_node *node;
+
+ /* Remove resource from hash table. */
+ spin_lock(&vmci_resource_table.lock);
+
+ hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) {
+ if (vmci_handle_is_equal(r->handle, resource->handle)) {
+ hlist_del_init_rcu(&r->node);
+ break;
+ }
+ }
+
+ spin_unlock(&vmci_resource_table.lock);
+ synchronize_rcu();
+
+ vmci_resource_put(resource);
+ wait_for_completion(&resource->done);
+}
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+ enum vmci_resource_type resource_type)
+{
+ struct vmci_resource *r, *resource = NULL;
+
+ rcu_read_lock();
+
+ r = vmci_resource_lookup(resource_handle, resource_type);
+ if (r &&
+ (resource_type == r->type ||
+ resource_type == VMCI_RESOURCE_TYPE_ANY)) {
+ resource = vmci_resource_get(r);
+ }
+
+ rcu_read_unlock();
+
+ return resource;
+}
+
+/*
+ * Get a reference to given resource.
+ */
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource)
+{
+ kref_get(&resource->kref);
+
+ return resource;
+}
+
+static void vmci_release_resource(struct kref *kref)
+{
+ struct vmci_resource *resource =
+ container_of(kref, struct vmci_resource, kref);
+
+ /* Verify the resource has been unlinked from hash table */
+ WARN_ON(!hlist_unhashed(&resource->node));
+
+ /* Signal that container of this resource can now be destroyed */
+ complete(&resource->done);
+}
+
+/*
+ * Resource's release function will get called if last reference.
+ * If it is the last reference, then we are sure that nobody else
+ * can increment the count again (it's gone from the resource hash
+ * table), so there's no need for locking here.
+ */
+int vmci_resource_put(struct vmci_resource *resource)
+{
+ /*
+ * We propagate the information back to caller in case it wants to know
+ * whether entry was freed.
+ */
+ return kref_put(&resource->kref, vmci_release_resource) ?
+ VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS;
+}
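+
+/*
+ * Lifecycle sketch (illustrative only): a typical owner embeds a
+ * struct vmci_resource, registers it with vmci_resource_add(), and
+ * tears it down with vmci_resource_remove(), which waits for the last
+ * vmci_resource_put() because the final kref release completes ->done:
+ *
+ *	vmci_resource_add(&entry->resource,
+ *			  VMCI_RESOURCE_TYPE_QPAIR_HOST, handle);
+ *	...
+ *	vmci_resource_remove(&entry->resource);
+ *	kfree(entry);
+ */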
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource)
+{
+ return resource->handle;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_resource.h b/drivers/misc/vmw_vmci/vmci_resource.h
new file mode 100644
index 000000000000..9190cd298bee
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_resource.h
@@ -0,0 +1,59 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_RESOURCE_H_
+#define _VMCI_RESOURCE_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_context.h"
+
+
+enum vmci_resource_type {
+ VMCI_RESOURCE_TYPE_ANY,
+ VMCI_RESOURCE_TYPE_API,
+ VMCI_RESOURCE_TYPE_GROUP,
+ VMCI_RESOURCE_TYPE_DATAGRAM,
+ VMCI_RESOURCE_TYPE_DOORBELL,
+ VMCI_RESOURCE_TYPE_QPAIR_GUEST,
+ VMCI_RESOURCE_TYPE_QPAIR_HOST
+};
+
+struct vmci_resource {
+ struct vmci_handle handle;
+ enum vmci_resource_type type;
+ struct hlist_node node;
+ struct kref kref;
+ struct completion done;
+};
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+ enum vmci_resource_type resource_type,
+ struct vmci_handle handle);
+
+void vmci_resource_remove(struct vmci_resource *resource);
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+ enum vmci_resource_type resource_type);
+
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource);
+int vmci_resource_put(struct vmci_resource *resource);
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource);
+
+#endif /* _VMCI_RESOURCE_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_route.c b/drivers/misc/vmw_vmci/vmci_route.c
new file mode 100644
index 000000000000..91090658b929
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_route.c
@@ -0,0 +1,226 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+/*
+ * Make a routing decision for the given source and destination handles.
+ * This will try to determine the route using the handles and the available
+ * devices. Will set the source context if it is invalid.
+ */
+int vmci_route(struct vmci_handle *src,
+ const struct vmci_handle *dst,
+ bool from_guest,
+ enum vmci_route *route)
+{
+ bool has_host_device = vmci_host_code_active();
+ bool has_guest_device = vmci_guest_code_active();
+
+ *route = VMCI_ROUTE_NONE;
+
+ /*
+ * "from_guest" is only ever set to true by
+ * IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent),
+ * which comes from the VMX, so we know it is coming from a
+ * guest.
+ *
+ * To avoid inconsistencies, test these once. We will test
+ * them again when we do the actual send to ensure that we do
+ * not touch a non-existent device.
+ */
+
+ /* Must have a valid destination context. */
+ if (VMCI_INVALID_ID == dst->context)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /* Anywhere to hypervisor. */
+ if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) {
+
+ /*
+ * If this message already came from a guest then we
+ * cannot send it to the hypervisor. It must come
+ * from a local client.
+ */
+ if (from_guest)
+ return VMCI_ERROR_DST_UNREACHABLE;
+
+ /*
+ * We must be acting as a guest in order to send to
+ * the hypervisor.
+ */
+ if (!has_guest_device)
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+
+ /* And we cannot send if the source is the host context. */
+ if (VMCI_HOST_CONTEXT_ID == src->context)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ /*
+ * If the client passed the ANON source handle then
+ * respect it (both context and resource are invalid).
+ * However, if they passed only an invalid context,
+ * then they probably mean ANY, in which case we
+ * should set the real context here before passing it
+ * down.
+ */
+ if (VMCI_INVALID_ID == src->context &&
+ VMCI_INVALID_ID != src->resource)
+ src->context = vmci_get_context_id();
+
+ /* Send from local client down to the hypervisor. */
+ *route = VMCI_ROUTE_AS_GUEST;
+ return VMCI_SUCCESS;
+ }
+
+ /* Anywhere to local client on host. */
+ if (VMCI_HOST_CONTEXT_ID == dst->context) {
+ /*
+ * If it is not from a guest but we are acting as a
+ * guest, then we need to send it down to the host.
+ * Note that if we are also acting as a host then this
+ * will prevent us from sending from local client to
+ * local client, but we accept that restriction as a
+ * way to remove any ambiguity from the host context.
+ */
+ if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) {
+ /*
+ * If the hypervisor is the source, this is
+ * host local communication. The hypervisor
+ * may send vmci event datagrams to the host
+ * itself, but it will never send datagrams to
+ * an "outer host" through the guest device.
+ */
+
+ if (has_host_device) {
+ *route = VMCI_ROUTE_AS_HOST;
+ return VMCI_SUCCESS;
+ } else {
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+ }
+ }
+
+ if (!from_guest && has_guest_device) {
+ /* If no source context then use the current. */
+ if (VMCI_INVALID_ID == src->context)
+ src->context = vmci_get_context_id();
+
+ /* Send it from local client down to the host. */
+ *route = VMCI_ROUTE_AS_GUEST;
+ return VMCI_SUCCESS;
+ }
+
+ /*
+ * Otherwise we already received it from a guest and
+ * it is destined for a local client on this host, or
+ * it is from another local client on this host. We
+ * must be acting as a host to service it.
+ */
+ if (!has_host_device)
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+
+ if (VMCI_INVALID_ID == src->context) {
+ /*
+ * If it came from a guest then it must have a
+ * valid context. Otherwise we can use the
+ * host context.
+ */
+ if (from_guest)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src->context = VMCI_HOST_CONTEXT_ID;
+ }
+
+ /* Route to local client. */
+ *route = VMCI_ROUTE_AS_HOST;
+ return VMCI_SUCCESS;
+ }
+
+ /*
+ * If we are acting as a host then this might be destined for
+ * a guest.
+ */
+ if (has_host_device) {
+ /* It will have a context if it is meant for a guest. */
+ if (vmci_ctx_exists(dst->context)) {
+ if (VMCI_INVALID_ID == src->context) {
+ /*
+ * If it came from a guest then it
+ * must have a valid context.
+ * Otherwise we can use the host
+ * context.
+ */
+
+ if (from_guest)
+ return VMCI_ERROR_INVALID_ARGS;
+
+ src->context = VMCI_HOST_CONTEXT_ID;
+ } else if (VMCI_CONTEXT_IS_VM(src->context) &&
+ src->context != dst->context) {
+ /*
+ * VM to VM communication is not
+ * allowed. Since we catch all
+ * communication destined for the host
+ * above, this must be destined for a
+ * VM since there is a valid context.
+ */
+
+ return VMCI_ERROR_DST_UNREACHABLE;
+ }
+
+ /* Pass it up to the guest. */
+ *route = VMCI_ROUTE_AS_HOST;
+ return VMCI_SUCCESS;
+ } else if (!has_guest_device) {
+ /*
+ * The host is attempting to reach a CID
+ * without an active context, and we can't
+ * send it down, since we have no guest
+ * device.
+ */
+
+ return VMCI_ERROR_DST_UNREACHABLE;
+ }
+ }
+
+ /*
+ * We must be a guest trying to send to another guest, which means
+ * we need to send it down to the host. We do not filter out VM to
+ * VM communication here, since we want to be able to use the guest
+ * driver on older versions that do support VM to VM communication.
+ */
+ if (!has_guest_device) {
+ /*
+ * Ending up here means we have neither guest nor host
+ * device.
+ */
+ return VMCI_ERROR_DEVICE_NOT_FOUND;
+ }
+
+ /* If no source context then use the current context. */
+ if (VMCI_INVALID_ID == src->context)
+ src->context = vmci_get_context_id();
+
+ /*
+ * Send it from local client down to the host, which will
+ * route it to the other guest for us.
+ */
+ *route = VMCI_ROUTE_AS_GUEST;
+ return VMCI_SUCCESS;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_route.h b/drivers/misc/vmw_vmci/vmci_route.h
new file mode 100644
index 000000000000..3b30e82419c3
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_route.h
@@ -0,0 +1,30 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_ROUTE_H_
+#define _VMCI_ROUTE_H_
+
+#include <linux/vmw_vmci_defs.h>
+
+enum vmci_route {
+ VMCI_ROUTE_NONE,
+ VMCI_ROUTE_AS_HOST,
+ VMCI_ROUTE_AS_GUEST,
+};
+
+int vmci_route(struct vmci_handle *src, const struct vmci_handle *dst,
+ bool from_guest, enum vmci_route *route);
+
+#endif /* _VMCI_ROUTE_H_ */
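
A hedged usage sketch for the routing helper declared above: the caller passes the source/destination handles plus the origin flag, then dispatches on the enum vmci_route written back. The pr_debug() strings only stand in for the real send paths:

	/* Illustrative caller: let vmci_route() pick the direction of the send. */
	static int example_pick_route(struct vmci_handle *src,
				      const struct vmci_handle *dst,
				      bool from_guest)
	{
		enum vmci_route route;
		int err;

		err = vmci_route(src, dst, from_guest, &route);
		if (err != VMCI_SUCCESS)
			return err;

		switch (route) {
		case VMCI_ROUTE_AS_GUEST:
			pr_debug("send through the guest device\n");
			break;
		case VMCI_ROUTE_AS_HOST:
			pr_debug("deliver via the host personality\n");
			break;
		default:
			return VMCI_ERROR_DST_UNREACHABLE;
		}
		return VMCI_SUCCESS;
	}
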
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783bf924..5562308699bc 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -52,6 +52,7 @@ config MMC_BLOCK_BOUNCE
config SDIO_UART
tristate "SDIO UART/GPS class support"
+ depends on TTY
help
SDIO function driver for SDIO cards that implements the UART
class, as well as the GPS class which appears like a UART.
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index bd57a11acc79..c931dfe6a59c 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -381,7 +381,6 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port)
static void sdio_uart_receive_chars(struct sdio_uart_port *port,
unsigned int *status)
{
- struct tty_struct *tty = tty_port_tty_get(&port->port);
unsigned int ch, flag;
int max_count = 256;
@@ -418,23 +417,19 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port,
}
if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
- if (tty)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(&port->port, ch, flag);
/*
* Overrun is special. Since it's reported immediately,
* it doesn't affect the current character.
*/
if (*status & ~port->ignore_status_mask & UART_LSR_OE)
- if (tty)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
*status = sdio_in(port, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+
+ tty_flip_buffer_push(&port->port);
}
static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
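
The hunk above is part of the tty layer's move from tty_struct to tty_port based flip buffers; a minimal sketch of the resulting receive-side idiom, assuming a driver that embeds a struct tty_port (buffer contents and flag are illustrative):

	#include <linux/tty_flip.h>

	/* Illustrative RX path: queue characters on the tty_port, then push. */
	static void example_rx(struct tty_port *port, const unsigned char *buf,
			       int count)
	{
		int i;

		for (i = 0; i < count; i++)
			tty_insert_flip_char(port, buf[i], TTY_NORMAL);

		tty_flip_buffer_push(port);
	}
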
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ef103871517f..269d072ef55e 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -18,8 +18,7 @@ config MMC_UNSAFE_RESUME
module parameter "removable=0" or "removable=1".
config MMC_CLKGATE
- bool "MMC host clock gating (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "MMC host clock gating"
help
This will attempt to aggressively gate the clock to the MMC card.
This is done to save power due to gating off the logic and bus
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8d13c6594520..3be8b94d7914 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -69,7 +69,7 @@ config MMC_SDHCI_PCI
If unsure, say N.
config MMC_RICOH_MMC
- bool "Ricoh MMC Controller Disabler (EXPERIMENTAL)"
+ bool "Ricoh MMC Controller Disabler"
depends on MMC_SDHCI_PCI
help
This adds a pci quirk to disable Ricoh MMC Controller. This
@@ -186,9 +186,6 @@ config MMC_SDHCI_S3C
often referred to as the HSMMC block in some of the Samsung S3C
range of SoC.
- Note, due to the problems with DMA, the DMA support is only
- available with CONFIG_EXPERIMENTAL is selected.
-
If you have a controller with this interface, say Y or M here.
If unsure, say N.
@@ -233,7 +230,7 @@ config MMC_SDHCI_SPEAR
config MMC_SDHCI_S3C_DMA
bool "DMA support on S3C SDHCI"
- depends on MMC_SDHCI_S3C && EXPERIMENTAL
+ depends on MMC_SDHCI_S3C
help
Enable DMA support on the Samsung S3C SDHCI glue. The DMA
has proved to be problematic if the controller encounters
@@ -330,8 +327,8 @@ config MMC_MXS
If unsure, say N.
config MMC_TIFM_SD
- tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI
+ tristate "TI Flash Media MMC/SD Interface support"
+ depends on PCI
select TIFM_CORE
help
Say Y here if you want to be able to access MMC/SD cards with
@@ -410,8 +407,7 @@ config MMC_S3C_PIO
the S3C MCI driver.
config MMC_S3C_DMA
- bool "Use DMA transfers only (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "Use DMA transfers only"
help
Use DMA to transfer data between memory and the hardware.
@@ -420,7 +416,7 @@ config MMC_S3C_DMA
option is useful.
config MMC_S3C_PIODMA
- bool "Support for both PIO and DMA (EXPERIMENTAL)"
+ bool "Support for both PIO and DMA"
help
Compile both the PIO and DMA transfer routines into the
driver and let the platform select at run-time which one
@@ -431,8 +427,8 @@ config MMC_S3C_PIODMA
endchoice
config MMC_SDRICOH_CS
- tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI && PCMCIA
+ tristate "MMC/SD driver for Ricoh Bay1Controllers"
+ depends on PCI && PCMCIA
help
Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA
card whenever you insert a MMC or SD card into the card slot.
@@ -461,7 +457,7 @@ config MMC_SDHI
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
- depends on PCI
+ depends on PCI && GENERIC_HARDIRQS
select CB710_CORE
help
This option enables support for MMC/SD part of ENE CB710/720 Flash
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 5e1fb1d2c422..41c27b74b003 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -10,6 +10,7 @@
* (at your option) any later version.
*/
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -46,9 +47,9 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
host->dev = &pdev->dev;
host->irq_flags = 0;
host->pdata = pdev->dev.platform_data;
- host->regs = devm_request_and_ioremap(&pdev->dev, regs);
- if (!host->regs)
- return -ENOMEM;
+ host->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host->regs))
+ return PTR_ERR(host->regs);
if (drv_data && drv_data->init) {
ret = drv_data->init(host);
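
The devm_request_and_ioremap() to devm_ioremap_resource() conversion above recurs throughout the rest of this series; the error-handling idiom is always the one sketched below (the function and variable names are placeholders):

	/* Illustrative probe fragment: map a MEM resource with managed helpers. */
	static int example_map_registers(struct platform_device *pdev,
					 void __iomem **base)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		*base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(*base))
			return PTR_ERR(*base);

		return 0;
	}
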
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 150772395cc6..372e921389c8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
+#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
@@ -59,6 +60,7 @@ static unsigned int fmax = 515633;
* @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
* @pwrreg_powerup: power up value for MMCIPOWER register
* @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
*/
struct variant_data {
unsigned int clkreg;
@@ -71,6 +73,7 @@ struct variant_data {
bool blksz_datactrl16;
u32 pwrreg_powerup;
bool signal_direction;
+ bool pwrreg_clkgate;
};
static struct variant_data variant_arm = {
@@ -87,6 +90,14 @@ static struct variant_data variant_arm_extended_fifo = {
.pwrreg_powerup = MCI_PWR_UP,
};
+static struct variant_data variant_arm_extended_fifo_hwfc = {
+ .fifosize = 128 * 4,
+ .fifohalfsize = 64 * 4,
+ .clkreg_enable = MCI_ARM_HWFCEN,
+ .datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
+};
+
static struct variant_data variant_u300 = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
@@ -95,6 +106,7 @@ static struct variant_data variant_u300 = {
.sdio = true,
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
+ .pwrreg_clkgate = true,
};
static struct variant_data variant_nomadik = {
@@ -106,6 +118,7 @@ static struct variant_data variant_nomadik = {
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
+ .pwrreg_clkgate = true,
};
static struct variant_data variant_ux500 = {
@@ -118,6 +131,7 @@ static struct variant_data variant_ux500 = {
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
+ .pwrreg_clkgate = true,
};
static struct variant_data variant_ux500v2 = {
@@ -131,9 +145,28 @@ static struct variant_data variant_ux500v2 = {
.blksz_datactrl16 = true,
.pwrreg_powerup = MCI_PWR_ON,
.signal_direction = true,
+ .pwrreg_clkgate = true,
};
/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ if (!data)
+ return 0;
+
+ if (!is_power_of_2(data->blksz)) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported block size (%d bytes)\n", data->blksz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
* This must be called with host->lock held
*/
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
@@ -202,6 +235,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
clk |= MCI_ST_8BIT_BUS;
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ clk |= MCI_ST_UX500_NEG_EDGE;
+
mmci_write_clkreg(host, clk);
}
@@ -352,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
host->dma_rx_channel = host->dma_tx_channel = NULL;
}
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(host->dma_current);
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
+ host->data->host_cookie = 0;
+}
+
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
- struct dma_chan *chan = host->dma_current;
+ struct dma_chan *chan;
enum dma_data_direction dir;
+
+ if (data->flags & MMC_DATA_READ) {
+ dir = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ dir = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
u32 status;
int i;
@@ -374,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
* contiguous buffers. On TX, we'll get a FIFO underrun error.
*/
if (status & MCI_RXDATAAVLBLMASK) {
- dmaengine_terminate_all(chan);
+ mmci_dma_data_error(host);
if (!data->error)
data->error = -EIO;
}
- if (data->flags & MMC_DATA_WRITE) {
- dir = DMA_TO_DEVICE;
- } else {
- dir = DMA_FROM_DEVICE;
- }
-
if (!data->host_cookie)
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+ mmci_dma_unmap(host, data);
/*
* Use of DMA with scatter-gather is impossible.
@@ -396,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
mmci_dma_release(host);
}
-}
-static void mmci_dma_data_error(struct mmci_host *host)
-{
- dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
- dmaengine_terminate_all(host->dma_current);
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
}
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
- struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+ struct dma_chan **dma_chan,
+ struct dma_async_tx_descriptor **dma_desc)
{
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
@@ -423,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
enum dma_data_direction buffer_dirn;
int nr_sg;
- /* Check if next job is already prepared */
- if (data->host_cookie && !next &&
- host->dma_current && host->dma_desc_current)
- return 0;
-
- if (!next) {
- host->dma_current = NULL;
- host->dma_desc_current = NULL;
- }
-
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_DEV_TO_MEM;
buffer_dirn = DMA_FROM_DEVICE;
@@ -462,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
if (!desc)
goto unmap_exit;
- if (next) {
- next->dma_chan = chan;
- next->dma_desc = desc;
- } else {
- host->dma_current = chan;
- host->dma_desc_current = desc;
- }
+ *dma_chan = chan;
+ *dma_desc = desc;
return 0;
unmap_exit:
- if (!next)
- dmaengine_terminate_all(chan);
dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
return -ENOMEM;
}
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ /* Check if next job is already prepared. */
+ if (host->dma_current && host->dma_desc_current)
+ return 0;
+
+ /* No job was prepared, so do it now. */
+ return __mmci_dma_prep_data(host, data, &host->dma_current,
+ &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ struct mmci_host_next *nd = &host->next_data;
+ return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
int ret;
struct mmc_data *data = host->data;
- ret = mmci_dma_prep_data(host, host->data, NULL);
+ ret = mmci_dma_prep_data(host, host->data);
if (ret)
return ret;
@@ -514,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
struct mmci_host_next *next = &host->next_data;
- if (data->host_cookie && data->host_cookie != next->cookie) {
- pr_warning("[%s] invalid cookie: data->host_cookie %d"
- " host->next_data.cookie %d\n",
- __func__, data->host_cookie, host->next_data.cookie);
- data->host_cookie = 0;
- }
-
- if (!data->host_cookie)
- return;
+ WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
+ WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
host->dma_desc_current = next->dma_desc;
host->dma_current = next->dma_chan;
-
next->dma_desc = NULL;
next->dma_chan = NULL;
}
@@ -541,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
if (!data)
return;
- if (data->host_cookie) {
- data->host_cookie = 0;
+ BUG_ON(data->host_cookie);
+
+ if (mmci_validate_data(host, data))
return;
- }
- /* if config for dma */
- if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
- ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
- if (mmci_dma_prep_data(host, data, nd))
- data->host_cookie = 0;
- else
- data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
- }
+ if (!mmci_dma_prep_next(host, data))
+ data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -561,29 +601,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
{
struct mmci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
- struct dma_chan *chan;
- enum dma_data_direction dir;
- if (!data)
+ if (!data || !data->host_cookie)
return;
- if (data->flags & MMC_DATA_READ) {
- dir = DMA_FROM_DEVICE;
- chan = host->dma_rx_channel;
- } else {
- dir = DMA_TO_DEVICE;
- chan = host->dma_tx_channel;
- }
+ mmci_dma_unmap(host, data);
+ if (err) {
+ struct mmci_host_next *next = &host->next_data;
+ struct dma_chan *chan;
+ if (data->flags & MMC_DATA_READ)
+ chan = host->dma_rx_channel;
+ else
+ chan = host->dma_tx_channel;
+ dmaengine_terminate_all(chan);
- /* if config for dma */
- if (chan) {
- if (err)
- dmaengine_terminate_all(chan);
- if (data->host_cookie)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, dir);
- mrq->data->host_cookie = 0;
+ next->dma_desc = NULL;
+ next->dma_chan = NULL;
}
}
@@ -604,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}
+static inline void mmci_dma_finalize(struct mmci_host *host,
+ struct mmc_data *data)
+{
+}
+
static inline void mmci_dma_data_error(struct mmci_host *host)
{
}
@@ -680,6 +719,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
mmci_write_clkreg(host, clk);
}
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ datactrl |= MCI_ST_DPSM_DDRMODE;
+
/*
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
@@ -751,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
u32 remain, success;
/* Terminate the DMA transfer */
- if (dma_inprogress(host))
+ if (dma_inprogress(host)) {
mmci_dma_data_error(host);
+ mmci_dma_unmap(host, data);
+ }
/*
* Calculate how far we are into the transfer. Note that
@@ -791,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
if (status & MCI_DATAEND || data->error) {
if (dma_inprogress(host))
- mmci_dma_unmap(host, data);
+ mmci_dma_finalize(host, data);
mmci_stop_data(host);
if (!data->error)
@@ -828,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
if (!cmd->data || cmd->error) {
if (host->data) {
/* Terminate the DMA transfer */
- if (dma_inprogress(host))
+ if (dma_inprogress(host)) {
mmci_dma_data_error(host);
+ mmci_dma_unmap(host, host->data);
+ }
mmci_stop_data(host);
}
mmci_request_end(host, cmd->mrq);
@@ -1055,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(host->mrq != NULL);
- if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
- dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
- mrq->data->blksz);
- mrq->cmd->error = -EINVAL;
+ mrq->cmd->error = mmci_validate_data(host, mrq->data);
+ if (mrq->cmd->error) {
mmc_request_done(mmc, mrq);
return;
}
@@ -1086,7 +1130,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct variant_data *variant = host->variant;
u32 pwr = 0;
unsigned long flags;
- int ret;
pm_runtime_get_sync(mmc_dev(mmc));
@@ -1096,23 +1139,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
- if (host->vcc)
- ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
break;
case MMC_POWER_UP:
- if (host->vcc) {
- ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
- if (ret) {
- dev_err(mmc_dev(mmc), "unable to set OCR\n");
- /*
- * The .set_ios() function in the mmc_host_ops
- * struct return void, and failing to set the
- * power should be rare so we print an error
- * and return here.
- */
- goto out;
- }
- }
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
/*
* The ST Micro variant doesn't have the PL180s MCI_PWR_UP
* and instead uses MCI_PWR_ON so apply whatever value is
@@ -1154,6 +1187,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
+ /*
+ * If clock = 0 and the variant requires the MMCIPOWER to be used for
+ * gating the clock, the MCI_PWR_ON bit is cleared.
+ */
+ if (!ios->clock && variant->pwrreg_clkgate)
+ pwr &= ~MCI_PWR_ON;
+
spin_lock_irqsave(&host->lock, flags);
mmci_set_clkreg(host, ios->clock);
@@ -1161,7 +1201,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_unlock_irqrestore(&host->lock, flags);
- out:
pm_runtime_mark_last_busy(mmc_dev(mmc));
pm_runtime_put_autosuspend(mmc_dev(mmc));
}
@@ -1384,32 +1423,19 @@ static int mmci_probe(struct amba_device *dev,
} else
dev_warn(&dev->dev, "could not get default pinstate\n");
-#ifdef CONFIG_REGULATOR
- /* If we're using the regulator framework, try to fetch a regulator */
- host->vcc = regulator_get(&dev->dev, "vmmc");
- if (IS_ERR(host->vcc))
- host->vcc = NULL;
- else {
- int mask = mmc_regulator_get_ocrmask(host->vcc);
-
- if (mask < 0)
- dev_err(&dev->dev, "error getting OCR mask (%d)\n",
- mask);
- else {
- host->mmc->ocr_avail = (u32) mask;
- if (plat->ocr_mask)
- dev_warn(&dev->dev,
- "Provided ocr_mask/setpower will not be used "
- "(using regulator instead)\n");
- }
- }
-#endif
- /* Fall back to platform data if no regulator is found */
- if (host->vcc == NULL)
+ /* Get regulators and the supported OCR mask */
+ mmc_regulator_get_supply(mmc);
+ if (!mmc->ocr_avail)
mmc->ocr_avail = plat->ocr_mask;
+ else if (plat->ocr_mask)
+ dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+
mmc->caps = plat->capabilities;
mmc->caps2 = plat->capabilities2;
+ /* We support these PM capabilities. */
+ mmc->pm_caps = MMC_PM_KEEP_POWER;
+
/*
* We can do SGIO
*/
@@ -1585,10 +1611,6 @@ static int mmci_remove(struct amba_device *dev)
clk_disable_unprepare(host->clk);
clk_put(host->clk);
- if (host->vcc)
- mmc_regulator_set_ocr(mmc, host->vcc, 0);
- regulator_put(host->vcc);
-
mmc_free_host(mmc);
amba_release_regions(dev);
@@ -1636,8 +1658,37 @@ static int mmci_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM_RUNTIME
+static int mmci_runtime_suspend(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+ clk_disable_unprepare(host->clk);
+ }
+
+ return 0;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
+
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+ clk_prepare_enable(host->clk);
+ }
+
+ return 0;
+}
+#endif
+
static const struct dev_pm_ops mmci_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
+ SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
static struct amba_id mmci_ids[] = {
@@ -1652,6 +1703,11 @@ static struct amba_id mmci_ids[] = {
.data = &variant_arm_extended_fifo,
},
{
+ .id = 0x02041180,
+ .mask = 0xff0fffff,
+ .data = &variant_arm_extended_fifo_hwfc,
+ },
+ {
.id = 0x00041181,
.mask = 0x000fffff,
.data = &variant_arm,
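
The runtime PM callbacks added above only gate the interface clock; a hedged sketch of how they pair with the pm_runtime_get_sync()/pm_runtime_put_autosuspend() calls already visible in mmci_set_ios() (the register access in the middle is a placeholder):

	/* Illustrative pattern: bracket hardware access with runtime PM references. */
	static void example_touch_hardware(struct mmc_host *mmc)
	{
		pm_runtime_get_sync(mmc_dev(mmc));	/* resume -> clk_prepare_enable() */

		/* ... access registers while the clock is guaranteed to run ... */

		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));	/* may suspend -> clk_disable_unprepare() */
	}
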
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index d34d8c0add8e..1f33ad5333a0 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -28,6 +28,8 @@
#define MCI_ST_UX500_NEG_EDGE (1 << 13)
#define MCI_ST_UX500_HWFCEN (1 << 14)
#define MCI_ST_UX500_CLK_INV (1 << 15)
+/* Modified PL180 on Versatile Express platform */
+#define MCI_ARM_HWFCEN (1 << 12)
#define MMCIARGUMENT 0x008
#define MMCICOMMAND 0x00c
@@ -193,7 +195,6 @@ struct mmci_host {
/* pio stuff */
struct sg_mapping_iter sg_miter;
unsigned int size;
- struct regulator *vcc;
/* pinctrl handles */
struct pinctrl *pinctrl;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index de4c20b3936c..f8dd36102949 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -50,8 +50,6 @@ struct mvsd_host {
struct timer_list timer;
struct mmc_host *mmc;
struct device *dev;
- struct resource *res;
- int irq;
struct clk *clk;
int gpio_card_detect;
int gpio_write_protect;
@@ -718,10 +716,6 @@ static int __init mvsd_probe(struct platform_device *pdev)
if (!r || irq < 0 || !mvsd_data)
return -ENXIO;
- r = request_mem_region(r->start, SZ_1K, DRIVER_NAME);
- if (!r)
- return -EBUSY;
-
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
@@ -731,8 +725,8 @@ static int __init mvsd_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
host->dev = &pdev->dev;
- host->res = r;
host->base_clock = mvsd_data->clock / 2;
+ host->clk = ERR_PTR(-EINVAL);
mmc->ops = &mvsd_ops;
@@ -752,7 +746,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
- host->base = ioremap(r->start, SZ_4K);
+ host->base = devm_request_and_ioremap(&pdev->dev, r);
if (!host->base) {
ret = -ENOMEM;
goto out;
@@ -765,44 +759,45 @@ static int __init mvsd_probe(struct platform_device *pdev)
mvsd_power_down(host);
- ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host);
+ ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
if (ret) {
pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq);
goto out;
- } else
- host->irq = irq;
+ }
/* Not all platforms can gate the clock, so it is not
an error if the clock does not exist. */
- host->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(host->clk)) {
+ host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(host->clk))
clk_prepare_enable(host->clk);
- }
if (mvsd_data->gpio_card_detect) {
- ret = gpio_request(mvsd_data->gpio_card_detect,
- DRIVER_NAME " cd");
+ ret = devm_gpio_request_one(&pdev->dev,
+ mvsd_data->gpio_card_detect,
+ GPIOF_IN, DRIVER_NAME " cd");
if (ret == 0) {
- gpio_direction_input(mvsd_data->gpio_card_detect);
irq = gpio_to_irq(mvsd_data->gpio_card_detect);
- ret = request_irq(irq, mvsd_card_detect_irq,
- IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING,
- DRIVER_NAME " cd", host);
+ ret = devm_request_irq(&pdev->dev, irq,
+ mvsd_card_detect_irq,
+ IRQ_TYPE_EDGE_RISING |
+ IRQ_TYPE_EDGE_FALLING,
+ DRIVER_NAME " cd", host);
if (ret == 0)
host->gpio_card_detect =
mvsd_data->gpio_card_detect;
else
- gpio_free(mvsd_data->gpio_card_detect);
+ devm_gpio_free(&pdev->dev,
+ mvsd_data->gpio_card_detect);
}
}
if (!host->gpio_card_detect)
mmc->caps |= MMC_CAP_NEEDS_POLL;
if (mvsd_data->gpio_write_protect) {
- ret = gpio_request(mvsd_data->gpio_write_protect,
- DRIVER_NAME " wp");
+ ret = devm_gpio_request_one(&pdev->dev,
+ mvsd_data->gpio_write_protect,
+ GPIOF_IN, DRIVER_NAME " wp");
if (ret == 0) {
- gpio_direction_input(mvsd_data->gpio_write_protect);
host->gpio_write_protect =
mvsd_data->gpio_write_protect;
}
@@ -824,26 +819,11 @@ static int __init mvsd_probe(struct platform_device *pdev)
return 0;
out:
- if (host) {
- if (host->irq)
- free_irq(host->irq, host);
- if (host->gpio_card_detect) {
- free_irq(gpio_to_irq(host->gpio_card_detect), host);
- gpio_free(host->gpio_card_detect);
- }
- if (host->gpio_write_protect)
- gpio_free(host->gpio_write_protect);
- if (host->base)
- iounmap(host->base);
- }
- if (r)
- release_resource(r);
- if (mmc)
- if (!IS_ERR_OR_NULL(host->clk)) {
+ if (mmc) {
+ if (!IS_ERR(host->clk))
clk_disable_unprepare(host->clk);
- clk_put(host->clk);
- }
mmc_free_host(mmc);
+ }
return ret;
}
@@ -852,28 +832,16 @@ static int __exit mvsd_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
- if (mmc) {
- struct mvsd_host *host = mmc_priv(mmc);
+ struct mvsd_host *host = mmc_priv(mmc);
- if (host->gpio_card_detect) {
- free_irq(gpio_to_irq(host->gpio_card_detect), host);
- gpio_free(host->gpio_card_detect);
- }
- mmc_remove_host(mmc);
- free_irq(host->irq, host);
- if (host->gpio_write_protect)
- gpio_free(host->gpio_write_protect);
- del_timer_sync(&host->timer);
- mvsd_power_down(host);
- iounmap(host->base);
- release_resource(host->res);
+ mmc_remove_host(mmc);
+ del_timer_sync(&host->timer);
+ mvsd_power_down(host);
+
+ if (!IS_ERR(host->clk))
+ clk_disable_unprepare(host->clk);
+ mmc_free_host(mmc);
- if (!IS_ERR(host->clk)) {
- clk_disable_unprepare(host->clk);
- clk_put(host->clk);
- }
- mmc_free_host(mmc);
- }
platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 206fe499ded5..5b665551a6f3 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -614,9 +614,9 @@ static int mxs_mmc_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
ssp = &host->ssp;
ssp->dev = &pdev->dev;
- ssp->base = devm_request_and_ioremap(&pdev->dev, iores);
- if (!ssp->base) {
- ret = -EADDRNOTAVAIL;
+ ssp->base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(ssp->base)) {
+ ret = PTR_ERR(ssp->base);
goto out_mmc_free;
}
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 571915dfb218..f74b5adca642 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -1060,26 +1060,6 @@ static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host)
return 0;
}
-static int sd_change_bank_voltage(struct realtek_pci_sdmmc *host, u8 voltage)
-{
- struct rtsx_pcr *pcr = host->pcr;
- int err;
-
- if (voltage == SD_IO_3V3) {
- err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
- if (err < 0)
- return err;
- } else if (voltage == SD_IO_1V8) {
- err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
- if (err < 0)
- return err;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
@@ -1098,11 +1078,11 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
rtsx_pci_start_run(pcr);
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- voltage = SD_IO_3V3;
+ voltage = OUTPUT_3V3;
else
- voltage = SD_IO_1V8;
+ voltage = OUTPUT_1V8;
- if (voltage == SD_IO_1V8) {
+ if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_register(pcr,
SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
if (err < 0)
@@ -1113,11 +1093,11 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
goto out;
}
- err = sd_change_bank_voltage(host, voltage);
+ err = rtsx_pci_switch_output_voltage(pcr, voltage);
if (err < 0)
goto out;
- if (voltage == SD_IO_1V8) {
+ if (voltage == OUTPUT_1V8) {
err = sd_wait_voltage_stable_2(host);
if (err < 0)
goto out;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 82a8de148a8f..a0c621421ee8 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -651,10 +651,9 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
#endif
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_request_and_ioremap(&pdev->dev, res);
- if (!host->ioaddr) {
- dev_err(dev, "failed to map registers\n");
- ret = -ENXIO;
+ host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->ioaddr)) {
+ ret = PTR_ERR(host->ioaddr);
goto err_req_regs;
}
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 73fcbbeb78d0..03f2eb5627ec 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -291,7 +291,7 @@ config SSFDC
config SM_FTL
tristate "SmartMedia/xD new translation layer"
- depends on EXPERIMENTAL && BLOCK
+ depends on BLOCK
select MTD_BLKDEVS
select MTD_NAND_ECC
help
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index e469b01d40d2..c219e3d098d9 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -225,7 +225,7 @@ config MTD_ABSENT
config MTD_XIP
bool "XIP aware MTD support"
- depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && EXPERIMENTAL && ARCH_MTD_XIP
+ depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && ARCH_MTD_XIP
default y if XIP_KERNEL
help
This allows MTD support to work with flash memory which is also
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 27f80cd8aef3..12311f506ca1 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -52,7 +52,7 @@ config MTD_MS02NV
config MTD_DATAFLASH
tristate "Support for AT45xxx DataFlash"
- depends on SPI_MASTER && EXPERIMENTAL
+ depends on SPI_MASTER
help
This enables access to AT45xxx DataFlash chips, using SPI.
Sometimes DataFlash chips are packaged inside MMC-format
@@ -81,7 +81,7 @@ config MTD_DATAFLASH_OTP
config MTD_M25P80
tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
- depends on SPI_MASTER && EXPERIMENTAL
+ depends on SPI_MASTER
help
This enables access to most modern SPI flash chips, used for
program and data storage. Series supported include Atmel AT26DF,
@@ -272,6 +272,7 @@ config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
select BCH_CONST_PARAMS
+ select BITREVERSE
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
G3 devices.
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 2aabd96bf0ff..8a82b8bc21e1 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -949,10 +949,9 @@ static int spear_smi_probe(struct platform_device *pdev)
smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->io_base = devm_request_and_ioremap(&pdev->dev, smi_base);
- if (!dev->io_base) {
- ret = -EIO;
- dev_err(&pdev->dev, "devm_request_and_ioremap fail\n");
+ dev->io_base = devm_ioremap_resource(&pdev->dev, smi_base);
+ if (IS_ERR(dev->io_base)) {
+ ret = PTR_ERR(dev->io_base);
goto err;
}
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index a2dc2ae4b24e..c3525d2a2fa8 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -16,6 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/types.h>
@@ -55,12 +56,10 @@ static int autcpu12_nvram_probe(struct platform_device *pdev)
priv->map.bankwidth = 4;
priv->map.phys = res->start;
priv->map.size = resource_size(res);
- priv->map.virt = devm_request_and_ioremap(&pdev->dev, res);
+ priv->map.virt = devm_ioremap_resource(&pdev->dev, res);
strcpy((char *)priv->map.name, res->name);
- if (!priv->map.virt) {
- dev_err(&pdev->dev, "failed to remap mem resource\n");
- return -EBUSY;
- }
+ if (IS_ERR(priv->map.virt))
+ return PTR_ERR(priv->map.virt);
simple_map_init(&priv->map);
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 3c3c791eb96a..d1da6ede3845 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -7,6 +7,7 @@
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -136,10 +137,9 @@ ltq_mtd_probe(struct platform_device *pdev)
ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
ltq_mtd->map->phys = ltq_mtd->res->start;
ltq_mtd->map->size = resource_size(ltq_mtd->res);
- ltq_mtd->map->virt = devm_request_and_ioremap(&pdev->dev, ltq_mtd->res);
- if (!ltq_mtd->map->virt) {
- dev_err(&pdev->dev, "failed to remap mem resource\n");
- err = -EBUSY;
+ ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
+ if (IS_ERR(ltq_mtd->map->virt)) {
+ err = PTR_ERR(ltq_mtd->map->virt);
goto err_out;
}
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 67cc73c18ddd..7901d72c9242 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -170,7 +170,7 @@ static int of_flash_probe(struct platform_device *dev)
resource_size_t res_size;
struct mtd_part_parser_data ppdata;
bool map_indirect;
- const char *mtd_name;
+ const char *mtd_name = NULL;
match = of_match_device(of_flash_match, &dev->dev);
if (!match)
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5819eb575210..81bf5e52601e 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -260,8 +260,7 @@ config MTD_NAND_S3C2410_CLKSTOP
approximately 5mA of power when there is nothing happening.
config MTD_NAND_DISKONCHIP
- tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
depends on HAS_IOMEM
select REED_SOLOMON
select REED_SOLOMON_DEC16
@@ -331,8 +330,8 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
parameter "inftl_bbt_write=1".
config MTD_NAND_DOCG4
- tristate "Support for DiskOnChip G4 (EXPERIMENTAL)"
- depends on EXPERIMENTAL && HAS_IOMEM
+ tristate "Support for DiskOnChip G4"
+ depends on HAS_IOMEM
select BCH
select BITREVERSE
help
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
index 86c9a79b89b3..595de4012e71 100644
--- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -17,8 +17,8 @@
#include "bcm47xxnflash.h"
/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
- * shown 164 retries as maxiumum. */
-#define NFLASH_READY_RETRIES 1000
+ * shown ~1000 retries as maximum. */
+#define NFLASH_READY_RETRIES 10000
#define NFLASH_SECTOR_SIZE 512
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 3502606f6480..feae55c7b880 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
{},
-}
+};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
static struct davinci_nand_pdata
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 67e62d3d495c..09af555408b7 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -937,42 +937,35 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
- host->data_va = devm_request_and_ioremap(&pdev->dev, res);
- if (!host->data_va) {
- dev_err(&pdev->dev, "data ioremap failed\n");
- return -ENOMEM;
- }
+ host->data_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->data_va))
+ return PTR_ERR(host->data_va);
+
host->data_pa = (dma_addr_t)res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
if (!res)
return -EINVAL;
- host->addr_va = devm_request_and_ioremap(&pdev->dev, res);
- if (!host->addr_va) {
- dev_err(&pdev->dev, "ale ioremap failed\n");
- return -ENOMEM;
- }
+ host->addr_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->addr_va))
+ return PTR_ERR(host->addr_va);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
if (!res)
return -EINVAL;
- host->cmd_va = devm_request_and_ioremap(&pdev->dev, res);
- if (!host->cmd_va) {
- dev_err(&pdev->dev, "ale ioremap failed\n");
- return -ENOMEM;
- }
+ host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->cmd_va))
+ return PTR_ERR(host->cmd_va);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
if (!res)
return -EINVAL;
- host->regs_va = devm_request_and_ioremap(&pdev->dev, res);
- if (!host->regs_va) {
- dev_err(&pdev->dev, "regs ioremap failed\n");
- return -ENOMEM;
- }
+ host->regs_va = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs_va))
+ return PTR_ERR(host->regs_va);
host->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
@@ -1218,6 +1211,7 @@ static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
#ifdef CONFIG_OF
static const struct of_device_id fsmc_nand_id_table[] = {
{ .compatible = "st,spear600-fsmc-nand" },
+ { .compatible = "stericsson,fsmc-nand" },
{}
};
MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index f182befa7360..0ca22ae9135c 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -677,11 +677,10 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
return -ENXIO;
}
- host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
- if (host->io_base == NULL) {
- dev_err(&pdev->dev, "ioremap failed\n");
- return -EIO;
- }
+ host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
+
host->io_base_phy = rc->start;
mtd = &host->mtd;
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 030b78c62895..be94ed5abefb 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -778,11 +778,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
host->io_base_dma = rc->start;
- host->io_base = devm_request_and_ioremap(&pdev->dev, rc);
- if (host->io_base == NULL) {
- dev_err(&pdev->dev, "ioremap failed\n");
- return -ENOMEM;
- }
+ host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ if (IS_ERR(host->io_base))
+ return PTR_ERR(host->io_base);
if (pdev->dev.of_node)
host->ncfg = lpc32xx_parse_dt(&pdev->dev);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 45204e41a028..60ac5b98b718 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1437,9 +1437,9 @@ static int mxcnd_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- host->regs_ip = devm_request_and_ioremap(&pdev->dev, res);
- if (!host->regs_ip)
- return -ENOMEM;
+ host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->regs_ip))
+ return PTR_ERR(host->regs_ip);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
} else {
@@ -1449,9 +1449,9 @@ static int mxcnd_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- host->base = devm_request_and_ioremap(&pdev->dev, res);
- if (!host->base)
- return -ENOMEM;
+ host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->base))
+ return PTR_ERR(host->base);
host->main_area0 = host->base;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8323ac991ad1..3766682a0289 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2857,8 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
int i;
int val;
- /* ONFI need to be probed in 8 bits mode */
- WARN_ON(chip->options & NAND_BUSWIDTH_16);
+ /* ONFI needs to be probed in 8-bit mode; 16-bit mode should be selected with NAND_BUSWIDTH_AUTO */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
+ return 0;
+ }
/* Try ONFI for unknown chip or LP */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0002d5e94f0d..1d333497cfcb 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1332,6 +1332,7 @@ static int omap_nand_probe(struct platform_device *pdev)
dma_cap_mask_t mask;
unsigned sig;
struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -1557,7 +1558,8 @@ static int omap_nand_probe(struct platform_device *pdev)
goto out_release_mem_region;
}
- mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
+ ppdata.of_node = pdata->of_node;
+ mtd_device_parse_register(&info->mtd, NULL, &ppdata, pdata->parts,
pdata->nr_parts);
platform_set_drvdata(pdev, &info->mtd);
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index df954b4dcba2..d65afd23e171 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -952,10 +952,9 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
info->platform = plat;
info->cpu_type = cpu_type;
- info->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (info->regs == NULL) {
- dev_err(&pdev->dev, "cannot reserve register region\n");
- err = -EIO;
+ info->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->regs)) {
+ err = PTR_ERR(info->regs);
goto exit_error;
}
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index e3d7266e256f..e1e8748aa47b 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -9,6 +9,7 @@
* (C) Copyright TOSHIBA CORPORATION 2004-2007
* All Rights Reserved.
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -286,9 +287,9 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->base = devm_request_and_ioremap(&dev->dev, res);
- if (!drvdata->base)
- return -EBUSY;
+ drvdata->base = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
hold = plat->hold ?: 20; /* tDH */
spw = plat->spw ?: 90; /* max(tREADID, tWP, tRP) */
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 065f3fe02a2f..eec2aedb4ab8 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -637,6 +637,7 @@ static int omap2_onenand_probe(struct platform_device *pdev)
struct onenand_chip *this;
int r;
struct resource *res;
+ struct mtd_part_parser_data ppdata = {};
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -767,7 +768,8 @@ static int omap2_onenand_probe(struct platform_device *pdev)
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
- r = mtd_device_parse_register(&c->mtd, NULL, NULL,
+ ppdata.of_node = pdata->of_node;
+ r = mtd_device_parse_register(&c->mtd, NULL, &ppdata,
pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
if (r)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6a70184c3f23..56c2d75a63d4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -123,8 +123,7 @@ config IFB
source "drivers/net/team/Kconfig"
config MACVLAN
- tristate "MAC-VLAN support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "MAC-VLAN support"
---help---
This allows one to create virtual interfaces that map packets to
or from specific MAC addresses to a particular interface.
@@ -138,7 +137,7 @@ config MACVLAN
will be called macvlan.
config MACVTAP
- tristate "MAC-VLAN based tap driver (EXPERIMENTAL)"
+ tristate "MAC-VLAN based tap driver"
depends on MACVLAN
help
This adds a specialized tap character device driver that is based
@@ -189,6 +188,10 @@ config NETPOLL_TRAP
config NET_POLL_CONTROLLER
def_bool NETPOLL
+config NTB_NETDEV
+ tristate "Virtual Ethernet over NTB"
+ depends on NTB
+
config RIONET
tristate "RapidIO Ethernet over messaging driver support"
depends on RAPIDIO
@@ -234,8 +237,8 @@ config VETH
versa.
config VIRTIO_NET
- tristate "Virtio network driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL && VIRTIO
+ tristate "Virtio network driver"
+ depends on VIRTIO
---help---
This is the virtual network driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 335db78fd987..ef3d090efedf 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -71,3 +71,4 @@ obj-$(CONFIG_USB_IPHETH) += usb/
obj-$(CONFIG_USB_CDC_PHONET) += usb/
obj-$(CONFIG_HYPERV_NET) += hyperv/
+obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index e3f0faca98d0..3a8c7532ee0d 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -37,35 +37,14 @@
ethernet adaptor have the name "eth[0123...]".
*/
-extern struct net_device *ne2_probe(int unit);
extern struct net_device *hp100_probe(int unit);
extern struct net_device *ultra_probe(int unit);
-extern struct net_device *ultra32_probe(int unit);
extern struct net_device *wd_probe(int unit);
-extern struct net_device *el2_probe(int unit);
extern struct net_device *ne_probe(int unit);
-extern struct net_device *hp_probe(int unit);
-extern struct net_device *hp_plus_probe(int unit);
-extern struct net_device *express_probe(int unit);
-extern struct net_device *eepro_probe(int unit);
-extern struct net_device *at1700_probe(int unit);
extern struct net_device *fmv18x_probe(int unit);
-extern struct net_device *eth16i_probe(int unit);
extern struct net_device *i82596_probe(int unit);
-extern struct net_device *ewrk3_probe(int unit);
-extern struct net_device *el1_probe(int unit);
-extern struct net_device *el16_probe(int unit);
-extern struct net_device *elmc_probe(int unit);
-extern struct net_device *elplus_probe(int unit);
-extern struct net_device *ac3200_probe(int unit);
-extern struct net_device *es_probe(int unit);
-extern struct net_device *lne390_probe(int unit);
-extern struct net_device *e2100_probe(int unit);
-extern struct net_device *ni5010_probe(int unit);
-extern struct net_device *ni52_probe(int unit);
extern struct net_device *ni65_probe(int unit);
extern struct net_device *sonic_probe(int unit);
-extern struct net_device *seeq8005_probe(int unit);
extern struct net_device *smc_init(int unit);
extern struct net_device *atarilance_probe(int unit);
extern struct net_device *sun3lance_probe(int unit);
@@ -77,13 +56,9 @@ extern struct net_device *tc515_probe(int unit);
extern struct net_device *lance_probe(int unit);
extern struct net_device *mac8390_probe(int unit);
extern struct net_device *mac89x0_probe(int unit);
-extern struct net_device *mc32_probe(int unit);
extern struct net_device *cops_probe(int unit);
extern struct net_device *ltpc_probe(void);
-/* Detachable devices ("pocket adaptors") */
-extern struct net_device *de620_probe(int unit);
-
/* Fibre Channel adapters */
extern int iph5526_probe(struct net_device *dev);
@@ -111,29 +86,6 @@ static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
}
/*
- * This is a bit of an artificial separation as there are PCI drivers
- * that also probe for EISA cards (in the PCI group) and there are ISA
- * drivers that probe for EISA cards (in the ISA group). These are the
- * legacy EISA only driver probes, and also the legacy PCI probes
- */
-
-static struct devprobe2 eisa_probes[] __initdata = {
-#ifdef CONFIG_ULTRA32
- {ultra32_probe, 0},
-#endif
-#ifdef CONFIG_AC3200
- {ac3200_probe, 0},
-#endif
-#ifdef CONFIG_ES3210
- {es_probe, 0},
-#endif
-#ifdef CONFIG_LNE390
- {lne390_probe, 0},
-#endif
- {NULL, 0},
-};
-
-/*
* ISA probes that touch addresses < 0x400 (including those that also
* look for EISA/PCI cards in addition to ISA cards).
*/
@@ -150,18 +102,6 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_WD80x3
{wd_probe, 0},
#endif
-#ifdef CONFIG_EL2 /* 3c503 */
- {el2_probe, 0},
-#endif
-#ifdef CONFIG_HPLAN
- {hp_probe, 0},
-#endif
-#ifdef CONFIG_HPLAN_PLUS
- {hp_plus_probe, 0},
-#endif
-#ifdef CONFIG_E2100 /* Cabletron E21xx series. */
- {e2100_probe, 0},
-#endif
#if defined(CONFIG_NE2000) || \
defined(CONFIG_NE_H8300) /* ISA (use ne2k-pci for PCI cards) */
{ne_probe, 0},
@@ -172,60 +112,20 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_SMC9194
{smc_init, 0},
#endif
-#ifdef CONFIG_SEEQ8005
- {seeq8005_probe, 0},
-#endif
#ifdef CONFIG_CS89x0
#ifndef CONFIG_CS89x0_PLATFORM
{cs89x0_probe, 0},
#endif
#endif
-#ifdef CONFIG_AT1700
- {at1700_probe, 0},
-#endif
-#ifdef CONFIG_ETH16I
- {eth16i_probe, 0}, /* ICL EtherTeam 16i/32 */
-#endif
-#ifdef CONFIG_EEXPRESS /* Intel EtherExpress */
- {express_probe, 0},
-#endif
-#ifdef CONFIG_EEXPRESS_PRO /* Intel EtherExpress Pro/10 */
- {eepro_probe, 0},
-#endif
-#ifdef CONFIG_EWRK3 /* DEC EtherWORKS 3 */
- {ewrk3_probe, 0},
-#endif
-#if defined(CONFIG_APRICOT) || defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel I82596 */
+#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel I82596 */
{i82596_probe, 0},
#endif
-#ifdef CONFIG_EL1 /* 3c501 */
- {el1_probe, 0},
-#endif
-#ifdef CONFIG_EL16 /* 3c507 */
- {el16_probe, 0},
-#endif
-#ifdef CONFIG_ELPLUS /* 3c505 */
- {elplus_probe, 0},
-#endif
-#ifdef CONFIG_NI5010
- {ni5010_probe, 0},
-#endif
-#ifdef CONFIG_NI52
- {ni52_probe, 0},
-#endif
#ifdef CONFIG_NI65
{ni65_probe, 0},
#endif
{NULL, 0},
};
-static struct devprobe2 parport_probes[] __initdata = {
-#ifdef CONFIG_DE620 /* D-Link DE-620 adapter */
- {de620_probe, 0},
-#endif
- {NULL, 0},
-};
-
static struct devprobe2 m68k_probes[] __initdata = {
#ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */
{atarilance_probe, 0},
@@ -264,9 +164,7 @@ static void __init ethif_probe2(int unit)
return;
(void)( probe_list2(unit, m68k_probes, base_addr == 0) &&
- probe_list2(unit, eisa_probes, base_addr == 0) &&
- probe_list2(unit, isa_probes, base_addr == 0) &&
- probe_list2(unit, parport_probes, base_addr == 0));
+ probe_list2(unit, isa_probes, base_addr == 0));
}
/* Statically configured drivers -- order matters here. */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a030e635f001..fc58d118d844 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -389,13 +389,13 @@ static u8 __get_duplex(struct port *port)
/**
* __initialize_port_locks - initialize a port's STATE machine spinlock
- * @port: the port we're looking at
+ * @port: the slave of the port we're looking at
*
*/
-static inline void __initialize_port_locks(struct port *port)
+static inline void __initialize_port_locks(struct slave *slave)
{
// make sure it isn't called twice
- spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+ spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock));
}
//conversions
@@ -1127,7 +1127,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
// INFO_RECEIVED_LOOPBACK_FRAMES
pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
"Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
- port->slave->dev->master->name, port->slave->dev->name);
+ port->slave->bond->dev->name, port->slave->dev->name);
return;
}
__update_selected(lacpdu, port);
@@ -1306,7 +1306,7 @@ static void ad_port_selection_logic(struct port *port)
}
if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
pr_warning("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
- port->slave->dev->master->name,
+ port->slave->bond->dev->name,
port->actor_port_number,
port->slave->dev->name,
port->aggregator->aggregator_identifier);
@@ -1386,7 +1386,7 @@ static void ad_port_selection_logic(struct port *port)
port->aggregator->aggregator_identifier);
} else {
pr_err("%s: Port %d (on %s) did not find a suitable aggregator\n",
- port->slave->dev->master->name,
+ port->slave->bond->dev->name,
port->actor_port_number, port->slave->dev->name);
}
}
@@ -1463,7 +1463,7 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
default:
pr_warning("%s: Impossible agg select mode %d\n",
- curr->slave->dev->master->name,
+ curr->slave->bond->dev->name,
__get_agg_selection_mode(curr->lag_ports));
break;
}
@@ -1571,7 +1571,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
// check if any partner replies
if (best->is_individual) {
pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
- best->slave ? best->slave->dev->master->name : "NULL");
+ best->slave ? best->slave->bond->dev->name : "NULL");
}
best->is_active = 1;
@@ -1898,7 +1898,7 @@ int bond_3ad_bind_slave(struct slave *slave)
if (bond == NULL) {
pr_err("%s: The slave %s is not attached to its bond\n",
- slave->dev->master->name, slave->dev->name);
+ slave->bond->dev->name, slave->dev->name);
return -1;
}
@@ -1910,6 +1910,7 @@ int bond_3ad_bind_slave(struct slave *slave)
ad_initialize_port(port, bond->params.lacp_fast);
+ __initialize_port_locks(slave);
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave).id;
// key is determined according to the link speed, duplex and user key(which is yet not supported)
@@ -1932,8 +1933,6 @@ int bond_3ad_bind_slave(struct slave *slave)
port->next_port_in_aggregator = NULL;
__disable_port(port);
- __initialize_port_locks(port);
-
// aggregator initialization
aggregator = &(SLAVE_AD_INFO(slave).aggregator);
@@ -1973,7 +1972,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
pr_warning("Warning: %s: Trying to unbind an uninitialized port on %s\n",
- slave->dev->master->name, slave->dev->name);
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -2009,7 +2008,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
pr_info("%s: Removing an active aggregator\n",
- aggregator->slave->dev->master->name);
+ aggregator->slave->bond->dev->name);
// select new active aggregator
select_new_active_agg = 1;
}
@@ -2040,7 +2039,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
ad_agg_selection_logic(__get_first_agg(port));
} else {
pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
- slave->dev->master->name);
+ slave->bond->dev->name);
}
} else { // in case that the only port related to this aggregator is the one we want to remove
select_new_active_agg = aggregator->is_active;
@@ -2048,7 +2047,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
ad_clear_agg(aggregator);
if (select_new_active_agg) {
pr_info("%s: Removing an active aggregator\n",
- slave->dev->master->name);
+ slave->bond->dev->name);
// select new active aggregator
ad_agg_selection_logic(__get_first_agg(port));
}
@@ -2076,7 +2075,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
pr_info("%s: Removing an active aggregator\n",
- slave->dev->master->name);
+ slave->bond->dev->name);
// select new active aggregator
ad_agg_selection_logic(__get_first_agg(port));
}
@@ -2184,7 +2183,7 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u1
if (!port->slave) {
pr_warning("%s: Warning: port of slave %s is uninitialized\n",
- slave->dev->name, slave->dev->master->name);
+ slave->dev->name, slave->bond->dev->name);
return ret;
}
@@ -2240,7 +2239,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
- slave->dev->master->name, slave->dev->name);
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -2268,7 +2267,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
- slave->dev->master->name, slave->dev->name);
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -2297,7 +2296,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
// if slave is null, the whole port is not initialized
if (!port->slave) {
pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
- slave->dev->master->name, slave->dev->name);
+ slave->bond->dev->name, slave->dev->name);
return;
}
@@ -2494,11 +2493,13 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
struct port *port = NULL;
int lacp_fast;
- read_lock(&bond->lock);
+ write_lock_bh(&bond->lock);
lacp_fast = bond->params.lacp_fast;
bond_for_each_slave(bond, slave, i) {
port = &(SLAVE_AD_INFO(slave).port);
+ if (port->slave == NULL)
+ continue;
__get_state_machine_lock(port);
if (lacp_fast)
port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
@@ -2507,5 +2508,5 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
__release_state_machine_lock(port);
}
- read_unlock(&bond->lock);
+ write_unlock_bh(&bond->lock);
}
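
The NULL check added to bond_3ad_update_lacp_rate() above is needed because the per-port state machine spinlock is now initialized only in bond_3ad_bind_slave(), via __initialize_port_locks(slave); a port that was never bound must be skipped before its lock is taken. A minimal sketch of that walker pattern, reusing the helpers shown above (the function name example_walk_ports is illustrative only):

static void example_walk_ports(struct bonding *bond)
{
	struct slave *slave;
	struct port *port;
	int i;

	write_lock_bh(&bond->lock);
	bond_for_each_slave(bond, slave, i) {
		port = &(SLAVE_AD_INFO(slave).port);
		if (port->slave == NULL)	/* never bound: lock not initialized */
			continue;
		__get_state_machine_lock(port);
		/* ...modify per-port 802.3ad state here... */
		__release_state_machine_lock(port);
	}
	write_unlock_bh(&bond->lock);
}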
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7c9d136e74be..f5e052723029 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -507,7 +507,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
client_info->mac_dst);
if (!skb) {
pr_err("%s: Error: failed to create an ARP packet\n",
- client_info->slave->dev->master->name);
+ client_info->slave->bond->dev->name);
continue;
}
@@ -517,7 +517,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
skb = vlan_put_tag(skb, client_info->vlan_id);
if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n",
- client_info->slave->dev->master->name);
+ client_info->slave->bond->dev->name);
continue;
}
}
@@ -1043,7 +1043,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
if (dev_set_mac_address(dev, &s_addr)) {
pr_err("%s: Error: dev_set_mac_address of dev %s failed!\n"
"ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
- dev->master->name, dev->name);
+ slave->bond->dev->name, dev->name);
return -EOPNOTSUPP;
}
return 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b7d45f367d4a..11d01d67b3f5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -746,11 +746,9 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
{
struct in_device *in_dev;
- rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev)
ip_mc_rejoin_groups(in_dev);
- rcu_read_unlock();
}
/*
@@ -760,9 +758,10 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
*/
static void bond_resend_igmp_join_requests(struct bonding *bond)
{
- struct net_device *bond_dev, *vlan_dev, *master_dev;
+ struct net_device *bond_dev, *vlan_dev, *upper_dev;
struct vlan_entry *vlan;
+ rcu_read_lock();
read_lock(&bond->lock);
bond_dev = bond->dev;
@@ -774,18 +773,14 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
* if bond is enslaved to a bridge,
* then rejoin all groups on its master
*/
- master_dev = bond_dev->master;
- if (master_dev)
- if ((master_dev->priv_flags & IFF_EBRIDGE)
- && (bond_dev->priv_flags & IFF_BRIDGE_PORT))
- __bond_resend_igmp_join_requests(master_dev);
+ upper_dev = netdev_master_upper_dev_get_rcu(bond_dev);
+ if (upper_dev && upper_dev->priv_flags & IFF_EBRIDGE)
+ __bond_resend_igmp_join_requests(upper_dev);
/* rejoin all groups on vlan devices */
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- rcu_read_lock();
vlan_dev = __vlan_find_dev_deep(bond_dev,
vlan->vlan_id);
- rcu_read_unlock();
if (vlan_dev)
__bond_resend_igmp_join_requests(vlan_dev);
}
@@ -794,13 +789,16 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
read_unlock(&bond->lock);
+ rcu_read_unlock();
}
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mcast_work.work);
+ rcu_read_lock();
bond_resend_igmp_join_requests(bond);
+ rcu_read_unlock();
}
/*
@@ -1251,7 +1249,7 @@ static inline void slave_disable_netpoll(struct slave *slave)
return;
slave->np = NULL;
- __netpoll_free_rcu(np);
+ __netpoll_free_async(np);
}
static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
{
@@ -1322,14 +1320,15 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
/*---------------------------------- IOCTL ----------------------------------*/
-static int bond_sethwaddr(struct net_device *bond_dev,
- struct net_device *slave_dev)
+static void bond_set_dev_addr(struct net_device *bond_dev,
+ struct net_device *slave_dev)
{
pr_debug("bond_dev=%p\n", bond_dev);
pr_debug("slave_dev=%p\n", slave_dev);
pr_debug("slave_dev->addr_len=%d\n", slave_dev->addr_len);
memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
- return 0;
+ bond_dev->addr_assign_type = NET_ADDR_SET;
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
}
static netdev_features_t bond_fix_features(struct net_device *dev,
@@ -1493,6 +1492,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
return ret;
}
+static int bond_master_upper_dev_link(struct net_device *bond_dev,
+ struct net_device *slave_dev)
+{
+ int err;
+
+ err = netdev_master_upper_dev_link(slave_dev, bond_dev);
+ if (err)
+ return err;
+ slave_dev->flags |= IFF_SLAVE;
+ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+ return 0;
+}
+
+static void bond_upper_dev_unlink(struct net_device *bond_dev,
+ struct net_device *slave_dev)
+{
+ netdev_upper_dev_unlink(slave_dev, bond_dev);
+ slave_dev->flags &= ~IFF_SLAVE;
+ rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
+}
+
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
@@ -1609,10 +1629,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (is_zero_ether_addr(bond->dev->dev_addr))
- memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
- slave_dev->addr_len);
-
+ if (bond->dev_addr_from_first)
+ bond_set_dev_addr(bond->dev, slave_dev);
new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
if (!new_slave) {
@@ -1655,9 +1673,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- res = netdev_set_bond_master(slave_dev, bond_dev);
+ res = bond_master_upper_dev_link(bond_dev, slave_dev);
if (res) {
- pr_debug("Error %d calling netdev_set_bond_master\n", res);
+ pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
goto err_restore_mac;
}
@@ -1891,7 +1909,7 @@ err_close:
dev_close(slave_dev);
err_unset_master:
- netdev_set_bond_master(slave_dev, NULL);
+ bond_upper_dev_unlink(bond_dev, slave_dev);
err_restore_mac:
if (!bond->params.fail_over_mac) {
@@ -1919,7 +1937,8 @@ err_undo_flags:
/*
* Try to release the slave device <slave> from the bond device <master>
* It is legal to access curr_active_slave without a lock because all the function
- * is write-locked.
+ * is write-locked. If "all" is true it means that the function is being called
+ * while destroying a bond interface and all slaves are being released.
*
* The rules for slave state should be:
* for Active/Backup:
@@ -1927,7 +1946,9 @@ err_undo_flags:
* for Bonded connections:
* The first up interface should be left on and all others downed.
*/
-int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+static int __bond_release_one(struct net_device *bond_dev,
+ struct net_device *slave_dev,
+ bool all)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
@@ -1936,7 +1957,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
- (slave_dev->master != bond_dev)) {
+ !netdev_has_upper_dev(slave_dev, bond_dev)) {
pr_err("%s: Error: cannot release %s.\n",
bond_dev->name, slave_dev->name);
return -EINVAL;
@@ -1964,7 +1985,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
synchronize_net();
write_lock_bh(&bond->lock);
- if (!bond->params.fail_over_mac) {
+ if (!all && !bond->params.fail_over_mac) {
if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond->slave_cnt > 1)
pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
@@ -2010,7 +2031,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
write_lock_bh(&bond->lock);
}
- if (oldcurrent == slave) {
+ if (all) {
+ bond->curr_active_slave = NULL;
+ } else if (oldcurrent == slave) {
/*
* Note that we hold RTNL over this sequence, so there
* is no concern that another slave add/remove event
@@ -2029,12 +2052,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
if (bond->slave_cnt == 0) {
bond_set_carrier(bond);
-
- /* if the last slave was removed, zero the mac address
- * of the master so it will be set by the application
- * to the mac address of the first slave
- */
- memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
+ eth_hw_addr_random(bond_dev);
+ bond->dev_addr_from_first = true;
if (bond_vlan_used(bond)) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
@@ -2080,7 +2099,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_unlock_bh(bond_dev);
}
- netdev_set_bond_master(slave_dev, NULL);
+ bond_upper_dev_unlink(bond_dev, slave_dev);
slave_disable_netpoll(slave);
@@ -2103,6 +2122,12 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
return 0; /* deletion OK */
}
+/* A wrapper used because of ndo_del_link */
+int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+{
+ return __bond_release_one(bond_dev, slave_dev, false);
+}
+
/*
* First release a slave and then destroy the bond if no more slaves are left.
* Must be under rtnl_lock when this function is called.
@@ -2124,121 +2149,6 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
}
/*
- * This function releases all slaves.
- */
-static int bond_release_all(struct net_device *bond_dev)
-{
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
- struct net_device *slave_dev;
- struct sockaddr addr;
-
- write_lock_bh(&bond->lock);
-
- netif_carrier_off(bond_dev);
-
- if (bond->slave_cnt == 0)
- goto out;
-
- bond->current_arp_slave = NULL;
- bond->primary_slave = NULL;
- bond_change_active_slave(bond, NULL);
-
- while ((slave = bond->first_slave) != NULL) {
- /* Inform AD package of unbinding of slave
- * before slave is detached from the list.
- */
- if (bond->params.mode == BOND_MODE_8023AD)
- bond_3ad_unbind_slave(slave);
-
- slave_dev = slave->dev;
- bond_detach_slave(bond, slave);
-
- /* now that the slave is detached, unlock and perform
- * all the undo steps that should not be called from
- * within a lock.
- */
- write_unlock_bh(&bond->lock);
-
- /* unregister rx_handler early so bond_handle_frame wouldn't
- * be called for this slave anymore.
- */
- netdev_rx_handler_unregister(slave_dev);
- synchronize_net();
-
- if (bond_is_lb(bond)) {
- /* must be called only after the slave
- * has been detached from the list
- */
- bond_alb_deinit_slave(bond, slave);
- }
-
- bond_destroy_slave_symlinks(bond_dev, slave_dev);
- bond_del_vlans_from_slave(bond, slave_dev);
-
- /* If the mode USES_PRIMARY, then we should only remove its
- * promisc and mc settings if it was the curr_active_slave, but that was
- * already taken care of above when we detached the slave
- */
- if (!USES_PRIMARY(bond->params.mode)) {
- /* unset promiscuity level from slave */
- if (bond_dev->flags & IFF_PROMISC)
- dev_set_promiscuity(slave_dev, -1);
-
- /* unset allmulti level from slave */
- if (bond_dev->flags & IFF_ALLMULTI)
- dev_set_allmulti(slave_dev, -1);
-
- /* flush master's mc_list from slave */
- netif_addr_lock_bh(bond_dev);
- bond_mc_list_flush(bond_dev, slave_dev);
- netif_addr_unlock_bh(bond_dev);
- }
-
- netdev_set_bond_master(slave_dev, NULL);
-
- slave_disable_netpoll(slave);
-
- /* close slave before restoring its mac address */
- dev_close(slave_dev);
-
- if (!bond->params.fail_over_mac) {
- /* restore original ("permanent") mac address*/
- memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
- addr.sa_family = slave_dev->type;
- dev_set_mac_address(slave_dev, &addr);
- }
-
- kfree(slave);
-
- /* re-acquire the lock before getting the next slave */
- write_lock_bh(&bond->lock);
- }
-
- /* zero the mac address of the master so it will be
- * set by the application to the mac address of the
- * first slave
- */
- memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
-
- if (bond_vlan_used(bond)) {
- pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
- bond_dev->name, bond_dev->name);
- pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
- bond_dev->name);
- }
-
- pr_info("%s: released all slaves\n", bond_dev->name);
-
-out:
- write_unlock_bh(&bond->lock);
-
- bond_compute_features(bond);
-
- return 0;
-}
-
-/*
* This function changes the active slave to slave <slave_dev>.
* It returns -EINVAL in the following cases.
* - <slave_dev> is not found in the list.
@@ -2259,8 +2169,9 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
if (!USES_PRIMARY(bond->params.mode))
return -EINVAL;
- /* Verify that master_dev is indeed the master of slave_dev */
- if (!(slave_dev->flags & IFF_SLAVE) || (slave_dev->master != bond_dev))
+ /* Verify that bond_dev is indeed the master of slave_dev */
+ if (!(slave_dev->flags & IFF_SLAVE) ||
+ !netdev_has_upper_dev(slave_dev, bond_dev))
return -EINVAL;
read_lock(&bond->lock);
@@ -3258,36 +3169,32 @@ static int bond_master_netdev_event(unsigned long event,
static int bond_slave_netdev_event(unsigned long event,
struct net_device *slave_dev)
{
- struct net_device *bond_dev = slave_dev->master;
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave = NULL;
+ struct slave *slave = bond_slave_get_rtnl(slave_dev);
+ struct bonding *bond = slave->bond;
+ struct net_device *bond_dev = slave->bond->dev;
+ u32 old_speed;
+ u8 old_duplex;
switch (event) {
case NETDEV_UNREGISTER:
- if (bond_dev) {
- if (bond->setup_by_slave)
- bond_release_and_destroy(bond_dev, slave_dev);
- else
- bond_release(bond_dev, slave_dev);
- }
+ if (bond->setup_by_slave)
+ bond_release_and_destroy(bond_dev, slave_dev);
+ else
+ bond_release(bond_dev, slave_dev);
break;
case NETDEV_UP:
case NETDEV_CHANGE:
- slave = bond_get_slave_by_dev(bond, slave_dev);
- if (slave) {
- u32 old_speed = slave->speed;
- u8 old_duplex = slave->duplex;
+ old_speed = slave->speed;
+ old_duplex = slave->duplex;
- bond_update_speed_duplex(slave);
+ bond_update_speed_duplex(slave);
- if (bond->params.mode == BOND_MODE_8023AD) {
- if (old_speed != slave->speed)
- bond_3ad_adapter_speed_changed(slave);
- if (old_duplex != slave->duplex)
- bond_3ad_adapter_duplex_changed(slave);
- }
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ if (old_speed != slave->speed)
+ bond_3ad_adapter_speed_changed(slave);
+ if (old_duplex != slave->duplex)
+ bond_3ad_adapter_duplex_changed(slave);
}
-
break;
case NETDEV_DOWN:
/*
@@ -3604,6 +3511,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
struct ifslave k_sinfo;
struct ifslave __user *u_sinfo = NULL;
struct mii_ioctl_data *mii = NULL;
+ struct net *net;
int res = 0;
pr_debug("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd);
@@ -3670,10 +3578,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
break;
}
- if (!capable(CAP_NET_ADMIN))
+ net = dev_net(bond_dev);
+
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave);
+ slave_dev = dev_get_by_name(net, ifr->ifr_slave);
pr_debug("slave_dev=%p:\n", slave_dev);
@@ -3692,7 +3602,8 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
break;
case BOND_SETHWADDR_OLD:
case SIOCBONDSETHWADDR:
- res = bond_sethwaddr(bond_dev, slave_dev);
+ bond_set_dev_addr(bond_dev, slave_dev);
+ res = 0;
break;
case BOND_CHANGE_ACTIVE_OLD:
case SIOCBONDCHANGEACTIVE:
@@ -4314,11 +4225,12 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
}
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, DRV_NAME, 32);
- strncpy(drvinfo->version, DRV_VERSION, 32);
- snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
+ BOND_ABI_VERSION);
}
static const struct ethtool_ops bond_ethtool_ops = {
@@ -4352,6 +4264,10 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_fix_features = bond_fix_features,
};
+static const struct device_type bond_type = {
+ .name = "bond",
+};
+
static void bond_destructor(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -4382,6 +4298,8 @@ static void bond_setup(struct net_device *bond_dev)
bond_dev->destructor = bond_destructor;
+ SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
+
/* Initialize the device options */
bond_dev->tx_queue_len = 0;
bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
@@ -4427,7 +4345,9 @@ static void bond_uninit(struct net_device *bond_dev)
bond_netpoll_cleanup(bond_dev);
/* Release the bonded slaves */
- bond_release_all(bond_dev);
+ while (bond->first_slave != NULL)
+ __bond_release_one(bond_dev, bond->first_slave->dev, true);
+ pr_info("%s: released all slaves\n", bond_dev->name);
list_del(&bond->bond_list);
@@ -4841,6 +4761,13 @@ static int bond_init(struct net_device *bond_dev)
bond_debug_register(bond);
+ /* Ensure valid dev_addr */
+ if (is_zero_ether_addr(bond_dev->dev_addr) &&
+ bond_dev->addr_assign_type == NET_ADDR_PERM) {
+ eth_hw_addr_random(bond_dev);
+ bond->dev_addr_from_first = true;
+ }
+
__hw_addr_init(&bond->mc_list);
return 0;
}
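
Throughout bond_main.c the old dev->master pointer is replaced by the adjacency-list API: checks of the form slave_dev->master == bond_dev become netdev_has_upper_dev(), and reads of the master go through netdev_master_upper_dev_get_rcu() under rcu_read_lock(). A rough sketch of both idioms as used above (example_* names are placeholders, not kernel functions):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* true if slave_dev is currently enslaved to bond_dev; caller holds RTNL */
static bool example_is_bonded_to(struct net_device *slave_dev,
				 struct net_device *bond_dev)
{
	return (slave_dev->flags & IFF_SLAVE) &&
	       netdev_has_upper_dev(slave_dev, bond_dev);
}

/* peek at the master (if any) without holding RTNL */
static void example_peek_master(struct net_device *dev)
{
	struct net_device *upper;

	rcu_read_lock();
	upper = netdev_master_upper_dev_get_rcu(dev);
	if (upper)
		pr_info("%s is enslaved to %s\n", dev->name, upper->name);
	rcu_read_unlock();
}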
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1877ed7ca086..1c9e09fbdff8 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
pr_info("%s: Setting primary slave to None.\n",
bond->dev->name);
bond->primary_slave = NULL;
+ memset(bond->params.primary, 0, sizeof(bond->params.primary));
bond_select_active_slave(bond);
goto out;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 21b68e5c14fd..2baec24388b1 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -248,6 +248,7 @@ struct bonding {
/* debugging support via debugfs */
struct dentry *debug_dir;
#endif /* CONFIG_DEBUG_FS */
+ bool dev_addr_from_first;
};
static inline bool bond_vlan_used(struct bonding *bond)
@@ -258,6 +259,9 @@ static inline bool bond_vlan_used(struct bonding *bond)
#define bond_slave_get_rcu(dev) \
((struct slave *) rcu_dereference(dev->rx_handler_data))
+#define bond_slave_get_rtnl(dev) \
+ ((struct slave *) rtnl_dereference(dev->rx_handler_data))
+
/**
* Returns NULL if the net_device does not belong to any of the bond's slaves
*
@@ -280,11 +284,9 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
{
- if (!slave || !slave->dev->master) {
+ if (!slave || !slave->bond)
return NULL;
- }
-
- return netdev_priv(slave->dev->master);
+ return slave->bond;
}
static inline bool bond_is_lb(const struct bonding *bond)
@@ -360,10 +362,9 @@ static inline void bond_netpoll_send_skb(const struct slave *slave,
static inline void bond_set_slave_inactive_flags(struct slave *slave)
{
- struct bonding *bond = netdev_priv(slave->dev->master);
- if (!bond_is_lb(bond))
+ if (!bond_is_lb(slave->bond))
bond_set_backup_slave(slave);
- if (!bond->params.all_slaves_active)
+ if (!slave->bond->params.all_slaves_active)
slave->inactive = 1;
}
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index abf4d7a9dcce..60c2142373c9 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -6,7 +6,7 @@ comment "CAIF transport drivers"
config CAIF_TTY
tristate "CAIF TTY transport driver"
- depends on CAIF
+ depends on CAIF && TTY
default n
---help---
The CAIF TTY transport driver is a Line Discipline (ldisc)
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 5de74e762021..666891a9a248 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -91,7 +91,7 @@ static inline void update_tty_status(struct ser_device *ser)
ser->tty->hw_stopped << 4 |
ser->tty->flow_stopped << 3 |
ser->tty->packet << 2 |
- ser->tty->low_latency << 1 |
+ ser->tty->port->low_latency << 1 |
ser->tty->warned;
}
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index bc497d718858..bce8bac311c9 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -633,9 +633,6 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
kmalloc(sizeof(struct buf_list), GFP_KERNEL);
if (tx_buf == NULL) {
- pr_warn("ERROR, Could not"
- " allocate dynamic mem. for tx_buf,"
- " Bailing out ...\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENOMEM;
}
@@ -662,9 +659,6 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
kmalloc(sizeof(struct buf_list), GFP_KERNEL);
if (rx_buf == NULL) {
- pr_warn("ERROR, Could not"
- " allocate dynamic mem.for rx_buf,"
- " Bailing out ...\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENOMEM;
}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index b56bd9e80957..9862b2e07644 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -1,9 +1,7 @@
menu "CAN Device Drivers"
- depends on CAN
config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"
- depends on CAN
---help---
Similar to the network loopback devices, vcan offers a
virtual local CAN interface.
@@ -13,7 +11,7 @@ config CAN_VCAN
config CAN_SLCAN
tristate "Serial / USB serial CAN Adaptors (slcan)"
- depends on CAN
+ depends on TTY
---help---
CAN driver for several 'low cost' CAN interfaces that are attached
via serial lines or via USB-to-serial adapters using the LAWICEL
@@ -33,16 +31,16 @@ config CAN_SLCAN
config CAN_DEV
tristate "Platform CAN drivers with Netlink support"
- depends on CAN
default y
---help---
Enables the common framework for platform CAN drivers with Netlink
support. This is the standard library for CAN drivers.
If unsure, say Y.
+if CAN_DEV
+
config CAN_CALC_BITTIMING
bool "CAN bit-timing calculation"
- depends on CAN_DEV
default y
---help---
If enabled, CAN bit-timing parameters will be calculated for the
@@ -54,15 +52,26 @@ config CAN_CALC_BITTIMING
arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
If unsure, say Y.
+config CAN_LEDS
+ bool "Enable LED triggers for Netlink based drivers"
+ depends on LEDS_CLASS
+ select LEDS_TRIGGERS
+ ---help---
+ This option adds two LED triggers for packet receive and transmit
+ events on each supported CAN device.
+
+ Say Y here if you are working on a system with led-class supported
+ LEDs and you want to use them as canbus activity indicators.
+
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
- depends on CAN_DEV && (ARCH_AT91SAM9263 || ARCH_AT91SAM9X5)
+ depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9X5
---help---
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
config CAN_TI_HECC
- depends on CAN_DEV && ARCH_OMAP3
+ depends on ARCH_OMAP3
tristate "TI High End CAN Controller"
---help---
Driver for TI HECC (High End CAN Controller) module found on many
@@ -70,12 +79,12 @@ config CAN_TI_HECC
config CAN_MCP251X
tristate "Microchip MCP251x SPI CAN controllers"
- depends on CAN_DEV && SPI && HAS_DMA
+ depends on SPI && HAS_DMA
---help---
Driver for the Microchip MCP251x SPI CAN controllers.
config CAN_BFIN
- depends on CAN_DEV && (BF534 || BF536 || BF537 || BF538 || BF539 || BF54x)
+ depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
tristate "Analog Devices Blackfin on-chip CAN"
---help---
Driver for the Analog Devices Blackfin on-chip CAN controllers
@@ -85,7 +94,7 @@ config CAN_BFIN
config CAN_JANZ_ICAN3
tristate "Janz VMOD-ICAN3 Intelligent CAN controller"
- depends on CAN_DEV && MFD_JANZ_CMODIO
+ depends on MFD_JANZ_CMODIO
---help---
Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which
connects to a MODULbus carrier board.
@@ -98,13 +107,13 @@ config HAVE_CAN_FLEXCAN
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
- depends on CAN_DEV && HAVE_CAN_FLEXCAN
+ depends on HAVE_CAN_FLEXCAN
---help---
Say Y here if you want to support for Freescale FlexCAN.
config PCH_CAN
tristate "Intel EG20T PCH CAN controller"
- depends on CAN_DEV && PCI
+ depends on PCI
---help---
This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
is an IOH for x86 embedded processor (Intel Atom E6xx series).
@@ -112,7 +121,7 @@ config PCH_CAN
config CAN_GRCAN
tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
- depends on CAN_DEV && OF
+ depends on OF
---help---
Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
Note that the driver supports little endian, even though little
@@ -131,9 +140,10 @@ source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
+endif
+
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
- depends on CAN
---help---
Say Y here if you want the CAN device drivers to produce a bunch of
debug messages to the system log. Select this if you are having
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 7de59862bbe9..c7440392adbb 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_CAN_SLCAN) += slcan.o
obj-$(CONFIG_CAN_DEV) += can-dev.o
can-dev-y := dev.o
+can-dev-$(CONFIG_CAN_LEDS) += led.o
+
obj-y += usb/
obj-y += softing/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 81baefda037b..44f363792b59 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -37,6 +37,7 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#define AT91_MB_MASK(i) ((1 << (i)) - 1)
@@ -641,6 +642,8 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+
+ can_led_event(dev, CAN_LED_EVENT_RX);
}
/**
@@ -875,6 +878,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
dev->stats.tx_packets++;
+ can_led_event(dev, CAN_LED_EVENT_TX);
}
}
@@ -1128,6 +1132,8 @@ static int at91_open(struct net_device *dev)
goto out_close;
}
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
/* start chip and queuing */
at91_chip_start(dev);
napi_enable(&priv->napi);
@@ -1159,6 +1165,8 @@ static int at91_close(struct net_device *dev)
close_candev(dev);
+ can_led_event(dev, CAN_LED_EVENT_STOP);
+
return 0;
}
@@ -1321,6 +1329,8 @@ static int at91_can_probe(struct platform_device *pdev)
goto exit_free;
}
+ devm_can_led_init(dev);
+
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
priv->reg_base, dev->irq);
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 3b83bafcd947..61ffc12d8fd8 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -1,6 +1,6 @@
menuconfig CAN_C_CAN
tristate "Bosch C_CAN/D_CAN devices"
- depends on CAN_DEV && HAS_IOMEM
+ depends on HAS_IOMEM
if CAN_C_CAN
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 5233b8f58d77..a668cd491cb3 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -39,6 +39,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#include "c_can.h"
@@ -477,6 +478,8 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
+ can_led_event(dev, CAN_LED_EVENT_RX);
+
return 0;
}
@@ -488,8 +491,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
IFX_WRITE_LOW_16BIT(mask));
+
+ /* According to C_CAN documentation, the reserved bit
+ * in IFx_MASK2 register is fixed 1
+ */
priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
- IFX_WRITE_HIGH_16BIT(mask));
+ IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
IFX_WRITE_LOW_16BIT(id));
@@ -751,6 +758,7 @@ static void c_can_do_tx(struct net_device *dev)
C_CAN_IFACE(MSGCTRL_REG, 0))
& IF_MCONT_DLC_MASK;
stats->tx_packets++;
+ can_led_event(dev, CAN_LED_EVENT_TX);
c_can_inval_msg_object(dev, 0, msg_obj_no);
} else {
break;
@@ -960,7 +968,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
case LEC_ACK_ERROR:
netdev_dbg(dev, "ack error\n");
- cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+ cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
CAN_ERR_PROT_LOC_ACK_DEL);
break;
case LEC_BIT1_ERROR:
@@ -973,7 +981,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
case LEC_CRC_ERROR:
netdev_dbg(dev, "CRC error\n");
- cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL);
break;
default:
@@ -1115,6 +1123,8 @@ static int c_can_open(struct net_device *dev)
napi_enable(&priv->napi);
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
/* start the c_can controller */
c_can_start(dev);
@@ -1143,6 +1153,8 @@ static int c_can_close(struct net_device *dev)
c_can_reset_ram(priv, false);
c_can_pm_runtime_put_sync(priv);
+ can_led_event(dev, CAN_LED_EVENT_STOP);
+
return 0;
}
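
The cf->data[2] to cf->data[3] changes here (and in pch_can.c and ti_hecc.c below) follow the CAN error frame layout in linux/can/error.h: data[2] carries the protocol error type (CAN_ERR_PROT_*), while data[3] carries the error location (CAN_ERR_PROT_LOC_*). A minimal sketch of filling a bus-error frame with that split (example_report_ack_error is an illustrative name; alloc_can_err_skb() is the existing helper from drivers/net/can/dev.c):

#include <linux/can/dev.h>
#include <linux/can/error.h>

static void example_report_ack_error(struct net_device *dev)
{
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_err_skb(dev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	cf->data[2] = CAN_ERR_PROT_TX;		/* type of protocol error */
	cf->data[3] = CAN_ERR_PROT_LOC_ACK;	/* where in the frame it hit */

	netif_rx(skb);
}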
@@ -1268,6 +1280,8 @@ int register_c_can_dev(struct net_device *dev)
err = register_candev(dev);
if (err)
c_can_pm_runtime_disable(priv);
+ else
+ devm_can_led_init(dev);
return err;
}
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
index 22c07a8c8b43..6a9a5ba79220 100644
--- a/drivers/net/can/cc770/Kconfig
+++ b/drivers/net/can/cc770/Kconfig
@@ -1,6 +1,6 @@
menuconfig CAN_CC770
tristate "Bosch CC770 and Intel AN82527 devices"
- depends on CAN_DEV && HAS_IOMEM
+ depends on HAS_IOMEM
if CAN_CC770
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 8233e5ed2939..f9cba4123c66 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -24,7 +24,9 @@
#include <linux/if_arp.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/can/netlink.h>
+#include <linux/can/led.h>
#include <net/rtnetlink.h>
#define MOD_DESC "CAN device driver interface"
@@ -501,13 +503,18 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
- skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct can_frame));
if (unlikely(!skb))
return NULL;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
memset(*cf, 0, sizeof(struct can_frame));
@@ -794,10 +801,25 @@ void unregister_candev(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(unregister_candev);
+/*
+ * Test if a network device is a candev based device
+ * and return the can_priv* if so.
+ */
+struct can_priv *safe_candev_priv(struct net_device *dev)
+{
+ if ((dev->type != ARPHRD_CAN) || (dev->rtnl_link_ops != &can_link_ops))
+ return NULL;
+
+ return netdev_priv(dev);
+}
+EXPORT_SYMBOL_GPL(safe_candev_priv);
+
static __init int can_dev_init(void)
{
int err;
+ can_led_notifier_init();
+
err = rtnl_link_register(&can_link_ops);
if (!err)
printk(KERN_INFO MOD_DESC "\n");
@@ -809,6 +831,8 @@ module_init(can_dev_init);
static __exit void can_dev_exit(void)
{
rtnl_link_unregister(&can_link_ops);
+
+ can_led_notifier_exit();
}
module_exit(can_dev_exit);
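
alloc_can_skb() now reserves room for struct can_skb_priv ahead of the CAN frame and records the originating ifindex, so drivers that build their skbs by hand must follow the same pattern (slcan.c below is converted this way). A condensed sketch of that hand-rolled receive path, assuming a bound net_device and an already decoded struct can_frame (example_deliver_frame is an illustrative name):

#include <linux/can.h>
#include <linux/can/skb.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

static int example_deliver_frame(struct net_device *dev,
				 const struct can_frame *cf)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(sizeof(struct can_skb_priv) +
			    sizeof(struct can_frame));
	if (!skb)
		return -ENOMEM;

	skb->dev = dev;
	skb->protocol = htons(ETH_P_CAN);
	skb->pkt_type = PACKET_BROADCAST;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	can_skb_reserve(skb);			/* reserve headroom for can_skb_priv */
	can_skb_prv(skb)->ifindex = dev->ifindex;

	memcpy(skb_put(skb, sizeof(*cf)), cf, sizeof(*cf));

	netif_rx_ni(skb);
	return 0;
}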
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 0289a6d86f66..769d29ed106d 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -23,6 +23,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#include <linux/can/platform/flexcan.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -564,6 +565,8 @@ static int flexcan_read_frame(struct net_device *dev)
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ can_led_event(dev, CAN_LED_EVENT_RX);
+
return 1;
}
@@ -652,6 +655,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) {
stats->tx_bytes += can_get_echo_skb(dev, 0);
stats->tx_packets++;
+ can_led_event(dev, CAN_LED_EVENT_TX);
flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
netif_wake_queue(dev);
}
@@ -865,6 +869,9 @@ static int flexcan_open(struct net_device *dev)
err = flexcan_chip_start(dev);
if (err)
goto out_close;
+
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -893,6 +900,8 @@ static int flexcan_close(struct net_device *dev)
close_candev(dev);
+ can_led_event(dev, CAN_LED_EVENT_STOP);
+
return 0;
}
@@ -1092,6 +1101,8 @@ static int flexcan_probe(struct platform_device *pdev)
goto failed_register;
}
+ devm_can_led_init(dev);
+
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
priv->base, dev->irq);
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
new file mode 100644
index 000000000000..f27fca65dc4a
--- /dev/null
+++ b/drivers/net/can/led.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
+ * Copyright 2012, Kurt Van Dijck <kurt.van.dijck@eia.be>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/can/dev.h>
+
+#include <linux/can/led.h>
+
+static unsigned long led_delay = 50;
+module_param(led_delay, ulong, 0644);
+MODULE_PARM_DESC(led_delay,
+ "blink delay time for activity leds (msecs, default: 50).");
+
+/* Trigger a LED event in response to a CAN device event */
+void can_led_event(struct net_device *netdev, enum can_led_event event)
+{
+ struct can_priv *priv = netdev_priv(netdev);
+
+ switch (event) {
+ case CAN_LED_EVENT_OPEN:
+ led_trigger_event(priv->tx_led_trig, LED_FULL);
+ led_trigger_event(priv->rx_led_trig, LED_FULL);
+ break;
+ case CAN_LED_EVENT_STOP:
+ led_trigger_event(priv->tx_led_trig, LED_OFF);
+ led_trigger_event(priv->rx_led_trig, LED_OFF);
+ break;
+ case CAN_LED_EVENT_TX:
+ if (led_delay)
+ led_trigger_blink_oneshot(priv->tx_led_trig,
+ &led_delay, &led_delay, 1);
+ break;
+ case CAN_LED_EVENT_RX:
+ if (led_delay)
+ led_trigger_blink_oneshot(priv->rx_led_trig,
+ &led_delay, &led_delay, 1);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(can_led_event);
+
+static void can_led_release(struct device *gendev, void *res)
+{
+ struct can_priv *priv = netdev_priv(to_net_dev(gendev));
+
+ led_trigger_unregister_simple(priv->tx_led_trig);
+ led_trigger_unregister_simple(priv->rx_led_trig);
+}
+
+/* Register CAN LED triggers for a CAN device
+ *
+ * This is normally called from a driver's probe function
+ */
+void devm_can_led_init(struct net_device *netdev)
+{
+ struct can_priv *priv = netdev_priv(netdev);
+ void *res;
+
+ res = devres_alloc(can_led_release, 0, GFP_KERNEL);
+ if (!res) {
+ netdev_err(netdev, "cannot register LED triggers\n");
+ return;
+ }
+
+ snprintf(priv->tx_led_trig_name, sizeof(priv->tx_led_trig_name),
+ "%s-tx", netdev->name);
+ snprintf(priv->rx_led_trig_name, sizeof(priv->rx_led_trig_name),
+ "%s-rx", netdev->name);
+
+ led_trigger_register_simple(priv->tx_led_trig_name,
+ &priv->tx_led_trig);
+ led_trigger_register_simple(priv->rx_led_trig_name,
+ &priv->rx_led_trig);
+
+ devres_add(&netdev->dev, res);
+}
+EXPORT_SYMBOL_GPL(devm_can_led_init);
+
+/* NETDEV rename notifier to rename the associated led triggers too */
+static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
+ void *data)
+{
+ struct net_device *netdev = data;
+ struct can_priv *priv = safe_candev_priv(netdev);
+ char name[CAN_LED_NAME_SZ];
+
+ if (!priv)
+ return NOTIFY_DONE;
+
+ if (msg == NETDEV_CHANGENAME) {
+ snprintf(name, sizeof(name), "%s-tx", netdev->name);
+ led_trigger_rename_static(name, priv->tx_led_trig);
+
+ snprintf(name, sizeof(name), "%s-rx", netdev->name);
+ led_trigger_rename_static(name, priv->rx_led_trig);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* notifier block for netdevice event */
+static struct notifier_block can_netdev_notifier __read_mostly = {
+ .notifier_call = can_led_notifier,
+};
+
+int __init can_led_notifier_init(void)
+{
+ return register_netdevice_notifier(&can_netdev_notifier);
+}
+
+void __exit can_led_notifier_exit(void)
+{
+ unregister_netdevice_notifier(&can_netdev_notifier);
+}
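
The new led.c is consumed the same way by every driver touched in this series: call devm_can_led_init() once after register_candev() succeeds, then signal can_led_event() on open/stop and on each received or transmitted frame. A condensed sketch of those hook points (the example_* functions are placeholders for a real driver's probe and RX completion paths):

#include <linux/can/dev.h>
#include <linux/can/led.h>

static int example_probe_register(struct net_device *dev)
{
	int err = register_candev(dev);

	if (!err)
		devm_can_led_init(dev);	/* triggers are released with the netdev */
	return err;
}

static void example_rx_complete(struct net_device *dev, unsigned int dlc)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += dlc;
	can_led_event(dev, CAN_LED_EVENT_RX);	/* one-shot blink of the <ifname>-rx trigger */
}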
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 5eaf47b8e37b..f32b9fc6a983 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -60,6 +60,7 @@
#include <linux/can/core.h>
#include <linux/can/dev.h>
+#include <linux/can/led.h>
#include <linux/can/platform/mcp251x.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -494,6 +495,9 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
priv->net->stats.rx_packets++;
priv->net->stats.rx_bytes += frame->can_dlc;
+
+ can_led_event(priv->net, CAN_LED_EVENT_RX);
+
netif_rx_ni(skb);
}
@@ -707,6 +711,8 @@ static int mcp251x_stop(struct net_device *net)
mutex_unlock(&priv->mcp_lock);
+ can_led_event(net, CAN_LED_EVENT_STOP);
+
return 0;
}
@@ -905,6 +911,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (intf & CANINTF_TX) {
net->stats.tx_packets++;
net->stats.tx_bytes += priv->tx_len - 1;
+ can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_len) {
can_get_echo_skb(net, 0);
priv->tx_len = 0;
@@ -968,6 +975,9 @@ static int mcp251x_open(struct net_device *net)
mcp251x_open_clean(net);
goto open_unlock;
}
+
+ can_led_event(net, CAN_LED_EVENT_OPEN);
+
netif_wake_queue(net);
open_unlock:
@@ -1077,10 +1087,15 @@ static int mcp251x_can_probe(struct spi_device *spi)
pdata->transceiver_enable(0);
ret = register_candev(net);
- if (!ret) {
- dev_info(&spi->dev, "probed\n");
- return ret;
- }
+ if (ret)
+ goto error_probe;
+
+ devm_can_led_init(net);
+
+ dev_info(&spi->dev, "probed\n");
+
+ return ret;
+
error_probe:
if (!mcp251x_enable_dma)
kfree(priv->spi_rx_buf);
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index d38706958af6..f19be5269e7b 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
config CAN_MSCAN
- depends on CAN_DEV && (PPC || M68K)
+ depends on PPC || M68K
tristate "Support for Freescale MSCAN based chips"
---help---
The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 7d1748575b1f..5c314a961970 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -560,7 +560,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
stats->rx_errors++;
break;
case PCH_CRC_ERR:
- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL;
priv->can.can_stats.bus_error++;
stats->rx_errors++;
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 92f73c708a3d..b39ca5b3ea7f 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -1,6 +1,6 @@
menuconfig CAN_SJA1000
tristate "Philips/NXP SJA1000 devices"
- depends on CAN_DEV && HAS_IOMEM
+ depends on HAS_IOMEM
if CAN_SJA1000
@@ -99,11 +99,11 @@ config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
depends on ISA
help
- This driver is for Technologic Systems' TSCAN-1 PC104 boards.
- http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
- The driver supports multiple boards and automatically configures them:
- PLD IO base addresses are read from jumpers JP1 and JP2,
- IRQ numbers are read from jumpers JP4 and JP5,
- SJA1000 IO base addresses are chosen heuristically (first that works).
+ This driver is for Technologic Systems' TSCAN-1 PC104 boards.
+ http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ The driver supports multiple boards and automatically configures them:
+ PLD IO base addresses are read from jumpers JP1 and JP2,
+ IRQ numbers are read from jumpers JP4 and JP5,
+ SJA1000 IO base addresses are chosen heuristically (first that works).
endif
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 036a326836b2..36d298da2af6 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -238,7 +238,6 @@ static int ems_pci_add_card(struct pci_dev *pdev,
/* Allocating card structures to hold addresses, ... */
card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL);
if (card == NULL) {
- dev_err(&pdev->dev, "Unable to allocate memory\n");
pci_disable_device(pdev);
return -ENOMEM;
}
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index d84888f03d92..d1e7f1006ddd 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -339,8 +339,7 @@ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s)
*/
static void peak_pciec_start_led_work(struct peak_pciec_card *card)
{
- if (!delayed_work_pending(&card->led_work))
- schedule_delayed_work(&card->led_work, HZ);
+ schedule_delayed_work(&card->led_work, HZ);
}
/*
@@ -451,11 +450,8 @@ static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev)
} else {
/* create the bit banging I2C adapter structure */
card = kzalloc(sizeof(struct peak_pciec_card), GFP_KERNEL);
- if (!card) {
- dev_err(&pdev->dev,
- "failed allocating memory for i2c chip\n");
+ if (!card)
return -ENOMEM;
- }
card->cfg_base = chan->cfg_base;
card->reg_base = priv->reg_base;
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index f1175142b0a0..1a7020ba37f5 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -660,7 +660,6 @@ static int pcan_probe(struct pcmcia_device *pdev)
card = kzalloc(sizeof(struct pcan_pccard), GFP_KERNEL);
if (!card) {
- dev_err(&pdev->dev, "couldn't allocate card memory\n");
err = -ENOMEM;
goto probe_err_2;
}
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 11d1062a9449..a042cdc260dc 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -508,7 +508,6 @@ static int plx_pci_add_card(struct pci_dev *pdev,
/* Allocate card structures to hold addresses, ... */
card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card) {
- dev_err(&pdev->dev, "Unable to allocate memory\n");
pci_disable_device(pdev);
return -ENOMEM;
}
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 83ee11eca0e2..daf4013a8fc7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -60,6 +60,7 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#include "sja1000.h"
@@ -368,6 +369,8 @@ static void sja1000_rx(struct net_device *dev)
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+
+ can_led_event(dev, CAN_LED_EVENT_RX);
}
static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
@@ -521,6 +524,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
can_get_echo_skb(dev, 0);
}
netif_wake_queue(dev);
+ can_led_event(dev, CAN_LED_EVENT_TX);
}
if (isrc & IRQ_RI) {
/* receive interrupt */
@@ -575,6 +579,8 @@ static int sja1000_open(struct net_device *dev)
/* init and start chip */
sja1000_start(dev);
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
netif_start_queue(dev);
return 0;
@@ -592,6 +598,8 @@ static int sja1000_close(struct net_device *dev)
close_candev(dev);
+ can_led_event(dev, CAN_LED_EVENT_STOP);
+
return 0;
}
@@ -639,6 +647,8 @@ static const struct net_device_ops sja1000_netdev_ops = {
int register_sja1000dev(struct net_device *dev)
{
+ int ret;
+
if (!sja1000_probe_chip(dev))
return -ENODEV;
@@ -648,7 +658,12 @@ int register_sja1000dev(struct net_device *dev)
set_reset_mode(dev);
chipset_init(dev);
- return register_candev(dev);
+ ret = register_candev(dev);
+
+ if (!ret)
+ devm_can_led_init(dev);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(register_sja1000dev);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index adc3708d8829..06b7e097d36e 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -55,6 +55,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/can.h>
+#include <linux/can/skb.h>
static __initconst const char banner[] =
KERN_INFO "slcan: serial line CAN interface driver\n";
@@ -184,7 +185,8 @@ static void slc_bump(struct slcan *sl)
cf.data[i] |= tmp;
}
- skb = dev_alloc_skb(sizeof(struct can_frame));
+ skb = dev_alloc_skb(sizeof(struct can_frame) +
+ sizeof(struct can_skb_priv));
if (!skb)
return;
@@ -192,6 +194,10 @@ static void slc_bump(struct slcan *sl)
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = sl->dev->ifindex;
+
memcpy(skb_put(skb, sizeof(struct can_frame)),
&cf, sizeof(struct can_frame));
netif_rx_ni(skb);
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 5de46a9a77bb..96b6fe158b5b 100644
--- a/drivers/net/can/softing/Kconfig
+++ b/drivers/net/can/softing/Kconfig
@@ -1,6 +1,6 @@
config CAN_SOFTING
tristate "Softing Gmbh CAN generic support"
- depends on CAN_DEV && HAS_IOMEM
+ depends on HAS_IOMEM
---help---
Support for CAN cards from Softing Gmbh & some cards
from Vector Gmbh.
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f898c6363729..f21fc37ec578 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -50,6 +50,7 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/can/led.h>
#include <linux/can/platform/ti_hecc.h>
#define DRV_NAME "ti_hecc"
@@ -593,6 +594,7 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
stats->rx_bytes += cf->can_dlc;
+ can_led_event(priv->ndev, CAN_LED_EVENT_RX);
netif_receive_skb(skb);
stats->rx_packets++;
@@ -746,12 +748,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
}
if (err_status & HECC_CANES_CRCE) {
hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
CAN_ERR_PROT_LOC_CRC_DEL;
}
if (err_status & HECC_CANES_ACKE) {
hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
- cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
CAN_ERR_PROT_LOC_ACK_DEL;
}
}
@@ -796,6 +798,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
stats->tx_bytes += hecc_read_mbx(priv, mbxno,
HECC_CANMCF) & 0xF;
stats->tx_packets++;
+ can_led_event(ndev, CAN_LED_EVENT_TX);
can_get_echo_skb(ndev, mbxno);
--priv->tx_tail;
}
@@ -851,6 +854,8 @@ static int ti_hecc_open(struct net_device *ndev)
return err;
}
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+
ti_hecc_start(ndev);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -869,6 +874,8 @@ static int ti_hecc_close(struct net_device *ndev)
close_candev(ndev);
ti_hecc_transceiver_switch(priv, 0);
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+
return 0;
}
@@ -961,6 +968,9 @@ static int ti_hecc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "register_candev() failed\n");
goto probe_exit_clk;
}
+
+ devm_can_led_init(ndev);
+
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
priv->base, (u32) ndev->irq);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index a4e4bee35710..fc96a3d83ebe 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -1,5 +1,5 @@
menu "CAN USB interfaces"
- depends on USB && CAN_DEV
+ depends on USB
config CAN_EMS_USB
tristate "EMS CPC-USB/ARM7 CAN/USB interface"
@@ -48,4 +48,10 @@ config CAN_PEAK_USB
This driver supports the PCAN-USB and PCAN-USB Pro adapters
from PEAK-System Technik (http://www.peak-system.com).
+config CAN_8DEV_USB
+ tristate "8 devices USB2CAN interface"
+ ---help---
+ This driver supports the USB2CAN interface
+ from 8 devices (http://www.8devices.com).
+
endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 80a2ee41fd61..becef460a91a 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
+obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index c69f0b72b352..5f9a7ad9b964 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1014,17 +1014,13 @@ static int ems_usb_probe(struct usb_interface *intf,
}
dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
- if (!dev->intr_in_buffer) {
- dev_err(&intf->dev, "Couldn't alloc Intr buffer\n");
+ if (!dev->intr_in_buffer)
goto cleanup_intr_urb;
- }
dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
sizeof(struct ems_cpc_msg), GFP_KERNEL);
- if (!dev->tx_msg_buffer) {
- dev_err(&intf->dev, "Couldn't alloc Tx buffer\n");
+ if (!dev->tx_msg_buffer)
goto cleanup_intr_in_buffer;
- }
usb_set_intfdata(intf, dev);
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 5b58a4d87397..45cb9f3c1324 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -561,7 +561,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
if (!buf) {
- netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
return -ENOMEM;
}
@@ -1268,7 +1267,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
if (!buf) {
- netdev_err(netdev, "No memory left for USB buffer\n");
stats->tx_dropped++;
goto nobufmem;
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index d9290ea788e0..a0f647f92bf5 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -386,7 +386,6 @@ static int peak_usb_start(struct peak_usb_device *dev)
buf = kmalloc(dev->adapter->rx_buffer_size, GFP_KERNEL);
if (!buf) {
- netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
err = -ENOMEM;
break;
@@ -442,7 +441,6 @@ static int peak_usb_start(struct peak_usb_device *dev)
buf = kmalloc(dev->adapter->tx_buffer_size, GFP_KERNEL);
if (!buf) {
- netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
err = -ENOMEM;
break;
@@ -634,7 +632,6 @@ static int peak_usb_restart(struct peak_usb_device *dev)
/* also allocate enough space for the commands to send */
buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_ATOMIC);
if (!buf) {
- netdev_err(dev->netdev, "no memory left for async cmd\n");
usb_free_urb(urb);
return -ENOMEM;
}
@@ -729,8 +726,6 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
/* allocate a buffer large enough to send commands */
dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
if (!dev->cmd_buf) {
- dev_err(&intf->dev, "%s: couldn't alloc cmd buffer\n",
- PCAN_USB_DRIVER_NAME);
err = -ENOMEM;
goto lbl_set_intf_data;
}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
new file mode 100644
index 000000000000..6e15ef08f301
--- /dev/null
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -0,0 +1,1031 @@
+/*
+ * CAN driver for "8 devices" USB2CAN converter
+ *
+ * Copyright (C) 2012 Bernd Krumboeck (krumboeck@universalnet.at)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.
+ *
+ * This driver is inspired by the 3.2.0 version of drivers/net/can/usb/ems_usb.c
+ * and drivers/net/can/usb/esd_usb2.c
+ *
+ * Many thanks to Gerhard Bertelsmann (info@gerhard-bertelsmann.de)
+ * for testing and fixing this driver. Also many thanks to "8 devices",
+ * who were very cooperative and answered my questions.
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/led.h>
+
+/* driver constants */
+#define MAX_RX_URBS 20
+#define MAX_TX_URBS 20
+#define RX_BUFFER_SIZE 64
+
+/* vendor and product id */
+#define USB_8DEV_VENDOR_ID 0x0483
+#define USB_8DEV_PRODUCT_ID 0x1234
+
+/* endpoints */
+enum usb_8dev_endpoint {
+ USB_8DEV_ENDP_DATA_RX = 1,
+ USB_8DEV_ENDP_DATA_TX,
+ USB_8DEV_ENDP_CMD_RX,
+ USB_8DEV_ENDP_CMD_TX
+};
+
+/* device CAN clock */
+#define USB_8DEV_ABP_CLOCK 32000000
+
+/* setup flags */
+#define USB_8DEV_SILENT 0x01
+#define USB_8DEV_LOOPBACK 0x02
+#define USB_8DEV_DISABLE_AUTO_RESTRANS 0x04
+#define USB_8DEV_STATUS_FRAME 0x08
+
+/* commands */
+enum usb_8dev_cmd {
+ USB_8DEV_RESET = 1,
+ USB_8DEV_OPEN,
+ USB_8DEV_CLOSE,
+ USB_8DEV_SET_SPEED,
+ USB_8DEV_SET_MASK_FILTER,
+ USB_8DEV_GET_STATUS,
+ USB_8DEV_GET_STATISTICS,
+ USB_8DEV_GET_SERIAL,
+ USB_8DEV_GET_SOFTW_VER,
+ USB_8DEV_GET_HARDW_VER,
+ USB_8DEV_RESET_TIMESTAMP,
+ USB_8DEV_GET_SOFTW_HARDW_VER
+};
+
+/* command options */
+#define USB_8DEV_BAUD_MANUAL 0x09
+#define USB_8DEV_CMD_START 0x11
+#define USB_8DEV_CMD_END 0x22
+
+#define USB_8DEV_CMD_SUCCESS 0
+#define USB_8DEV_CMD_ERROR 255
+
+#define USB_8DEV_CMD_TIMEOUT 1000
+
+/* frames */
+#define USB_8DEV_DATA_START 0x55
+#define USB_8DEV_DATA_END 0xAA
+
+#define USB_8DEV_TYPE_CAN_FRAME 0
+#define USB_8DEV_TYPE_ERROR_FRAME 3
+
+#define USB_8DEV_EXTID 0x01
+#define USB_8DEV_RTR 0x02
+#define USB_8DEV_ERR_FLAG 0x04
+
+/* status */
+#define USB_8DEV_STATUSMSG_OK 0x00 /* Normal condition. */
+#define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occurred when sending */
+#define USB_8DEV_STATUSMSG_BUSLIGHT 0x02 /* Error counter has reached 96 */
+#define USB_8DEV_STATUSMSG_BUSHEAVY 0x03 /* Error counter has reached 128 */
+#define USB_8DEV_STATUSMSG_BUSOFF 0x04 /* Device is in BUSOFF */
+#define USB_8DEV_STATUSMSG_STUFF 0x20 /* Stuff Error */
+#define USB_8DEV_STATUSMSG_FORM 0x21 /* Form Error */
+#define USB_8DEV_STATUSMSG_ACK 0x23 /* Ack Error */
+#define USB_8DEV_STATUSMSG_BIT0 0x24 /* Bit1 Error */
+#define USB_8DEV_STATUSMSG_BIT1 0x25 /* Bit0 Error */
+#define USB_8DEV_STATUSMSG_CRC 0x27 /* CRC Error */
+
+#define USB_8DEV_RP_MASK 0x7F /* Mask for the receive error counter */
+
+
+/* table of devices that work with this driver */
+static const struct usb_device_id usb_8dev_table[] = {
+ { USB_DEVICE(USB_8DEV_VENDOR_ID, USB_8DEV_PRODUCT_ID) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_8dev_table);
+
+struct usb_8dev_tx_urb_context {
+ struct usb_8dev_priv *priv;
+
+ u32 echo_index;
+ u8 dlc;
+};
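+
+/* Note: a context whose echo_index equals MAX_TX_URBS is treated as free;
+ * probe, unlink_all_urbs() and the tx completion callback reset the index
+ * to that sentinel value.
+ */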
+
+/* Structure to hold all of our device specific stuff */
+struct usb_8dev_priv {
+ struct can_priv can; /* must be the first member */
+
+ struct sk_buff *echo_skb[MAX_TX_URBS];
+
+ struct usb_device *udev;
+ struct net_device *netdev;
+
+ atomic_t active_tx_urbs;
+ struct usb_anchor tx_submitted;
+ struct usb_8dev_tx_urb_context tx_contexts[MAX_TX_URBS];
+
+ struct usb_anchor rx_submitted;
+
+ struct can_berr_counter bec;
+
+ u8 *cmd_msg_buffer;
+
+ struct mutex usb_8dev_cmd_lock;
+
+};
+
+/* tx frame */
+struct __packed usb_8dev_tx_msg {
+ u8 begin;
+ u8 flags; /* RTR and EXT_ID flag */
+ __be32 id; /* upper 3 bits not used */
+ u8 dlc; /* data length code 0-8 bytes */
+ u8 data[8]; /* 64-bit data */
+ u8 end;
+};
+
+/* rx frame */
+struct __packed usb_8dev_rx_msg {
+ u8 begin;
+ u8 type; /* frame type */
+ u8 flags; /* RTR and EXT_ID flag */
+ __be32 id; /* upper 3 bits not used */
+ u8 dlc; /* data length code 0-8 bytes */
+ u8 data[8]; /* 64-bit data */
+ __be32 timestamp; /* 32-bit timestamp */
+ u8 end;
+};
+
+/* command frame */
+struct __packed usb_8dev_cmd_msg {
+ u8 begin;
+ u8 channel; /* unknown - always 0 */
+ u8 command; /* command to execute */
+ u8 opt1; /* optional parameter / return value */
+ u8 opt2; /* optional parameter 2 */
+ u8 data[10]; /* optional parameter and data */
+ u8 end;
+};
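+
+/* The command frame above is 16 bytes long (1 + 1 + 1 + 1 + 1 + 10 + 1),
+ * which is the reply length usb_8dev_send_cmd() checks for.
+ */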
+
+static int usb_8dev_send_cmd_msg(struct usb_8dev_priv *priv, u8 *msg, int size)
+{
+ int actual_length;
+
+ return usb_bulk_msg(priv->udev,
+ usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_CMD_TX),
+ msg, size, &actual_length, USB_8DEV_CMD_TIMEOUT);
+}
+
+static int usb_8dev_wait_cmd_msg(struct usb_8dev_priv *priv, u8 *msg, int size,
+ int *actual_length)
+{
+ return usb_bulk_msg(priv->udev,
+ usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_CMD_RX),
+ msg, size, actual_length, USB_8DEV_CMD_TIMEOUT);
+}
+
+/* Send command to device and receive result.
+ * Command was successful when opt1 = 0.
+ */
+static int usb_8dev_send_cmd(struct usb_8dev_priv *priv,
+ struct usb_8dev_cmd_msg *out,
+ struct usb_8dev_cmd_msg *in)
+{
+ int err;
+ int num_bytes_read;
+ struct net_device *netdev;
+
+ netdev = priv->netdev;
+
+ out->begin = USB_8DEV_CMD_START;
+ out->end = USB_8DEV_CMD_END;
+
+ mutex_lock(&priv->usb_8dev_cmd_lock);
+
+ memcpy(priv->cmd_msg_buffer, out,
+ sizeof(struct usb_8dev_cmd_msg));
+
+ err = usb_8dev_send_cmd_msg(priv, priv->cmd_msg_buffer,
+ sizeof(struct usb_8dev_cmd_msg));
+ if (err < 0) {
+ netdev_err(netdev, "sending command message failed\n");
+ goto failed;
+ }
+
+ err = usb_8dev_wait_cmd_msg(priv, priv->cmd_msg_buffer,
+ sizeof(struct usb_8dev_cmd_msg),
+ &num_bytes_read);
+ if (err < 0) {
+ netdev_err(netdev, "no command message answer\n");
+ goto failed;
+ }
+
+ memcpy(in, priv->cmd_msg_buffer, sizeof(struct usb_8dev_cmd_msg));
+
+ if (in->begin != USB_8DEV_CMD_START || in->end != USB_8DEV_CMD_END ||
+ num_bytes_read != 16 || in->opt1 != 0)
+ err = -EPROTO;
+
+failed:
+ mutex_unlock(&priv->usb_8dev_cmd_lock);
+ return err;
+}
+
+/* Send open command to device */
+static int usb_8dev_cmd_open(struct usb_8dev_priv *priv)
+{
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct usb_8dev_cmd_msg outmsg;
+ struct usb_8dev_cmd_msg inmsg;
+ u32 ctrlmode = priv->can.ctrlmode;
+ u32 flags = USB_8DEV_STATUS_FRAME;
+ __be32 beflags;
+ __be16 bebrp;
+
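+ /* OPEN payload layout (see the assignments below): data[0] = tseg1
+ * (prop_seg + phase_seg1), data[1] = tseg2, data[2] = sjw,
+ * data[3..4] = BRP (big endian), data[5..8] = mode flags (big endian).
+ */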
+ memset(&outmsg, 0, sizeof(outmsg));
+ outmsg.command = USB_8DEV_OPEN;
+ outmsg.opt1 = USB_8DEV_BAUD_MANUAL;
+ outmsg.data[0] = bt->prop_seg + bt->phase_seg1;
+ outmsg.data[1] = bt->phase_seg2;
+ outmsg.data[2] = bt->sjw;
+
+ /* BRP */
+ bebrp = cpu_to_be16((u16)bt->brp);
+ memcpy(&outmsg.data[3], &bebrp, sizeof(bebrp));
+
+ /* flags */
+ if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ flags |= USB_8DEV_LOOPBACK;
+ if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ flags |= USB_8DEV_SILENT;
+ if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ flags |= USB_8DEV_DISABLE_AUTO_RESTRANS;
+
+ beflags = cpu_to_be32(flags);
+ memcpy(&outmsg.data[5], &beflags, sizeof(beflags));
+
+ return usb_8dev_send_cmd(priv, &outmsg, &inmsg);
+}
+
+/* Send close command to device */
+static int usb_8dev_cmd_close(struct usb_8dev_priv *priv)
+{
+ struct usb_8dev_cmd_msg inmsg;
+ struct usb_8dev_cmd_msg outmsg = {
+ .channel = 0,
+ .command = USB_8DEV_CLOSE,
+ .opt1 = 0,
+ .opt2 = 0
+ };
+
+ return usb_8dev_send_cmd(priv, &outmsg, &inmsg);
+}
+
+/* Get firmware and hardware version */
+static int usb_8dev_cmd_version(struct usb_8dev_priv *priv, u32 *res)
+{
+ struct usb_8dev_cmd_msg inmsg;
+ struct usb_8dev_cmd_msg outmsg = {
+ .channel = 0,
+ .command = USB_8DEV_GET_SOFTW_HARDW_VER,
+ .opt1 = 0,
+ .opt2 = 0
+ };
+
+ int err = usb_8dev_send_cmd(priv, &outmsg, &inmsg);
+ if (err)
+ return err;
+
+ *res = be32_to_cpup((__be32 *)inmsg.data);
+
+ return err;
+}
+
+/* Set network device mode
+ *
+ * Maybe we should leave this function empty, because the device
+ * sets its mode via the open command.
+ */
+static int usb_8dev_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ struct usb_8dev_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = usb_8dev_cmd_open(priv);
+ if (err)
+ netdev_warn(netdev, "couldn't start device");
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+/* Read error/status frames */
+static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
+ struct usb_8dev_rx_msg *msg)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ /* Error message:
+ * byte 0: Status
+ * byte 1: bit 7: Receive Passive
+ * byte 1: bit 0-6: Receive Error Counter
+ * byte 2: Transmit Error Counter
+ * byte 3: Always 0 (maybe reserved for future use)
+ */
+
+ u8 state = msg->data[0];
+ u8 rxerr = msg->data[1] & USB_8DEV_RP_MASK;
+ u8 txerr = msg->data[2];
+ int rx_errors = 0;
+ int tx_errors = 0;
+
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (!skb)
+ return;
+
+ switch (state) {
+ case USB_8DEV_STATUSMSG_OK:
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ break;
+ case USB_8DEV_STATUSMSG_BUSOFF:
+ priv->can.state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(priv->netdev);
+ break;
+ case USB_8DEV_STATUSMSG_OVERRUN:
+ case USB_8DEV_STATUSMSG_BUSLIGHT:
+ case USB_8DEV_STATUSMSG_BUSHEAVY:
+ cf->can_id |= CAN_ERR_CRTL;
+ break;
+ default:
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ priv->can.can_stats.bus_error++;
+ break;
+ }
+
+ switch (state) {
+ case USB_8DEV_STATUSMSG_OK:
+ case USB_8DEV_STATUSMSG_BUSOFF:
+ break;
+ case USB_8DEV_STATUSMSG_ACK:
+ cf->can_id |= CAN_ERR_ACK;
+ tx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_CRC:
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ rx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_BIT0:
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ tx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_BIT1:
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ tx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ rx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ rx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_OVERRUN:
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ rx_errors = 1;
+ break;
+ case USB_8DEV_STATUSMSG_BUSLIGHT:
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ priv->can.can_stats.error_warning++;
+ break;
+ case USB_8DEV_STATUSMSG_BUSHEAVY:
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ break;
+ default:
+ netdev_warn(priv->netdev,
+ "Unknown status/error message (%d)\n", state);
+ break;
+ }
+
+ if (tx_errors) {
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ stats->tx_errors++;
+ }
+
+ if (rx_errors)
+ stats->rx_errors++;
+
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ priv->bec.txerr = txerr;
+ priv->bec.rxerr = rxerr;
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+/* Read data and status frames */
+static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
+ struct usb_8dev_rx_msg *msg)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ if (msg->type == USB_8DEV_TYPE_ERROR_FRAME &&
+ msg->flags == USB_8DEV_ERR_FLAG) {
+ usb_8dev_rx_err_msg(priv, msg);
+ } else if (msg->type == USB_8DEV_TYPE_CAN_FRAME) {
+ skb = alloc_can_skb(priv->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id = be32_to_cpu(msg->id);
+ cf->can_dlc = get_can_dlc(msg->dlc & 0xF);
+
+ if (msg->flags & USB_8DEV_EXTID)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (msg->flags & USB_8DEV_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, msg->data, cf->can_dlc);
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ can_led_event(priv->netdev, CAN_LED_EVENT_RX);
+ } else {
+ netdev_warn(priv->netdev, "frame type %d unknown",
+ msg->type);
+ }
+
+}
+
+/* Callback for reading data from device
+ *
+ * Check urb status, call read function and resubmit urb read operation.
+ */
+static void usb_8dev_read_bulk_callback(struct urb *urb)
+{
+ struct usb_8dev_priv *priv = urb->context;
+ struct net_device *netdev;
+ int retval;
+ int pos = 0;
+
+ netdev = priv->netdev;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+
+ default:
+ netdev_info(netdev, "Rx URB aborted (%d)\n",
+ urb->status);
+ goto resubmit_urb;
+ }
+
+ while (pos < urb->actual_length) {
+ struct usb_8dev_rx_msg *msg;
+
+ if (pos + sizeof(struct usb_8dev_rx_msg) > urb->actual_length) {
+ netdev_err(priv->netdev, "format error\n");
+ break;
+ }
+
+ msg = (struct usb_8dev_rx_msg *)(urb->transfer_buffer + pos);
+ usb_8dev_rx_can_msg(priv, msg);
+
+ pos += sizeof(struct usb_8dev_rx_msg);
+ }
+
+resubmit_urb:
+ usb_fill_bulk_urb(urb, priv->udev,
+ usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX),
+ urb->transfer_buffer, RX_BUFFER_SIZE,
+ usb_8dev_read_bulk_callback, priv);
+
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (retval == -ENODEV)
+ netif_device_detach(netdev);
+ else if (retval)
+ netdev_err(netdev,
+ "failed resubmitting read bulk urb: %d\n", retval);
+}
+
+/* Callback handler for write operations
+ *
+ * Free allocated buffers, check transmit status and
+ * calculate statistic.
+ */
+static void usb_8dev_write_bulk_callback(struct urb *urb)
+{
+ struct usb_8dev_tx_urb_context *context = urb->context;
+ struct usb_8dev_priv *priv;
+ struct net_device *netdev;
+
+ BUG_ON(!context);
+
+ priv = context->priv;
+ netdev = priv->netdev;
+
+ /* free up our allocated buffer */
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+
+ atomic_dec(&priv->active_tx_urbs);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ netdev_info(netdev, "Tx URB aborted (%d)\n",
+ urb->status);
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += context->dlc;
+
+ can_get_echo_skb(netdev, context->echo_index);
+
+ can_led_event(netdev, CAN_LED_EVENT_TX);
+
+ /* Release context */
+ context->echo_index = MAX_TX_URBS;
+
+ netif_wake_queue(netdev);
+}
+
+/* Send data to device */
+static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct usb_8dev_priv *priv = netdev_priv(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf = (struct can_frame *) skb->data;
+ struct usb_8dev_tx_msg *msg;
+ struct urb *urb;
+ struct usb_8dev_tx_urb_context *context = NULL;
+ u8 *buf;
+ int i, err;
+ size_t size = sizeof(struct usb_8dev_tx_msg);
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* create a URB, and a buffer for it, and copy the data to the URB */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URBs\n");
+ goto nomem;
+ }
+
+ buf = usb_alloc_coherent(priv->udev, size, GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!buf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ goto nomembuf;
+ }
+
+ memset(buf, 0, size);
+
+ msg = (struct usb_8dev_tx_msg *)buf;
+ msg->begin = USB_8DEV_DATA_START;
+ msg->flags = 0x00;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ msg->flags |= USB_8DEV_RTR;
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ msg->flags |= USB_8DEV_EXTID;
+
+ msg->id = cpu_to_be32(cf->can_id & CAN_ERR_MASK);
+ msg->dlc = cf->can_dlc;
+ memcpy(msg->data, cf->data, cf->can_dlc);
+ msg->end = USB_8DEV_DATA_END;
+
+ for (i = 0; i < MAX_TX_URBS; i++) {
+ if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+ context = &priv->tx_contexts[i];
+ break;
+ }
+ }
+
+ /* May never happen! If it does, we would have more URBs in flight
+ * than allowed (MAX_TX_URBS).
+ */
+ if (!context)
+ goto nofreecontext;
+
+ context->priv = priv;
+ context->echo_index = i;
+ context->dlc = cf->can_dlc;
+
+ usb_fill_bulk_urb(urb, priv->udev,
+ usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX),
+ buf, size, usb_8dev_write_bulk_callback, context);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
+ can_put_echo_skb(skb, netdev, context->echo_index);
+
+ atomic_inc(&priv->active_tx_urbs);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err))
+ goto failed;
+ else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
+ /* Slow down tx path */
+ netif_stop_queue(netdev);
+
+ /* Release our reference to this URB, the USB core will eventually free
+ * it entirely.
+ */
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+nofreecontext:
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+
+ netdev_warn(netdev, "couldn't find free context");
+
+ return NETDEV_TX_BUSY;
+
+failed:
+ can_free_echo_skb(netdev, context->echo_index);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+
+ atomic_dec(&priv->active_tx_urbs);
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_warn(netdev, "failed tx_urb %d\n", err);
+
+nomembuf:
+ usb_free_urb(urb);
+
+nomem:
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+static int usb_8dev_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct usb_8dev_priv *priv = netdev_priv(netdev);
+
+ bec->txerr = priv->bec.txerr;
+ bec->rxerr = priv->bec.rxerr;
+
+ return 0;
+}
+
+/* Start USB device */
+static int usb_8dev_start(struct usb_8dev_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ int err, i;
+
+ for (i = 0; i < MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+ u8 *buf;
+
+ /* create a URB, and a buffer for it */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URBs\n");
+ err = -ENOMEM;
+ break;
+ }
+
+ buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!buf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ err = -ENOMEM;
+ break;
+ }
+
+ usb_fill_bulk_urb(urb, priv->udev,
+ usb_rcvbulkpipe(priv->udev,
+ USB_8DEV_ENDP_DATA_RX),
+ buf, RX_BUFFER_SIZE,
+ usb_8dev_read_bulk_callback, priv);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &priv->rx_submitted);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf,
+ urb->transfer_dma);
+ break;
+ }
+
+ /* Drop reference, USB core will take care of freeing it */
+ usb_free_urb(urb);
+ }
+
+ /* Did we submit any URBs? */
+ if (i == 0) {
+ netdev_warn(netdev, "couldn't setup read URBs\n");
+ return err;
+ }
+
+ /* Warn if we couldn't submit all the URBs */
+ if (i < MAX_RX_URBS)
+ netdev_warn(netdev, "rx performance may be slow\n");
+
+ err = usb_8dev_cmd_open(priv);
+ if (err)
+ goto failed;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+
+failed:
+ if (err == -ENODEV)
+ netif_device_detach(priv->netdev);
+
+ netdev_warn(netdev, "couldn't submit control: %d\n", err);
+
+ return err;
+}
+
+/* Open USB device */
+static int usb_8dev_open(struct net_device *netdev)
+{
+ struct usb_8dev_priv *priv = netdev_priv(netdev);
+ int err;
+
+ /* common open */
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ can_led_event(netdev, CAN_LED_EVENT_OPEN);
+
+ /* finally start device */
+ err = usb_8dev_start(priv);
+ if (err) {
+ if (err == -ENODEV)
+ netif_device_detach(priv->netdev);
+
+ netdev_warn(netdev, "couldn't start device: %d\n",
+ err);
+
+ close_candev(netdev);
+
+ return err;
+ }
+
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+static void unlink_all_urbs(struct usb_8dev_priv *priv)
+{
+ int i;
+
+ usb_kill_anchored_urbs(&priv->rx_submitted);
+
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+ atomic_set(&priv->active_tx_urbs, 0);
+
+ for (i = 0; i < MAX_TX_URBS; i++)
+ priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+}
+
+/* Close USB device */
+static int usb_8dev_close(struct net_device *netdev)
+{
+ struct usb_8dev_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ /* Send CLOSE command to CAN controller */
+ err = usb_8dev_cmd_close(priv);
+ if (err)
+ netdev_warn(netdev, "couldn't stop device");
+
+ priv->can.state = CAN_STATE_STOPPED;
+
+ netif_stop_queue(netdev);
+
+ /* Stop polling */
+ unlink_all_urbs(priv);
+
+ close_candev(netdev);
+
+ can_led_event(netdev, CAN_LED_EVENT_STOP);
+
+ return err;
+}
+
+static const struct net_device_ops usb_8dev_netdev_ops = {
+ .ndo_open = usb_8dev_open,
+ .ndo_stop = usb_8dev_close,
+ .ndo_start_xmit = usb_8dev_start_xmit,
+};
+
+static const struct can_bittiming_const usb_8dev_bittiming_const = {
+ .name = "usb_8dev",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
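+
+/* These limits, together with priv->can.clock.freq set in probe, are what
+ * the CAN core uses to compute the bittiming values consumed by
+ * usb_8dev_cmd_open().
+ */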
+
+/* Probe USB device
+ *
+ * Check device and firmware.
+ * Set supported modes and bittiming constants.
+ * Allocate some memory.
+ */
+static int usb_8dev_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct net_device *netdev;
+ struct usb_8dev_priv *priv;
+ int i, err = -ENOMEM;
+ u32 version;
+ char buf[18];
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+
+ /* the product id looks strange, so also check the iProduct string */
+ if (usb_string(usbdev, usbdev->descriptor.iProduct, buf,
+ sizeof(buf)) > 0 && strcmp(buf, "USB2CAN converter")) {
+ dev_info(&usbdev->dev, "ignoring: not a USB2CAN converter\n");
+ return -ENODEV;
+ }
+
+ netdev = alloc_candev(sizeof(struct usb_8dev_priv), MAX_TX_URBS);
+ if (!netdev) {
+ dev_err(&intf->dev, "Couldn't alloc candev\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(netdev);
+
+ priv->udev = usbdev;
+ priv->netdev = netdev;
+
+ priv->can.state = CAN_STATE_STOPPED;
+ priv->can.clock.freq = USB_8DEV_ABP_CLOCK;
+ priv->can.bittiming_const = &usb_8dev_bittiming_const;
+ priv->can.do_set_mode = usb_8dev_set_mode;
+ priv->can.do_get_berr_counter = usb_8dev_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_ONE_SHOT;
+
+ netdev->netdev_ops = &usb_8dev_netdev_ops;
+
+ netdev->flags |= IFF_ECHO; /* we support local echo */
+
+ init_usb_anchor(&priv->rx_submitted);
+
+ init_usb_anchor(&priv->tx_submitted);
+ atomic_set(&priv->active_tx_urbs, 0);
+
+ for (i = 0; i < MAX_TX_URBS; i++)
+ priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+
+ priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg),
+ GFP_KERNEL);
+ if (!priv->cmd_msg_buffer)
+ goto cleanup_candev;
+
+ usb_set_intfdata(intf, priv);
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ mutex_init(&priv->usb_8dev_cmd_lock);
+
+ err = register_candev(netdev);
+ if (err) {
+ netdev_err(netdev,
+ "couldn't register CAN device: %d\n", err);
+ goto cleanup_cmd_msg_buffer;
+ }
+
+ err = usb_8dev_cmd_version(priv, &version);
+ if (err) {
+ netdev_err(netdev, "can't get firmware version\n");
+ goto cleanup_cmd_msg_buffer;
+ } else {
+ netdev_info(netdev,
+ "firmware: %d.%d, hardware: %d.%d\n",
+ (version>>24) & 0xff, (version>>16) & 0xff,
+ (version>>8) & 0xff, version & 0xff);
+ }
+
+ devm_can_led_init(netdev);
+
+ return 0;
+
+cleanup_cmd_msg_buffer:
+ kfree(priv->cmd_msg_buffer);
+
+cleanup_candev:
+ free_candev(netdev);
+
+ return err;
+
+}
+
+/* Called by the usb core when driver is unloaded or device is removed */
+static void usb_8dev_disconnect(struct usb_interface *intf)
+{
+ struct usb_8dev_priv *priv = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (priv) {
+ netdev_info(priv->netdev, "device disconnected\n");
+
+ unregister_netdev(priv->netdev);
+ free_candev(priv->netdev);
+
+ unlink_all_urbs(priv);
+ }
+
+}
+
+static struct usb_driver usb_8dev_driver = {
+ .name = "usb_8dev",
+ .probe = usb_8dev_probe,
+ .disconnect = usb_8dev_disconnect,
+ .id_table = usb_8dev_table,
+};
+
+module_usb_driver(usb_8dev_driver);
+
+MODULE_AUTHOR("Bernd Krumboeck <krumboeck@universalnet.at>");
+MODULE_DESCRIPTION("CAN driver for 8 devices USB2CAN interfaces");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 021d69c5d9bc..29e272cc7a98 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1448,10 +1448,10 @@ static int e100_set_settings(struct net_device *dev,
static void e100_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, "ETRAX 100LX", sizeof(info->driver) - 1);
- strncpy(info->version, "$Revision: 1.31 $", sizeof(info->version) - 1);
- strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1);
- strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1);
+ strlcpy(info->driver, "ETRAX 100LX", sizeof(info->driver));
+ strlcpy(info->version, "$Revision: 1.31 $", sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static int e100_nway_reset(struct net_device *dev)
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 325391d19bad..7a54ec04b418 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -8,6 +8,8 @@
* (at your option) any later version.
*/
+#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -66,36 +68,30 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
{
int i;
int ret;
+ unsigned long timeout;
- /*
- * Set all ports to the disabled state.
- */
+ /* Set all ports to the disabled state. */
for (i = 0; i < 6; i++) {
ret = REG_READ(REG_PORT(i), 0x04);
REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
}
- /*
- * Wait for transmit queues to drain.
- */
- msleep(2);
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
- /*
- * Reset the switch.
- */
+ /* Reset the switch. */
REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);
- /*
- * Wait up to one second for reset to complete.
- */
- for (i = 0; i < 1000; i++) {
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
ret = REG_READ(REG_GLOBAL, 0x00);
if ((ret & 0x8000) == 0x0000)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
- if (i == 1000)
+ if (time_after(jiffies, timeout))
return -ETIMEDOUT;
return 0;
@@ -103,15 +99,13 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
static int mv88e6060_setup_global(struct dsa_switch *ds)
{
- /*
- * Disable discarding of frames with excessive collisions,
+ /* Disable discarding of frames with excessive collisions,
* set the maximum frame size to 1536 bytes, and mask all
* interrupt sources.
*/
REG_WRITE(REG_GLOBAL, 0x04, 0x0800);
- /*
- * Enable automatic address learning, set the address
+ /* Enable automatic address learning, set the address
* database size to 1024 entries, and set the default aging
* time to 5 minutes.
*/
@@ -124,16 +118,14 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
{
int addr = REG_PORT(p);
- /*
- * Do not force flow control, disable Ingress and Egress
+ /* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
* port, enable Ingress and Egress Trailer tagging mode.
*/
REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003);
- /*
- * Port based VLAN map: give each port its own address
+ /* Port based VLAN map: give each port its own address
* database, allow the CPU port to talk to each of the 'real'
* ports, and allow each of the 'real' ports to only talk to
* the CPU port.
@@ -144,8 +136,7 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
ds->phys_port_mask :
(1 << ds->dst->cpu_port)));
- /*
- * Port Association Vector: when learning source addresses
+ /* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
* the other bits clear.
@@ -245,7 +236,7 @@ static void mv88e6060_poll_link(struct dsa_switch *ds)
if (!link) {
if (netif_carrier_ok(dev)) {
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
netif_carrier_off(dev);
}
continue;
@@ -256,10 +247,11 @@ static void mv88e6060_poll_link(struct dsa_switch *ds)
fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;
if (!netif_carrier_ok(dev)) {
- printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
- "flow control %sabled\n", dev->name,
- speed, duplex ? "full" : "half",
- fc ? "en" : "dis");
+ netdev_info(dev,
+ "link up, %d Mb/s, %s duplex, flow control %sabled\n",
+ speed,
+ duplex ? "full" : "half",
+ fc ? "en" : "dis");
netif_carrier_on(dev);
}
}
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index c17c75b9f531..41ee5b6ae917 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -8,6 +8,8 @@
* (at your option) any later version.
*/
+#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -50,36 +52,30 @@ static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds)
{
int i;
int ret;
+ unsigned long timeout;
- /*
- * Set all ports to the disabled state.
- */
+ /* Set all ports to the disabled state. */
for (i = 0; i < 8; i++) {
ret = REG_READ(REG_PORT(i), 0x04);
REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
}
- /*
- * Wait for transmit queues to drain.
- */
- msleep(2);
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
- /*
- * Reset the switch.
- */
+ /* Reset the switch. */
REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
- /*
- * Wait up to one second for reset to complete.
- */
- for (i = 0; i < 1000; i++) {
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
ret = REG_READ(REG_GLOBAL, 0x00);
if ((ret & 0xc800) == 0xc800)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
- if (i == 1000)
+ if (time_after(jiffies, timeout))
return -ETIMEDOUT;
return 0;
@@ -90,54 +86,45 @@ static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
int ret;
int i;
- /*
- * Disable the PHY polling unit (since there won't be any
+ /* Disable the PHY polling unit (since there won't be any
* external PHYs to poll), don't discard packets with
* excessive collisions, and mask all interrupt sources.
*/
REG_WRITE(REG_GLOBAL, 0x04, 0x0000);
- /*
- * Set the default address aging time to 5 minutes, and
+ /* Set the default address aging time to 5 minutes, and
* enable address learn messages to be sent to all message
* ports.
*/
REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
- /*
- * Configure the priority mapping registers.
- */
+ /* Configure the priority mapping registers. */
ret = mv88e6xxx_config_prio(ds);
if (ret < 0)
return ret;
- /*
- * Configure the upstream port, and configure the upstream
+ /* Configure the upstream port, and configure the upstream
* port as the port to which ingress and egress monitor frames
* are to be sent.
*/
REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
- /*
- * Disable remote management for now, and set the switch's
+ /* Disable remote management for now, and set the switch's
* DSA device number.
*/
REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
- /*
- * Send all frames with destination addresses matching
+ /* Send all frames with destination addresses matching
* 01:80:c2:00:00:2x to the CPU port.
*/
REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
- /*
- * Send all frames with destination addresses matching
+ /* Send all frames with destination addresses matching
* 01:80:c2:00:00:0x to the CPU port.
*/
REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
- /*
- * Disable the loopback filter, disable flow control
+ /* Disable the loopback filter, disable flow control
* messages, disable flood broadcast override, disable
* removing of provider tags, disable ATU age violation
* interrupts, disable tag flow control, force flow
@@ -146,9 +133,7 @@ static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
*/
REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
- /*
- * Program the DSA routing table.
- */
+ /* Program the DSA routing table. */
for (i = 0; i < 32; i++) {
int nexthop;
@@ -159,33 +144,24 @@ static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
}
- /*
- * Clear all trunk masks.
- */
+ /* Clear all trunk masks. */
for (i = 0; i < 8; i++)
REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
- /*
- * Clear all trunk mappings.
- */
+ /* Clear all trunk mappings. */
for (i = 0; i < 16; i++)
REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
- /*
- * Disable ingress rate limiting by resetting all ingress
+ /* Disable ingress rate limiting by resetting all ingress
* rate limit registers to their initial state.
*/
for (i = 0; i < 6; i++)
REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
- /*
- * Initialise cross-chip port VLAN table to reset defaults.
- */
+ /* Initialise cross-chip port VLAN table to reset defaults. */
REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
- /*
- * Clear the priority override table.
- */
+ /* Clear the priority override table. */
for (i = 0; i < 16; i++)
REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
@@ -199,8 +175,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
int addr = REG_PORT(p);
u16 val;
- /*
- * MAC Forcing register: don't force link, speed, duplex
+ /* MAC Forcing register: don't force link, speed, duplex
* or flow control state to any particular values on physical
* ports, but force the CPU port and all DSA ports to 1000 Mb/s
* full duplex.
@@ -210,15 +185,13 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
else
REG_WRITE(addr, 0x01, 0x0003);
- /*
- * Do not limit the period of time that this port can be
+ /* Do not limit the period of time that this port can be
* paused for by the remote end or the period of time that
* this port can pause the remote end.
*/
REG_WRITE(addr, 0x02, 0x0000);
- /*
- * Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+ /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
* disable Header mode, enable IGMP/MLD snooping, disable VLAN
* tunneling, determine priority by looking at 802.1p and IP
* priority fields (IP prio has precedence), and set STP state
@@ -245,14 +218,12 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
val |= 0x000c;
REG_WRITE(addr, 0x04, val);
- /*
- * Port Control 1: disable trunking. Also, if this is the
+ /* Port Control 1: disable trunking. Also, if this is the
* CPU port, enable learn messages to be sent to this port.
*/
REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
- /*
- * Port based VLAN map: give each port its own address
+ /* Port based VLAN map: give each port its own address
* database, allow the CPU port to talk to each of the 'real'
* ports, and allow each of the 'real' ports to only talk to
* the upstream port.
@@ -264,14 +235,12 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
val |= 1 << dsa_upstream_port(ds);
REG_WRITE(addr, 0x06, val);
- /*
- * Default VLAN ID and priority: don't set a default VLAN
+ /* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
REG_WRITE(addr, 0x07, 0x0000);
- /*
- * Port Control 2: don't force a good FCS, set the maximum
+ /* Port Control 2: don't force a good FCS, set the maximum
* frame size to 10240 bytes, don't let the switch add or
* strip 802.1q tags, don't discard tagged or untagged frames
* on this port, do a destination address lookup on all
@@ -281,48 +250,36 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
*/
REG_WRITE(addr, 0x08, 0x2080);
- /*
- * Egress rate control: disable egress rate control.
- */
+ /* Egress rate control: disable egress rate control. */
REG_WRITE(addr, 0x09, 0x0001);
- /*
- * Egress rate control 2: disable egress rate control.
- */
+ /* Egress rate control 2: disable egress rate control. */
REG_WRITE(addr, 0x0a, 0x0000);
- /*
- * Port Association Vector: when learning source addresses
+ /* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
* the other bits clear.
*/
REG_WRITE(addr, 0x0b, 1 << p);
- /*
- * Port ATU control: disable limiting the number of address
+ /* Port ATU control: disable limiting the number of address
* database entries that this port is allowed to use.
*/
REG_WRITE(addr, 0x0c, 0x0000);
- /*
- * Priorit Override: disable DA, SA and VTU priority override.
- */
+ /* Priority Override: disable DA, SA and VTU priority override. */
REG_WRITE(addr, 0x0d, 0x0000);
- /*
- * Port Ethertype: use the Ethertype DSA Ethertype value.
- */
+ /* Port Ethertype: use the Ethertype DSA Ethertype value. */
REG_WRITE(addr, 0x0f, ETH_P_EDSA);
- /*
- * Tag Remap: use an identity 802.1p prio -> switch prio
+ /* Tag Remap: use an identity 802.1p prio -> switch prio
* mapping.
*/
REG_WRITE(addr, 0x18, 0x3210);
- /*
- * Tag Remap 2: use an identity 802.1p prio -> switch prio
+ /* Tag Remap 2: use an identity 802.1p prio -> switch prio
* mapping.
*/
REG_WRITE(addr, 0x19, 0x7654);
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 55888b06d8b4..dadfafba64e9 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -8,6 +8,8 @@
* (at your option) any later version.
*/
+#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -15,9 +17,7 @@
#include <net/dsa.h>
#include "mv88e6xxx.h"
-/*
- * Switch product IDs
- */
+/* Switch product IDs */
#define ID_6085 0x04a0
#define ID_6095 0x0950
#define ID_6131 0x1060
@@ -44,36 +44,30 @@ static int mv88e6131_switch_reset(struct dsa_switch *ds)
{
int i;
int ret;
+ unsigned long timeout;
- /*
- * Set all ports to the disabled state.
- */
+ /* Set all ports to the disabled state. */
for (i = 0; i < 11; i++) {
ret = REG_READ(REG_PORT(i), 0x04);
REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
}
- /*
- * Wait for transmit queues to drain.
- */
- msleep(2);
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
- /*
- * Reset the switch.
- */
+ /* Reset the switch. */
REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
- /*
- * Wait up to one second for reset to complete.
- */
- for (i = 0; i < 1000; i++) {
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
ret = REG_READ(REG_GLOBAL, 0x00);
if ((ret & 0xc800) == 0xc800)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
- if (i == 1000)
+ if (time_after(jiffies, timeout))
return -ETIMEDOUT;
return 0;
@@ -84,42 +78,34 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
int ret;
int i;
- /*
- * Enable the PHY polling unit, don't discard packets with
+ /* Enable the PHY polling unit, don't discard packets with
* excessive collisions, use a weighted fair queueing scheme
* to arbitrate between packet queues, set the maximum frame
* size to 1632, and mask all interrupt sources.
*/
REG_WRITE(REG_GLOBAL, 0x04, 0x4400);
- /*
- * Set the default address aging time to 5 minutes, and
+ /* Set the default address aging time to 5 minutes, and
* enable address learn messages to be sent to all message
* ports.
*/
REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
- /*
- * Configure the priority mapping registers.
- */
+ /* Configure the priority mapping registers. */
ret = mv88e6xxx_config_prio(ds);
if (ret < 0)
return ret;
- /*
- * Set the VLAN ethertype to 0x8100.
- */
+ /* Set the VLAN ethertype to 0x8100. */
REG_WRITE(REG_GLOBAL, 0x19, 0x8100);
- /*
- * Disable ARP mirroring, and configure the upstream port as
+ /* Disable ARP mirroring, and configure the upstream port as
* the port to which ingress and egress monitor frames are to
* be sent.
*/
REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);
- /*
- * Disable cascade port functionality unless this device
+ /* Disable cascade port functionality unless this device
* is used in a cascade configuration, and set the switch's
* DSA device number.
*/
@@ -128,23 +114,19 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
else
REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
- /*
- * Send all frames with destination addresses matching
+ /* Send all frames with destination addresses matching
* 01:80:c2:00:00:0x to the CPU port.
*/
REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
- /*
- * Ignore removed tag data on doubly tagged packets, disable
+ /* Ignore removed tag data on doubly tagged packets, disable
* flow control messages, force flow control priority to the
* highest, and send all special multicast frames to the CPU
* port at the highest priority.
*/
REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
- /*
- * Program the DSA routing table.
- */
+ /* Program the DSA routing table. */
for (i = 0; i < 32; i++) {
int nexthop;
@@ -155,20 +137,15 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
}
- /*
- * Clear all trunk masks.
- */
+ /* Clear all trunk masks. */
for (i = 0; i < 8; i++)
REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff);
- /*
- * Clear all trunk mappings.
- */
+ /* Clear all trunk mappings. */
for (i = 0; i < 16; i++)
REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
- /*
- * Force the priority of IGMP/MLD snoop frames and ARP frames
+ /* Force the priority of IGMP/MLD snoop frames and ARP frames
* to the highest setting.
*/
REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff);
@@ -182,8 +159,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
int addr = REG_PORT(p);
u16 val;
- /*
- * MAC Forcing register: don't force link, speed, duplex
+ /* MAC Forcing register: don't force link, speed, duplex
* or flow control state to any particular values on physical
* ports, but force the CPU port and all DSA ports to 1000 Mb/s
* (100 Mb/s on 6085) full duplex.
@@ -196,8 +172,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
else
REG_WRITE(addr, 0x01, 0x0003);
- /*
- * Port Control: disable Core Tag, disable Drop-on-Lock,
+ /* Port Control: disable Core Tag, disable Drop-on-Lock,
* transmit frames unmodified, disable Header mode,
* enable IGMP/MLD snoop, disable DoubleTag, disable VLAN
* tunneling, determine priority by looking at 802.1p and
@@ -214,8 +189,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
val = 0x0433;
if (p == dsa_upstream_port(ds)) {
val |= 0x0104;
- /*
- * On 6085, unknown multicast forward is controlled
+ /* On 6085, unknown multicast forward is controlled
* here rather than in Port Control 2 register.
*/
if (ps->id == ID_6085)
@@ -225,14 +199,12 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
val |= 0x0100;
REG_WRITE(addr, 0x04, val);
- /*
- * Port Control 1: disable trunking. Also, if this is the
+ /* Port Control 1: disable trunking. Also, if this is the
* CPU port, enable learn messages to be sent to this port.
*/
REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
- /*
- * Port based VLAN map: give each port its own address
+ /* Port based VLAN map: give each port its own address
* database, allow the CPU port to talk to each of the 'real'
* ports, and allow each of the 'real' ports to only talk to
* the upstream port.
@@ -244,14 +216,12 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
val |= 1 << dsa_upstream_port(ds);
REG_WRITE(addr, 0x06, val);
- /*
- * Default VLAN ID and priority: don't set a default VLAN
+ /* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
REG_WRITE(addr, 0x07, 0x0000);
- /*
- * Port Control 2: don't force a good FCS, don't use
+ /* Port Control 2: don't force a good FCS, don't use
* VLAN-based, source address-based or destination
* address-based priority overrides, don't let the switch
* add or strip 802.1q tags, don't discard tagged or
@@ -264,8 +234,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
* forwarding of unknown multicast addresses.
*/
if (ps->id == ID_6085)
- /*
- * on 6085, bits 3:0 are reserved, bit 6 control ARP
+ /* on 6085, bits 3:0 are reserved, bit 6 control ARP
* mirroring, and multicast forward is handled in
* Port Control register.
*/
@@ -277,32 +246,25 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
REG_WRITE(addr, 0x08, val);
}
- /*
- * Rate Control: disable ingress rate limiting.
- */
+ /* Rate Control: disable ingress rate limiting. */
REG_WRITE(addr, 0x09, 0x0000);
- /*
- * Rate Control 2: disable egress rate limiting.
- */
+ /* Rate Control 2: disable egress rate limiting. */
REG_WRITE(addr, 0x0a, 0x0000);
- /*
- * Port Association Vector: when learning source addresses
+ /* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
* the other bits clear.
*/
REG_WRITE(addr, 0x0b, 1 << p);
- /*
- * Tag Remap: use an identity 802.1p prio -> switch prio
+ /* Tag Remap: use an identity 802.1p prio -> switch prio
* mapping.
*/
REG_WRITE(addr, 0x18, 0x3210);
- /*
- * Tag Remap 2: use an identity 802.1p prio -> switch prio
+ /* Tag Remap 2: use an identity 802.1p prio -> switch prio
* mapping.
*/
REG_WRITE(addr, 0x19, 0x7654);
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index a2c62c2f30ee..17314ed9456d 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -8,6 +8,8 @@
* (at your option) any later version.
*/
+#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -15,8 +17,7 @@
#include <net/dsa.h>
#include "mv88e6xxx.h"
-/*
- * If the switch's ADDR[4:0] strap pins are strapped to zero, it will
+/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
* use all 32 SMI bus addresses on its SMI bus, and all switch registers
* will be directly accessible on some {device address,register address}
* pair. If the ADDR[4:0] pins are not strapped to zero, the switch
@@ -48,30 +49,22 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
if (sw_addr == 0)
return mdiobus_read(bus, addr, reg);
- /*
- * Wait for the bus to become free.
- */
+ /* Wait for the bus to become free. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
if (ret < 0)
return ret;
- /*
- * Transmit the read command.
- */
+ /* Transmit the read command. */
ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg);
if (ret < 0)
return ret;
- /*
- * Wait for the read command to complete.
- */
+ /* Wait for the read command to complete. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
if (ret < 0)
return ret;
- /*
- * Read the data.
- */
+ /* Read the data. */
ret = mdiobus_read(bus, sw_addr, 1);
if (ret < 0)
return ret;
@@ -100,30 +93,22 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
if (sw_addr == 0)
return mdiobus_write(bus, addr, reg, val);
- /*
- * Wait for the bus to become free.
- */
+ /* Wait for the bus to become free. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
if (ret < 0)
return ret;
- /*
- * Transmit the data to write.
- */
+ /* Transmit the data to write. */
ret = mdiobus_write(bus, sw_addr, 1, val);
if (ret < 0)
return ret;
- /*
- * Transmit the write command.
- */
+ /* Transmit the write command. */
ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg);
if (ret < 0)
return ret;
- /*
- * Wait for the write command to complete.
- */
+ /* Wait for the write command to complete. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
if (ret < 0)
return ret;
@@ -146,9 +131,7 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
int mv88e6xxx_config_prio(struct dsa_switch *ds)
{
- /*
- * Configure the IP ToS mapping registers.
- */
+ /* Configure the IP ToS mapping registers. */
REG_WRITE(REG_GLOBAL, 0x10, 0x0000);
REG_WRITE(REG_GLOBAL, 0x11, 0x0000);
REG_WRITE(REG_GLOBAL, 0x12, 0x5555);
@@ -158,9 +141,7 @@ int mv88e6xxx_config_prio(struct dsa_switch *ds)
REG_WRITE(REG_GLOBAL, 0x16, 0xffff);
REG_WRITE(REG_GLOBAL, 0x17, 0xffff);
- /*
- * Configure the IEEE 802.1p priority mapping register.
- */
+ /* Configure the IEEE 802.1p priority mapping register. */
REG_WRITE(REG_GLOBAL, 0x18, 0xfa41);
return 0;
@@ -183,14 +164,10 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
for (i = 0; i < 6; i++) {
int j;
- /*
- * Write the MAC address byte.
- */
+ /* Write the MAC address byte. */
REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]);
- /*
- * Wait for the write to complete.
- */
+ /* Wait for the write to complete. */
for (j = 0; j < 16; j++) {
ret = REG_READ(REG_GLOBAL2, 0x0d);
if ((ret & 0x8000) == 0)
@@ -221,16 +198,17 @@ int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val)
static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
{
int ret;
- int i;
+ unsigned long timeout;
ret = REG_READ(REG_GLOBAL, 0x04);
REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000);
- for (i = 0; i < 1000; i++) {
- ret = REG_READ(REG_GLOBAL, 0x00);
- msleep(1);
- if ((ret & 0xc000) != 0xc000)
- return 0;
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ usleep_range(1000, 2000);
+ if ((ret & 0xc000) != 0xc000)
+ return 0;
}
return -ETIMEDOUT;
@@ -239,16 +217,17 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
{
int ret;
- int i;
+ unsigned long timeout;
ret = REG_READ(REG_GLOBAL, 0x04);
REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000);
- for (i = 0; i < 1000; i++) {
- ret = REG_READ(REG_GLOBAL, 0x00);
- msleep(1);
- if ((ret & 0xc000) == 0xc000)
- return 0;
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ usleep_range(1000, 2000);
+ if ((ret & 0xc000) == 0xc000)
+ return 0;
}
return -ETIMEDOUT;
@@ -260,11 +239,11 @@ static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
if (mutex_trylock(&ps->ppu_mutex)) {
- struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
+ struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
- if (mv88e6xxx_ppu_enable(ds) == 0)
- ps->ppu_disabled = 0;
- mutex_unlock(&ps->ppu_mutex);
+ if (mv88e6xxx_ppu_enable(ds) == 0)
+ ps->ppu_disabled = 0;
+ mutex_unlock(&ps->ppu_mutex);
}
}
@@ -282,22 +261,21 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
mutex_lock(&ps->ppu_mutex);
- /*
- * If the PHY polling unit is enabled, disable it so that
+ /* If the PHY polling unit is enabled, disable it so that
* we can access the PHY registers. If it was already
* disabled, cancel the timer that is going to re-enable
* it.
*/
if (!ps->ppu_disabled) {
- ret = mv88e6xxx_ppu_disable(ds);
- if (ret < 0) {
- mutex_unlock(&ps->ppu_mutex);
- return ret;
- }
- ps->ppu_disabled = 1;
+ ret = mv88e6xxx_ppu_disable(ds);
+ if (ret < 0) {
+ mutex_unlock(&ps->ppu_mutex);
+ return ret;
+ }
+ ps->ppu_disabled = 1;
} else {
- del_timer(&ps->ppu_timer);
- ret = 0;
+ del_timer(&ps->ppu_timer);
+ ret = 0;
}
return ret;
@@ -307,9 +285,7 @@ static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
- /*
- * Schedule a timer to re-enable the PHY polling unit.
- */
+ /* Schedule a timer to re-enable the PHY polling unit. */
mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
mutex_unlock(&ps->ppu_mutex);
}
@@ -331,8 +307,8 @@ int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
ret = mv88e6xxx_ppu_access_get(ds);
if (ret >= 0) {
- ret = mv88e6xxx_reg_read(ds, addr, regnum);
- mv88e6xxx_ppu_access_put(ds);
+ ret = mv88e6xxx_reg_read(ds, addr, regnum);
+ mv88e6xxx_ppu_access_put(ds);
}
return ret;
@@ -345,8 +321,8 @@ int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
ret = mv88e6xxx_ppu_access_get(ds);
if (ret >= 0) {
- ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
- mv88e6xxx_ppu_access_put(ds);
+ ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
+ mv88e6xxx_ppu_access_put(ds);
}
return ret;
@@ -380,7 +356,7 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
if (!link) {
if (netif_carrier_ok(dev)) {
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
netif_carrier_off(dev);
}
continue;
@@ -404,10 +380,11 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
fc = (port_status & 0x8000) ? 1 : 0;
if (!netif_carrier_ok(dev)) {
- printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
- "flow control %sabled\n", dev->name,
- speed, duplex ? "full" : "half",
- fc ? "en" : "dis");
+ netdev_info(dev,
+ "link up, %d Mb/s, %s duplex, flow control %sabled\n",
+ speed,
+ duplex ? "full" : "half",
+ fc ? "en" : "dis");
netif_carrier_on(dev);
}
}
@@ -431,14 +408,10 @@ static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
int ret;
- /*
- * Snapshot the hardware statistics counters for this port.
- */
+ /* Snapshot the hardware statistics counters for this port. */
REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port);
- /*
- * Wait for the snapshotting to complete.
- */
+ /* Wait for the snapshotting to complete. */
ret = mv88e6xxx_stats_wait(ds);
if (ret < 0)
return ret;
@@ -502,9 +475,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
return;
}
- /*
- * Read each of the counters.
- */
+ /* Read each of the counters. */
for (i = 0; i < nr_stats; i++) {
struct mv88e6xxx_hw_stat *s = stats + i;
u32 low;
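
The printk(KERN_INFO "%s: ...", dev->name, ...) calls above become netdev_info(), which prefixes the message with the driver and interface name automatically and keeps link messages on one format. A short sketch of the pattern in a hypothetical helper (names invented for illustration):

#include <linux/netdevice.h>

/* Illustrative only: report link state through the netdev_* helpers. */
static void example_report_link(struct net_device *dev, bool up, int speed)
{
        if (up)
                netdev_info(dev, "link up, %d Mb/s\n", speed);
        else
                netdev_info(dev, "link down\n");
}
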
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index fc2cd7b90e8d..911ede58dd12 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -16,16 +16,14 @@
#define REG_GLOBAL2 0x1c
struct mv88e6xxx_priv_state {
- /*
- * When using multi-chip addressing, this mutex protects
+ /* When using multi-chip addressing, this mutex protects
* access to the indirect access registers. (In single-chip
* mode, this mutex is effectively useless.)
*/
struct mutex smi_mutex;
#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
- /*
- * Handles automatic disabling and re-enabling of the PHY
+ /* Handles automatic disabling and re-enabling of the PHY
* polling unit.
*/
struct mutex ppu_mutex;
@@ -34,8 +32,7 @@ struct mv88e6xxx_priv_state {
struct timer_list ppu_timer;
#endif
- /*
- * This mutex serialises access to the statistics unit.
+ /* This mutex serialises access to the statistics unit.
* Hold this mutex over snapshot + dump sequences.
*/
struct mutex stats_mutex;
@@ -52,7 +49,7 @@ struct mv88e6xxx_hw_stat {
int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
- int reg, u16 val);
+ int reg, u16 val);
int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
int mv88e6xxx_config_prio(struct dsa_switch *ds);
int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
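
The ppu_access_get()/ppu_access_put() pair above is a disable-now, re-enable-later guard: the first access disables the PHY polling unit (or cancels a pending re-enable), and the release path re-arms a short timer so that bursts of PHY accesses do not keep toggling the unit. A condensed sketch of that pattern, with all names invented for illustration:

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>

struct ppu_guard {
        struct mutex lock;
        struct timer_list timer;        /* deferred re-enable */
        bool disabled;
};

static int ppu_guard_get(struct ppu_guard *g, int (*disable)(void))
{
        int err = 0;

        mutex_lock(&g->lock);
        if (!g->disabled) {
                err = disable();
                if (!err)
                        g->disabled = true;
        } else {
                del_timer(&g->timer);   /* stay disabled a little longer */
        }
        if (err)
                mutex_unlock(&g->lock); /* caller gets no access on failure */
        return err;
}

static void ppu_guard_put(struct ppu_guard *g)
{
        /* Re-enable only if no further access arrives within ~10 ms. */
        mod_timer(&g->timer, jiffies + msecs_to_jiffies(10));
        mutex_unlock(&g->lock);
}
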
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index c260af5411d0..42aa54af6842 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -100,6 +100,15 @@ static void dummy_dev_uninit(struct net_device *dev)
free_percpu(dev->dstats);
}
+static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
+{
+ if (new_carrier)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ return 0;
+}
+
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
.ndo_uninit = dummy_dev_uninit,
@@ -108,6 +117,7 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_set_rx_mode = set_multicast_list,
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = dummy_get_stats64,
+ .ndo_change_carrier = dummy_change_carrier,
};
static void dummy_setup(struct net_device *dev)
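
The new .ndo_change_carrier hook lets the core ask a driver to set its carrier state directly; for dummy that is just netif_carrier_on()/netif_carrier_off(), as shown above. Userspace typically reaches this through the carrier attribute of the link netlink interface, though that plumbing is outside this diff. A hedged driver-side sketch built on the same primitives:

#include <linux/netdevice.h>

/* Illustration only: flip carrier state with the core helpers. */
static int example_set_carrier(struct net_device *dev, bool up)
{
        if (up == netif_carrier_ok(dev))
                return 0;               /* already in the requested state */
        if (up)
                netif_carrier_on(dev);
        else
                netif_carrier_off(dev);
        return 0;
}
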
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c
deleted file mode 100644
index 2038eaabaea4..000000000000
--- a/drivers/net/ethernet/3com/3c501.c
+++ /dev/null
@@ -1,896 +0,0 @@
-/* 3c501.c: A 3Com 3c501 Ethernet driver for Linux. */
-/*
- Written 1992,1993,1994 Donald Becker
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- This is a device driver for the 3Com Etherlink 3c501.
- Do not purchase this card, even as a joke. It's performance is horrible,
- and it breaks in many ways.
-
- The original author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Fixed (again!) the missing interrupt locking on TX/RX shifting.
- Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Removed calls to init_etherdev since they are no longer needed, and
- cleaned up modularization just a bit. The driver still allows only
- the default address for cards when loaded as a module, but that's
- really less braindead than anyone using a 3c501 board. :)
- 19950208 (invid@msen.com)
-
- Added traps for interrupts hitting the window as we clear and TX load
- the board. Now getting 150K/second FTP with a 3c501 card. Still playing
- with a TX-TX optimisation to see if we can touch 180-200K/second as seems
- theoretically maximum.
- 19950402 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Cleaned up for 2.3.x because we broke SMP now.
- 20000208 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Check up pass for 2.5. Nothing significant changed
- 20021009 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Fixed zero fill corner case
- 20030104 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
-
- For the avoidance of doubt the "preferred form" of this code is one which
- is in an open non patent encumbered format. Where cryptographic key signing
- forms part of the process of creating an executable the information
- including keys needed to generate an equivalently functional executable
- are deemed to be part of the source code.
-
-*/
-
-
-/**
- * DOC: 3c501 Card Notes
- *
- * Some notes on this thing if you have to hack it. [Alan]
- *
- * Some documentation is available from 3Com. Due to the boards age
- * standard responses when you ask for this will range from 'be serious'
- * to 'give it to a museum'. The documentation is incomplete and mostly
- * of historical interest anyway.
- *
- * The basic system is a single buffer which can be used to receive or
- * transmit a packet. A third command mode exists when you are setting
- * things up.
- *
- * If it's transmitting it's not receiving and vice versa. In fact the
- * time to get the board back into useful state after an operation is
- * quite large.
- *
- * The driver works by keeping the board in receive mode waiting for a
- * packet to arrive. When one arrives it is copied out of the buffer
- * and delivered to the kernel. The card is reloaded and off we go.
- *
- * When transmitting lp->txing is set and the card is reset (from
- * receive mode) [possibly losing a packet just received] to command
- * mode. A packet is loaded and transmit mode triggered. The interrupt
- * handler runs different code for transmit interrupts and can handle
- * returning to receive mode or retransmissions (yes you have to help
- * out with those too).
- *
- * DOC: Problems
- *
- * There are a wide variety of undocumented error returns from the card
- * and you basically have to kick the board and pray if they turn up. Most
- * only occur under extreme load or if you do something the board doesn't
- * like (eg touching a register at the wrong time).
- *
- * The driver is less efficient than it could be. It switches through
- * receive mode even if more transmits are queued. If this worries you buy
- * a real Ethernet card.
- *
- * The combination of slow receive restart and no real multicast
- * filter makes the board unusable with a kernel compiled for IP
- * multicasting in a real multicast environment. That's down to the board,
- * but even with no multicast programs running a multicast IP kernel is
- * in group 224.0.0.1 and you will therefore be listening to all multicasts.
- * One nv conference running over that Ethernet and you can give up.
- *
- */
-
-#define DRV_NAME "3c501"
-#define DRV_VERSION "2002/10/09"
-
-
-static const char version[] =
- DRV_NAME ".c: " DRV_VERSION " Alan Cox (alan@lxorguk.ukuu.org.uk).\n";
-
-/*
- * Braindamage remaining:
- * The 3c501 board.
- */
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/fcntl.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-
-#include "3c501.h"
-
-/*
- * The boilerplate probe code.
- */
-
-static int io = 0x280;
-static int irq = 5;
-static int mem_start;
-
-/**
- * el1_probe - probe for a 3c501
- * @dev: The device structure passed in to probe.
- *
- * This can be called from two places. The network layer will probe using
- * a device structure passed in with the probe information completed. For a
- * modular driver we use #init_module to fill in our own structure and probe
- * for it.
- *
- * Returns 0 on success. ENXIO if asked not to probe and ENODEV if asked to
- * probe and failing to find anything.
- */
-
-struct net_device * __init el1_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- static const unsigned ports[] = { 0x280, 0x300, 0};
- const unsigned *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- mem_start = dev->mem_start & 7;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = el1_probe1(dev, io);
- } else if (io != 0) {
- err = -ENXIO; /* Don't probe at all. */
- } else {
- for (port = ports; *port && el1_probe1(dev, *port); port++)
- ;
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- release_region(dev->base_addr, EL1_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops el_netdev_ops = {
- .ndo_open = el_open,
- .ndo_stop = el1_close,
- .ndo_start_xmit = el_start_xmit,
- .ndo_tx_timeout = el_timeout,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/**
- * el1_probe1:
- * @dev: The device structure to use
- * @ioaddr: An I/O address to probe at.
- *
- * The actual probe. This is iterated over by #el1_probe in order to
- * check all the applicable device locations.
- *
- * Returns 0 for a success, in which case the device is activated,
- * EAGAIN if the IRQ is in use by another driver, and ENODEV if the
- * board cannot be found.
- */
-
-static int __init el1_probe1(struct net_device *dev, int ioaddr)
-{
- struct net_local *lp;
- const char *mname; /* Vendor name */
- unsigned char station_addr[6];
- int autoirq = 0;
- int i;
-
- /*
- * Reserve I/O resource for exclusive use by this driver
- */
-
- if (!request_region(ioaddr, EL1_IO_EXTENT, DRV_NAME))
- return -ENODEV;
-
- /*
- * Read the station address PROM data from the special port.
- */
-
- for (i = 0; i < 6; i++) {
- outw(i, ioaddr + EL1_DATAPTR);
- station_addr[i] = inb(ioaddr + EL1_SAPROM);
- }
- /*
- * Check the first three octets of the S.A. for 3Com's prefix, or
- * for the Sager NP943 prefix.
- */
-
- if (station_addr[0] == 0x02 && station_addr[1] == 0x60 &&
- station_addr[2] == 0x8c)
- mname = "3c501";
- else if (station_addr[0] == 0x00 && station_addr[1] == 0x80 &&
- station_addr[2] == 0xC8)
- mname = "NP943";
- else {
- release_region(ioaddr, EL1_IO_EXTENT);
- return -ENODEV;
- }
-
- /*
- * We auto-IRQ by shutting off the interrupt line and letting it
- * float high.
- */
-
- dev->irq = irq;
-
- if (dev->irq < 2) {
- unsigned long irq_mask;
-
- irq_mask = probe_irq_on();
- inb(RX_STATUS); /* Clear pending interrupts. */
- inb(TX_STATUS);
- outb(AX_LOOP + 1, AX_CMD);
-
- outb(0x00, AX_CMD);
-
- mdelay(20);
- autoirq = probe_irq_off(irq_mask);
-
- if (autoirq == 0) {
- pr_warning("%s probe at %#x failed to detect IRQ line.\n",
- mname, ioaddr);
- release_region(ioaddr, EL1_IO_EXTENT);
- return -EAGAIN;
- }
- }
-
- outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. */
- dev->base_addr = ioaddr;
- memcpy(dev->dev_addr, station_addr, ETH_ALEN);
-
- if (mem_start & 0xf)
- el_debug = mem_start & 0x7;
- if (autoirq)
- dev->irq = autoirq;
-
- pr_info("%s: %s EtherLink at %#lx, using %sIRQ %d.\n",
- dev->name, mname, dev->base_addr,
- autoirq ? "auto":"assigned ", dev->irq);
-
-#ifdef CONFIG_IP_MULTICAST
- pr_warning("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
-#endif
-
- if (el_debug)
- pr_debug("%s", version);
-
- lp = netdev_priv(dev);
- memset(lp, 0, sizeof(struct net_local));
- spin_lock_init(&lp->lock);
-
- /*
- * The EL1-specific entries in the device structure.
- */
-
- dev->netdev_ops = &el_netdev_ops;
- dev->watchdog_timeo = HZ;
- dev->ethtool_ops = &netdev_ethtool_ops;
- return 0;
-}
-
-/**
- * el1_open:
- * @dev: device that is being opened
- *
- * When an ifconfig is issued which changes the device flags to include
- * IFF_UP this function is called. It is only called when the change
- * occurs, not when the interface remains up. #el1_close will be called
- * when it goes down.
- *
- * Returns 0 for a successful open, or -EAGAIN if someone has run off
- * with our interrupt line.
- */
-
-static int el_open(struct net_device *dev)
-{
- int retval;
- int ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
- unsigned long flags;
-
- if (el_debug > 2)
- pr_debug("%s: Doing el_open()...\n", dev->name);
-
- retval = request_irq(dev->irq, el_interrupt, 0, dev->name, dev);
- if (retval)
- return retval;
-
- spin_lock_irqsave(&lp->lock, flags);
- el_reset(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- lp->txing = 0; /* Board in RX mode */
- outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
- netif_start_queue(dev);
- return 0;
-}
-
-/**
- * el_timeout:
- * @dev: The 3c501 card that has timed out
- *
- * Attempt to restart the board. This is basically a mixture of extreme
- * violence and prayer
- *
- */
-
-static void el_timeout(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- if (el_debug)
- pr_debug("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
- dev->name, inb(TX_STATUS),
- inb(AX_STATUS), inb(RX_STATUS));
- dev->stats.tx_errors++;
- outb(TX_NORM, TX_CMD);
- outb(RX_NORM, RX_CMD);
- outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */
- outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
- lp->txing = 0; /* Ripped back in to RX */
- netif_wake_queue(dev);
-}
-
-
-/**
- * el_start_xmit:
- * @skb: The packet that is queued to be sent
- * @dev: The 3c501 card we want to throw it down
- *
- * Attempt to send a packet to a 3c501 card. There are some interesting
- * catches here because the 3c501 is an extremely old and therefore
- * stupid piece of technology.
- *
- * If we are handling an interrupt on the other CPU we cannot load a packet
- * as we may still be attempting to retrieve the last RX packet buffer.
- *
- * When a transmit times out we dump the card into control mode and just
- * start again. It happens enough that it isn't worth logging.
- *
- * We avoid holding the spin locks when doing the packet load to the board.
- * The device is very slow, and its DMA mode is even slower. If we held the
- * lock while loading 1500 bytes onto the controller we would drop a lot of
- * serial port characters. This requires we do extra locking, but we have
- * no real choice.
- */
-
-static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
-
- /*
- * Avoid incoming interrupts between us flipping txing and flipping
- * mode as the driver assumes txing is a faithful indicator of card
- * state
- */
-
- spin_lock_irqsave(&lp->lock, flags);
-
- /*
- * Avoid timer-based retransmission conflicts.
- */
-
- netif_stop_queue(dev);
-
- do {
- int len = skb->len;
- int pad = 0;
- int gp_start;
- unsigned char *buf = skb->data;
-
- if (len < ETH_ZLEN)
- pad = ETH_ZLEN - len;
-
- gp_start = 0x800 - (len + pad);
-
- lp->tx_pkt_start = gp_start;
- lp->collisions = 0;
-
- dev->stats.tx_bytes += skb->len;
-
- /*
- * Command mode with status cleared should [in theory]
- * mean no more interrupts can be pending on the card.
- */
-
- outb_p(AX_SYS, AX_CMD);
- inb_p(RX_STATUS);
- inb_p(TX_STATUS);
-
- lp->loading = 1;
- lp->txing = 1;
-
- /*
- * Turn interrupts back on while we spend a pleasant
- * afternoon loading bytes into the board
- */
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- /* Set rx packet area to 0. */
- outw(0x00, RX_BUF_CLR);
- /* aim - packet will be loaded into buffer start */
- outw(gp_start, GP_LOW);
- /* load buffer (usual thing each byte increments the pointer) */
- outsb(DATAPORT, buf, len);
- if (pad) {
- while (pad--) /* Zero fill buffer tail */
- outb(0, DATAPORT);
- }
- /* the board reuses the same register */
- outw(gp_start, GP_LOW);
-
- if (lp->loading != 2) {
- /* fire ... Trigger xmit. */
- outb(AX_XMIT, AX_CMD);
- lp->loading = 0;
- if (el_debug > 2)
- pr_debug(" queued xmit.\n");
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- /* A receive upset our load, despite our best efforts */
- if (el_debug > 2)
- pr_debug("%s: burped during tx load.\n", dev->name);
- spin_lock_irqsave(&lp->lock, flags);
- } while (1);
-}
-
-/**
- * el_interrupt:
- * @irq: Interrupt number
- * @dev_id: The 3c501 that burped
- *
- * Handle the ether interface interrupts. The 3c501 needs a lot more
- * hand holding than most cards. In particular we get a transmit interrupt
- * with a collision error because the board firmware isn't capable of rewinding
- * its own transmit buffer pointers. It can however count to 16 for us.
- *
- * On the receive side the card is also very dumb. It has no buffering to
- * speak of. We simply pull the packet out of its PIO buffer (which is slow)
- * and queue it for the kernel. Then we reset the card for the next packet.
- *
- * We sometimes get surprise interrupts late both because the SMP IRQ delivery
- * is message passing and because the card sometimes seems to deliver late. I
- * think if it is part way through a receive and the mode is changed it carries
- * on receiving and sends us an interrupt. We have to band aid all these cases
- * to get a sensible 150kBytes/second performance. Even then you want a small
- * TCP window.
- */
-
-static irqreturn_t el_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr;
- int axsr; /* Aux. status reg. */
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- spin_lock(&lp->lock);
-
- /*
- * What happened ?
- */
-
- axsr = inb(AX_STATUS);
-
- /*
- * Log it
- */
-
- if (el_debug > 3)
- pr_debug("%s: el_interrupt() aux=%#02x\n", dev->name, axsr);
-
- if (lp->loading == 1 && !lp->txing)
- pr_warning("%s: Inconsistent state loading while not in tx\n",
- dev->name);
-
- if (lp->txing) {
- /*
- * Board in transmit mode. May be loading. If we are
- * loading we shouldn't have got this.
- */
- int txsr = inb(TX_STATUS);
-
- if (lp->loading == 1) {
- if (el_debug > 2)
- pr_debug("%s: Interrupt while loading [txsr=%02x gp=%04x rp=%04x]\n",
- dev->name, txsr, inw(GP_LOW), inw(RX_LOW));
-
- /* Force a reload */
- lp->loading = 2;
- spin_unlock(&lp->lock);
- goto out;
- }
- if (el_debug > 6)
- pr_debug("%s: txsr=%02x gp=%04x rp=%04x\n", dev->name,
- txsr, inw(GP_LOW), inw(RX_LOW));
-
- if ((axsr & 0x80) && (txsr & TX_READY) == 0) {
- /*
- * FIXME: is there a logic to whether to keep
- * on trying or reset immediately ?
- */
- if (el_debug > 1)
- pr_debug("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x gp=%03x rp=%03x.\n",
- dev->name, txsr, axsr,
- inw(ioaddr + EL1_DATAPTR),
- inw(ioaddr + EL1_RXPTR));
- lp->txing = 0;
- netif_wake_queue(dev);
- } else if (txsr & TX_16COLLISIONS) {
- /*
- * Timed out
- */
- if (el_debug)
- pr_debug("%s: Transmit failed 16 times, Ethernet jammed?\n", dev->name);
- outb(AX_SYS, AX_CMD);
- lp->txing = 0;
- dev->stats.tx_aborted_errors++;
- netif_wake_queue(dev);
- } else if (txsr & TX_COLLISION) {
- /*
- * Retrigger xmit.
- */
-
- if (el_debug > 6)
- pr_debug("%s: retransmitting after a collision.\n", dev->name);
- /*
- * Poor little chip can't reset its own start
- * pointer
- */
-
- outb(AX_SYS, AX_CMD);
- outw(lp->tx_pkt_start, GP_LOW);
- outb(AX_XMIT, AX_CMD);
- dev->stats.collisions++;
- spin_unlock(&lp->lock);
- goto out;
- } else {
- /*
- * It worked.. we will now fall through and receive
- */
- dev->stats.tx_packets++;
- if (el_debug > 6)
- pr_debug("%s: Tx succeeded %s\n", dev->name,
- (txsr & TX_RDY) ? "." : "but tx is busy!");
- /*
- * This is safe the interrupt is atomic WRT itself.
- */
- lp->txing = 0;
- /* In case more to transmit */
- netif_wake_queue(dev);
- }
- } else {
- /*
- * In receive mode.
- */
-
- int rxsr = inb(RX_STATUS);
- if (el_debug > 5)
- pr_debug("%s: rxsr=%02x txsr=%02x rp=%04x\n",
- dev->name, rxsr, inb(TX_STATUS), inw(RX_LOW));
- /*
- * Just reading rx_status fixes most errors.
- */
- if (rxsr & RX_MISSED)
- dev->stats.rx_missed_errors++;
- else if (rxsr & RX_RUNT) {
- /* Handled to avoid board lock-up. */
- dev->stats.rx_length_errors++;
- if (el_debug > 5)
- pr_debug("%s: runt.\n", dev->name);
- } else if (rxsr & RX_GOOD) {
- /*
- * Receive worked.
- */
- el_receive(dev);
- } else {
- /*
- * Nothing? Something is broken!
- */
- if (el_debug > 2)
- pr_debug("%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
- dev->name, rxsr);
- el_reset(dev);
- }
- }
-
- /*
- * Move into receive mode
- */
-
- outb(AX_RX, AX_CMD);
- outw(0x00, RX_BUF_CLR);
- inb(RX_STATUS); /* Be certain that interrupts are cleared. */
- inb(TX_STATUS);
- spin_unlock(&lp->lock);
-out:
- return IRQ_HANDLED;
-}
-
-
-/**
- * el_receive:
- * @dev: Device to pull the packets from
- *
- * We have a good packet. Well, not really "good", just mostly not broken.
- * We must check everything to see if it is good. In particular we occasionally
- * get wild packet sizes from the card. If the packet seems sane we PIO it
- * off the card and queue it for the protocol layers.
- */
-
-static void el_receive(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int pkt_len;
- struct sk_buff *skb;
-
- pkt_len = inw(RX_LOW);
-
- if (el_debug > 4)
- pr_debug(" el_receive %d.\n", pkt_len);
-
- if (pkt_len < 60 || pkt_len > 1536) {
- if (el_debug)
- pr_debug("%s: bogus packet, length=%d\n",
- dev->name, pkt_len);
- dev->stats.rx_over_errors++;
- return;
- }
-
- /*
- * Command mode so we can empty the buffer
- */
-
- outb(AX_SYS, AX_CMD);
- skb = netdev_alloc_skb(dev, pkt_len + 2);
-
- /*
- * Start of frame
- */
-
- outw(0x00, GP_LOW);
- if (skb == NULL) {
- pr_info("%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- return;
- } else {
- skb_reserve(skb, 2); /* Force 16 byte alignment */
- /*
- * The read increments through the bytes. The interrupt
- * handler will fix the pointer when it returns to
- * receive mode.
- */
- insb(DATAPORT, skb_put(skb, pkt_len), pkt_len);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
-}
-
-/**
- * el_reset: Reset a 3c501 card
- * @dev: The 3c501 card about to get zapped
- *
- * Even resetting a 3c501 isn't simple. When you activate reset it loses all
- * its configuration. You must hold the lock when doing this. The function
- * cannot take the lock itself as it is callable from the irq handler.
- */
-
-static void el_reset(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- if (el_debug > 2)
- pr_info("3c501 reset...\n");
- outb(AX_RESET, AX_CMD); /* Reset the chip */
- /* Aux control, irq and loopback enabled */
- outb(AX_LOOP, AX_CMD);
- {
- int i;
- for (i = 0; i < 6; i++) /* Set the station address. */
- outb(dev->dev_addr[i], ioaddr + i);
- }
-
- outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */
- outb(TX_NORM, TX_CMD); /* tx irq on done, collision */
- outb(RX_NORM, RX_CMD); /* Set Rx commands. */
- inb(RX_STATUS); /* Clear status. */
- inb(TX_STATUS);
- lp->txing = 0;
-}
-
-/**
- * el1_close:
- * @dev: 3c501 card to shut down
- *
- * Close a 3c501 card. The IFF_UP flag has been cleared by the user via
- * the SIOCSIFFLAGS ioctl. We stop any further transmissions being queued,
- * and then disable the interrupts. Finally we reset the chip. The effects
- * of the rest will be cleaned up by #el1_open. Always returns 0 indicating
- * a success.
- */
-
-static int el1_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (el_debug > 2)
- pr_info("%s: Shutting down Ethernet card at %#x.\n",
- dev->name, ioaddr);
-
- netif_stop_queue(dev);
-
- /*
- * Free and disable the IRQ.
- */
-
- free_irq(dev->irq, dev);
- outb(AX_RESET, AX_CMD); /* Reset the chip */
-
- return 0;
-}
-
-/**
- * set_multicast_list:
- * @dev: The device to adjust
- *
- * Set or clear the multicast filter for this adaptor to use the best-effort
- * filtering supported. The 3c501 supports only three modes of filtering.
- * It always receives broadcasts and packets for itself. You can choose to
- * optionally receive all packets, or all multicast packets on top of this.
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (dev->flags & IFF_PROMISC) {
- outb(RX_PROM, RX_CMD);
- inb(RX_STATUS);
- } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
- /* Multicast or all multicast is the same */
- outb(RX_MULT, RX_CMD);
- inb(RX_STATUS); /* Clear status. */
- } else {
- outb(RX_NORM, RX_CMD);
- inb(RX_STATUS);
- }
-}
-
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
- return debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 level)
-{
- debug = level;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_msglevel = netdev_get_msglevel,
- .set_msglevel = netdev_set_msglevel,
-};
-
-#ifdef MODULE
-
-static struct net_device *dev_3c501;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "EtherLink I/O base address");
-MODULE_PARM_DESC(irq, "EtherLink IRQ number");
-
-/**
- * init_module:
- *
- * When the driver is loaded as a module this function is called. We fake up
- * a device structure with the base I/O and interrupt set as if it were being
- * called from Space.c. This minimises the extra code that would otherwise
- * be required.
- *
- * Returns 0 for success or -EIO if a card is not found. Returning an error
- * here also causes the module to be unloaded
- */
-
-int __init init_module(void)
-{
- dev_3c501 = el1_probe(-1);
- if (IS_ERR(dev_3c501))
- return PTR_ERR(dev_3c501);
- return 0;
-}
-
-/**
- * cleanup_module:
- *
- * The module is being unloaded. We unhook our network device from the system
- * and then free up the resources we took when the card was found.
- */
-
-void __exit cleanup_module(void)
-{
- struct net_device *dev = dev_3c501;
- unregister_netdev(dev);
- release_region(dev->base_addr, EL1_IO_EXTENT);
- free_netdev(dev);
-}
-
-#endif /* MODULE */
-
-MODULE_AUTHOR("Donald Becker, Alan Cox");
-MODULE_DESCRIPTION("Support for the ancient 3Com 3c501 ethernet card");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/net/ethernet/3com/3c501.h b/drivers/net/ethernet/3com/3c501.h
deleted file mode 100644
index 183fd55f03cb..000000000000
--- a/drivers/net/ethernet/3com/3c501.h
+++ /dev/null
@@ -1,91 +0,0 @@
-
-/*
- * Index to functions.
- */
-
-static int el1_probe1(struct net_device *dev, int ioaddr);
-static int el_open(struct net_device *dev);
-static void el_timeout(struct net_device *dev);
-static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t el_interrupt(int irq, void *dev_id);
-static void el_receive(struct net_device *dev);
-static void el_reset(struct net_device *dev);
-static int el1_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
-
-#define EL1_IO_EXTENT 16
-
-#ifndef EL_DEBUG
-#define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */
-#endif /* Anything above 5 is wordy death! */
-#define debug el_debug
-static int el_debug = EL_DEBUG;
-
-/*
- * Board-specific info in netdev_priv(dev).
- */
-
-struct net_local
-{
- int tx_pkt_start; /* The length of the current Tx packet. */
- int collisions; /* Tx collisions this packet */
- int loading; /* Spot buffer load collisions */
- int txing; /* True if card is in TX mode */
- spinlock_t lock; /* Serializing lock */
-};
-
-
-#define RX_STATUS (ioaddr + 0x06)
-#define RX_CMD RX_STATUS
-#define TX_STATUS (ioaddr + 0x07)
-#define TX_CMD TX_STATUS
-#define GP_LOW (ioaddr + 0x08)
-#define GP_HIGH (ioaddr + 0x09)
-#define RX_BUF_CLR (ioaddr + 0x0A)
-#define RX_LOW (ioaddr + 0x0A)
-#define RX_HIGH (ioaddr + 0x0B)
-#define SAPROM (ioaddr + 0x0C)
-#define AX_STATUS (ioaddr + 0x0E)
-#define AX_CMD AX_STATUS
-#define DATAPORT (ioaddr + 0x0F)
-#define TX_RDY 0x08 /* In TX_STATUS */
-
-#define EL1_DATAPTR 0x08
-#define EL1_RXPTR 0x0A
-#define EL1_SAPROM 0x0C
-#define EL1_DATAPORT 0x0f
-
-/*
- * Writes to the ax command register.
- */
-
-#define AX_OFF 0x00 /* Irq off, buffer access on */
-#define AX_SYS 0x40 /* Load the buffer */
-#define AX_XMIT 0x44 /* Transmit a packet */
-#define AX_RX 0x48 /* Receive a packet */
-#define AX_LOOP 0x0C /* Loopback mode */
-#define AX_RESET 0x80
-
-/*
- * Normal receive mode written to RX_STATUS. We must intr on short packets
- * to avoid bogus rx lockups.
- */
-
-#define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */
-#define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */
-#define RX_MULT 0xE8 /* Accept multicast packets. */
-#define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */
-
-/*
- * TX_STATUS register.
- */
-
-#define TX_COLLISION 0x02
-#define TX_16COLLISIONS 0x04
-#define TX_READY 0x08
-
-#define RX_RUNT 0x08
-#define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */
-#define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */
-
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 633c709b9d99..f36ff99fd394 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1161,8 +1161,8 @@ el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int el3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
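
The strcpy() calls above write into fixed-size fields of struct ethtool_drvinfo with no bound, so an over-long driver or version string could overflow them; strlcpy() limits the copy to the destination size and always NUL-terminates. A minimal user-space sketch of the semantics the kernel helper provides (the function name is invented so the example stands alone):

#include <stddef.h>
#include <string.h>

/* Bounded copy: writes at most size bytes including the terminator,
 * and returns strlen(src) so the caller can detect truncation. */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = (len >= size) ? size - 1 : len;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;
}
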
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 59e1e001bc3f..94c656f5a05d 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1542,9 +1542,10 @@ static void set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx",
+ dev->base_addr);
}
static u32 netdev_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 66df93638085..ffd8de28a76a 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -432,7 +432,7 @@ static int tc574_config(struct pcmcia_device *link)
netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n",
cardname, dev->base_addr, dev->irq, dev->dev_addr);
netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n",
- 8 << config & Ram_size,
+ 8 << (config & Ram_size),
ram_split[(config & Ram_split) >> Ram_split_shift],
config & Autoselect ? "autoselect " : "");
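
The 3c574_cs change above is a pure operator-precedence fix: in C, << binds tighter than &, so "8 << config & Ram_size" parses as "(8 << config) & Ram_size" and the reported FIFO size was wrong; the added parentheses restore the intended shift amount. A tiny stand-alone demonstration, with a made-up mask value rather than the real 3c574 register bits:

#include <stdio.h>

#define RAM_SIZE_MASK 0x07      /* illustrative mask, not the 3c574 bits */

int main(void)
{
        unsigned int config = 0x05;

        /* '<<' has higher precedence than '&'. */
        printf("unparenthesised: %u\n", 8 << config & RAM_SIZE_MASK);   /* (8 << 5) & 7 == 0   */
        printf("parenthesised:   %u\n", 8 << (config & RAM_SIZE_MASK)); /* 8 << (5 & 7) == 256 */
        return 0;
}
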
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ed0feb3cc6fa..1928e2001587 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1293,7 +1293,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
for (i = 0; i < 3; i++)
((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
if (print_info)
pr_cont(" %pM", dev->dev_addr);
/* Unfortunately an all zero eeprom passes the checksum and this
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index eb56174469a7..1c71c763f680 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_3COM
bool "3Com devices"
default y
- depends on ISA || EISA || MCA || PCI || PCMCIA
+ depends on ISA || EISA || PCI || PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -18,23 +18,9 @@ config NET_VENDOR_3COM
if NET_VENDOR_3COM
-config EL1
- tristate "3c501 \"EtherLink\" support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Also, consider buying a
- new card, since the 3c501 is slow, broken, and obsolete: you will
- have problems. Some people suggest to ping ("man ping") a nearby
- machine every minute ("man cron") when using this card.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c501.
-
config EL3
- tristate "3c509/3c529 (MCA)/3c579 \"EtherLink III\" support"
- depends on (ISA || EISA || MCA)
+ tristate "3c509/3c579 \"EtherLink III\" support"
+ depends on (ISA || EISA)
---help---
If you have a network (Ethernet) card belonging to the 3Com
EtherLinkIII series, say Y and read the Ethernet-HOWTO, available
diff --git a/drivers/net/ethernet/3com/Makefile b/drivers/net/ethernet/3com/Makefile
index 1e5382a30ead..74046afab993 100644
--- a/drivers/net/ethernet/3com/Makefile
+++ b/drivers/net/ethernet/3com/Makefile
@@ -2,7 +2,6 @@
# Makefile for the 3Com Ethernet device drivers
#
-obj-$(CONFIG_EL1) += 3c501.o
obj-$(CONFIG_EL3) += 3c509.o
obj-$(CONFIG_3C515) += 3c515.o
obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o
diff --git a/drivers/net/ethernet/8390/3c503.c b/drivers/net/ethernet/8390/3c503.c
deleted file mode 100644
index 49d76bd0dc86..000000000000
--- a/drivers/net/ethernet/8390/3c503.c
+++ /dev/null
@@ -1,777 +0,0 @@
-/* 3c503.c: A shared-memory NS8390 ethernet driver for linux. */
-/*
- Written 1992-94 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
-
- This driver should work with the 3c503 and 3c503/16. It should be used
- in shared memory mode for best performance, although it may also work
- in programmed-I/O mode.
-
- Sources:
- EtherLink II Technical Reference Manual,
- EtherLink II/16 Technical Reference Manual Supplement,
- 3Com Corporation, 5400 Bayfront Plaza, Santa Clara CA 95052-8145
-
- The Crynwr 3c503 packet driver.
-
- Changelog:
-
- Paul Gortmaker : add support for the 2nd 8kB of RAM on 16 bit cards.
- Paul Gortmaker : multiple card support for module users.
- rjohnson@analogic.com : Fix up PIO interface for efficient operation.
- Jeff Garzik : ethtool support
-
-*/
-
-#define DRV_NAME "3c503"
-#define DRV_VERSION "1.10a"
-#define DRV_RELDATE "11/17/2001"
-
-
-static const char version[] =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ethtool.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/byteorder.h>
-
-#include "8390.h"
-#include "3c503.h"
-#define WRD_COUNT 4
-
-static int el2_pio_probe(struct net_device *dev);
-static int el2_probe1(struct net_device *dev, int ioaddr);
-
-/* A zero-terminated list of I/O addresses to be probed in PIO mode. */
-static unsigned int netcard_portlist[] __initdata =
- { 0x300,0x310,0x330,0x350,0x250,0x280,0x2a0,0x2e0,0};
-
-#define EL2_IO_EXTENT 16
-
-static int el2_open(struct net_device *dev);
-static int el2_close(struct net_device *dev);
-static void el2_reset_8390(struct net_device *dev);
-static void el2_init_card(struct net_device *dev);
-static void el2_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void el2_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset);
-static void el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static const struct ethtool_ops netdev_ethtool_ops;
-
-
-/* This routine probes for a memory-mapped 3c503 board by looking for
- the "location register" at the end of the jumpered boot PROM space.
- This works even if a PROM isn't there.
-
- If the ethercard isn't found there is an optional probe for
- ethercard jumpered to programmed-I/O mode.
- */
-static int __init do_el2_probe(struct net_device *dev)
-{
- int *addr, addrs[] = { 0xddffe, 0xd9ffe, 0xcdffe, 0xc9ffe, 0};
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return el2_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (addr = addrs; *addr; addr++) {
- void __iomem *p = ioremap(*addr, 1);
- unsigned base_bits;
- int i;
-
- if (!p)
- continue;
- base_bits = readb(p);
- iounmap(p);
- i = ffs(base_bits) - 1;
- if (i == -1 || base_bits != (1 << i))
- continue;
- if (el2_probe1(dev, netcard_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-#if ! defined(no_probe_nonshared_memory)
- return el2_pio_probe(dev);
-#else
- return -ENODEV;
-#endif
-}
-
-/* Try all of the locations that aren't obviously empty. This touches
- a lot of locations, and is much riskier than the code above. */
-static int __init
-el2_pio_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return el2_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; netcard_portlist[i]; i++) {
- if (el2_probe1(dev, netcard_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init el2_probe(int unit)
-{
- struct net_device *dev = alloc_eip_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_el2_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops el2_netdev_ops = {
- .ndo_open = el2_open,
- .ndo_stop = el2_close,
-
- .ndo_start_xmit = eip_start_xmit,
- .ndo_tx_timeout = eip_tx_timeout,
- .ndo_get_stats = eip_get_stats,
- .ndo_set_rx_mode = eip_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = eip_poll,
-#endif
-};
-
-/* Probe for the Etherlink II card at I/O port base IOADDR,
- returning non-zero on success. If found, set the station
- address and memory parameters in DEVICE. */
-static int __init
-el2_probe1(struct net_device *dev, int ioaddr)
-{
- int i, iobase_reg, membase_reg, saved_406, wordlength, retval;
- static unsigned version_printed;
- unsigned long vendor_id;
-
- if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- if (!request_region(ioaddr + 0x400, 8, DRV_NAME)) {
- retval = -EBUSY;
- goto out;
- }
-
- /* Reset and/or avoid any lurking NE2000 */
- if (inb(ioaddr + 0x408) == 0xff) {
- mdelay(1);
- retval = -ENODEV;
- goto out1;
- }
-
- /* We verify that it's a 3C503 board by checking the first three octets
- of its ethernet address. */
- iobase_reg = inb(ioaddr+0x403);
- membase_reg = inb(ioaddr+0x404);
- /* ASIC location registers should be 0 or have only a single bit set. */
- if ((iobase_reg & (iobase_reg - 1)) ||
- (membase_reg & (membase_reg - 1))) {
- retval = -ENODEV;
- goto out1;
- }
- saved_406 = inb_p(ioaddr + 0x406);
- outb_p(ECNTRL_RESET|ECNTRL_THIN, ioaddr + 0x406); /* Reset it... */
- outb_p(ECNTRL_THIN, ioaddr + 0x406);
- /* Map the station addr PROM into the lower I/O ports. We now check
- for both the old and new 3Com prefix */
- outb(ECNTRL_SAPROM|ECNTRL_THIN, ioaddr + 0x406);
- vendor_id = inb(ioaddr)*0x10000 + inb(ioaddr + 1)*0x100 + inb(ioaddr + 2);
- if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID)) {
- /* Restore the register we frobbed. */
- outb(saved_406, ioaddr + 0x406);
- retval = -ENODEV;
- goto out1;
- }
-
- if (ei_debug && version_printed++ == 0)
- pr_debug("%s", version);
-
- dev->base_addr = ioaddr;
-
- pr_info("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr);
-
- /* Retrieve and print the ethernet address. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
- pr_cont("%pM", dev->dev_addr);
-
- /* Map the 8390 back into the window. */
- outb(ECNTRL_THIN, ioaddr + 0x406);
-
- /* Check for EL2/16 as described in tech. man. */
- outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
- outb_p(0, ioaddr + EN0_DCFG);
- outb_p(E8390_PAGE2, ioaddr + E8390_CMD);
- wordlength = inb_p(ioaddr + EN0_DCFG) & ENDCFG_WTS;
- outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
-
- /* Probe for, turn on and clear the board's shared memory. */
- if (ei_debug > 2)
- pr_cont(" memory jumpers %2.2x ", membase_reg);
- outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */
-
- /* This should be probed for (or set via an ioctl()) at run-time.
- Right now we use a sleazy hack to pass in the interface number
- at boot-time via the low bits of the mem_end field. That value is
- unused, and the low bits would be discarded even if it was used. */
-#if defined(EI8390_THICK) || defined(EL2_AUI)
- ei_status.interface_num = 1;
-#else
- ei_status.interface_num = dev->mem_end & 0xf;
-#endif
- pr_cont(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex");
-
- if ((membase_reg & 0xf0) == 0) {
- dev->mem_start = 0;
- ei_status.name = "3c503-PIO";
- ei_status.mem = NULL;
- } else {
- dev->mem_start = ((membase_reg & 0xc0) ? 0xD8000 : 0xC8000) +
- ((membase_reg & 0xA0) ? 0x4000 : 0);
-#define EL2_MEMSIZE (EL2_MB1_STOP_PG - EL2_MB1_START_PG)*256
- ei_status.mem = ioremap(dev->mem_start, EL2_MEMSIZE);
-
-#ifdef EL2MEMTEST
- /* This has never found an error, but someone might care.
- Note that it only tests the 2nd 8kB on 16kB 3c503/16
- cards between card addr. 0x2000 and 0x3fff. */
- { /* Check the card's memory. */
- void __iomem *mem_base = ei_status.mem;
- unsigned int test_val = 0xbbadf00d;
- writel(0xba5eba5e, mem_base);
- for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) {
- writel(test_val, mem_base + i);
- if (readl(mem_base) != 0xba5eba5e ||
- readl(mem_base + i) != test_val) {
- pr_warning("3c503: memory failure or memory address conflict.\n");
- dev->mem_start = 0;
- ei_status.name = "3c503-PIO";
- iounmap(mem_base);
- ei_status.mem = NULL;
- break;
- }
- test_val += 0x55555555;
- writel(0, mem_base + i);
- }
- }
-#endif /* EL2MEMTEST */
-
- if (dev->mem_start)
- dev->mem_end = dev->mem_start + EL2_MEMSIZE;
-
- if (wordlength) { /* No Tx pages to skip over to get to Rx */
- ei_status.priv = 0;
- ei_status.name = "3c503/16";
- } else {
- ei_status.priv = TX_PAGES * 256;
- ei_status.name = "3c503";
- }
- }
-
- /*
- Divide up the memory on the card. This is the same regardless of
- whether shared-mem or PIO is used. For 16 bit cards (16kB RAM),
- we use the entire 8k of bank1 for an Rx ring. We only use 3k
- of the bank0 for 2 full size Tx packet slots. For 8 bit cards,
- (8kB RAM) we use 3kB of bank1 for two Tx slots, and the remaining
- 5kB for an Rx ring. */
-
- if (wordlength) {
- ei_status.tx_start_page = EL2_MB0_START_PG;
- ei_status.rx_start_page = EL2_MB1_START_PG;
- } else {
- ei_status.tx_start_page = EL2_MB1_START_PG;
- ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
- }
-
- /* Finish setting the board's parameters. */
- ei_status.stop_page = EL2_MB1_STOP_PG;
- ei_status.word16 = wordlength;
- ei_status.reset_8390 = el2_reset_8390;
- ei_status.get_8390_hdr = el2_get_8390_hdr;
- ei_status.block_input = el2_block_input;
- ei_status.block_output = el2_block_output;
-
- if (dev->irq == 2)
- dev->irq = 9;
- else if (dev->irq > 5 && dev->irq != 9) {
- pr_warning("3c503: configured interrupt %d invalid, will use autoIRQ.\n",
- dev->irq);
- dev->irq = 0;
- }
-
- ei_status.saved_irq = dev->irq;
-
- dev->netdev_ops = &el2_netdev_ops;
- dev->ethtool_ops = &netdev_ethtool_ops;
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
-
- if (dev->mem_start)
- pr_info("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n",
- dev->name, ei_status.name, (wordlength+1)<<3,
- dev->mem_start, dev->mem_end-1);
-
- else
- {
- ei_status.tx_start_page = EL2_MB1_START_PG;
- ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
- pr_info("%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n",
- dev->name, ei_status.name, (wordlength+1)<<3);
- }
- release_region(ioaddr + 0x400, 8);
- return 0;
-out1:
- release_region(ioaddr + 0x400, 8);
-out:
- release_region(ioaddr, EL2_IO_EXTENT);
- return retval;
-}
-
-static irqreturn_t el2_probe_interrupt(int irq, void *seen)
-{
- *(bool *)seen = true;
- return IRQ_HANDLED;
-}
-
-static int
-el2_open(struct net_device *dev)
-{
- int retval;
-
- if (dev->irq < 2) {
- static const int irqlist[] = {5, 9, 3, 4, 0};
- const int *irqp = irqlist;
-
- outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
- do {
- bool seen;
-
- retval = request_irq(*irqp, el2_probe_interrupt, 0,
- dev->name, &seen);
- if (retval == -EBUSY)
- continue;
- if (retval < 0)
- goto err_disable;
-
- /* Twinkle the interrupt, and check if it's seen. */
- seen = false;
- smp_wmb();
- outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
- outb_p(0x00, E33G_IDCFR);
- msleep(1);
- free_irq(*irqp, &seen);
- if (!seen)
- continue;
-
- retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
- dev->name, dev);
- if (retval == -EBUSY)
- continue;
- if (retval < 0)
- goto err_disable;
- break;
- } while (*++irqp);
-
- if (*irqp == 0) {
- err_disable:
- outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
- return -EAGAIN;
- }
- } else {
- if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) {
- return retval;
- }
- }
-
- el2_init_card(dev);
- eip_open(dev);
- return 0;
-}
-
-static int
-el2_close(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- dev->irq = ei_status.saved_irq;
- outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
-
- eip_close(dev);
- return 0;
-}
-
-/* This is called whenever we have a unrecoverable failure:
- transmit timeout
- Bad ring buffer packet header
- */
-static void
-el2_reset_8390(struct net_device *dev)
-{
- if (ei_debug > 1) {
- pr_debug("%s: Resetting the 3c503 board...", dev->name);
- pr_cont(" %#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR),
- E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR));
- }
- outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL);
- ei_status.txing = 0;
- outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
- el2_init_card(dev);
- if (ei_debug > 1)
- pr_cont("done\n");
-}
-
-/* Initialize the 3c503 GA registers after a reset. */
-static void
-el2_init_card(struct net_device *dev)
-{
- /* Unmap the station PROM and select the DIX or BNC connector. */
- outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-
- /* Set ASIC copy of rx's first and last+1 buffer pages */
- /* These must be the same as in the 8390. */
- outb(ei_status.rx_start_page, E33G_STARTPG);
- outb(ei_status.stop_page, E33G_STOPPG);
-
- /* Point the vector pointer registers somewhere ?harmless?. */
- outb(0xff, E33G_VP2); /* Point at the ROM restart location 0xffff0 */
- outb(0xff, E33G_VP1);
- outb(0x00, E33G_VP0);
- /* Turn off all interrupts until we're opened. */
- outb_p(0x00, dev->base_addr + EN0_IMR);
- /* Enable IRQs iff started. */
- outb(EGACFR_NORM, E33G_GACFR);
-
- /* Set the interrupt line. */
- outb_p((0x04 << (dev->irq == 9 ? 2 : dev->irq)), E33G_IDCFR);
- outb_p((WRD_COUNT << 1), E33G_DRQCNT); /* Set burst size to 8 */
- outb_p(0x20, E33G_DMAAH); /* Put a valid addr in the GA DMA */
- outb_p(0x00, E33G_DMAAL);
- return; /* We always succeed */
-}
-
-/*
- * Either use the shared memory (if enabled on the board) or put the packet
- * out through the ASIC FIFO.
- */
-static void
-el2_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- unsigned short int *wrd;
- int boguscount; /* timeout counter */
- unsigned short word; /* temporary for better machine code */
- void __iomem *base = ei_status.mem;
-
- if (ei_status.word16) /* Tx packets go into bank 0 on EL2/16 card */
- outb(EGACFR_RSEL|EGACFR_TCM, E33G_GACFR);
- else
- outb(EGACFR_NORM, E33G_GACFR);
-
- if (base) { /* Shared memory transfer */
- memcpy_toio(base + ((start_page - ei_status.tx_start_page) << 8),
- buf, count);
- outb(EGACFR_NORM, E33G_GACFR); /* Back to bank1 in case on bank0 */
- return;
- }
-
-/*
- * No shared memory, put the packet out the other way.
- * Set up then start the internal memory transfer to Tx Start Page
- */
-
- word = (unsigned short)start_page;
- outb(word&0xFF, E33G_DMAAH);
- outb(word>>8, E33G_DMAAL);
-
- outb_p((ei_status.interface_num ? ECNTRL_AUI : ECNTRL_THIN ) | ECNTRL_OUTPUT
- | ECNTRL_START, E33G_CNTRL);
-
-/*
- * Here I am going to write data to the FIFO as quickly as possible.
- * Note that E33G_FIFOH is defined incorrectly. It is really
- * E33G_FIFOL, the lowest port address for both the byte and
- * word write. Variable 'count' is NOT checked. Caller must supply a
- * valid count. Note that I may write a harmless extra byte to the
- * 8390 if the byte-count was not even.
- */
- wrd = (unsigned short int *) buf;
- count = (count + 1) >> 1;
- for(;;)
- {
- boguscount = 0x1000;
- while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
- {
- if(!boguscount--)
- {
- pr_notice("%s: FIFO blocked in el2_block_output.\n", dev->name);
- el2_reset_8390(dev);
- goto blocked;
- }
- }
- if(count > WRD_COUNT)
- {
- outsw(E33G_FIFOH, wrd, WRD_COUNT);
- wrd += WRD_COUNT;
- count -= WRD_COUNT;
- }
- else
- {
- outsw(E33G_FIFOH, wrd, count);
- break;
- }
- }
- blocked:;
- outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-}
-
-/* Read the 4 byte, page aligned 8390 specific header. */
-static void
-el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int boguscount;
- void __iomem *base = ei_status.mem;
- unsigned short word;
-
- if (base) { /* Use the shared memory. */
- void __iomem *hdr_start = base + ((ring_page - EL2_MB1_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = le16_to_cpu(hdr->count);
- return;
- }
-
-/*
- * No shared memory, use programmed I/O.
- */
-
- word = (unsigned short)ring_page;
- outb(word&0xFF, E33G_DMAAH);
- outb(word>>8, E33G_DMAAL);
-
- outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
- | ECNTRL_START, E33G_CNTRL);
- boguscount = 0x1000;
- while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
- {
- if(!boguscount--)
- {
- pr_notice("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name);
- memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr));
- el2_reset_8390(dev);
- goto blocked;
- }
- }
- insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr))>> 1);
- blocked:;
- outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-}
-
-
-static void
-el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int boguscount = 0;
- void __iomem *base = ei_status.mem;
- unsigned short int *buf;
- unsigned short word;
-
- /* Maybe enable shared memory just be to be safe... nahh.*/
- if (base) { /* Use the shared memory. */
- ring_offset -= (EL2_MB1_START_PG<<8);
- if (ring_offset + count > EL2_MEMSIZE) {
- /* We must wrap the input move. */
- int semi_count = EL2_MEMSIZE - ring_offset;
- memcpy_fromio(skb->data, base + ring_offset, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count);
- } else {
- memcpy_fromio(skb->data, base + ring_offset, count);
- }
- return;
- }
-
-/*
- * No shared memory, use programmed I/O.
- */
- word = (unsigned short) ring_offset;
- outb(word>>8, E33G_DMAAH);
- outb(word&0xFF, E33G_DMAAL);
-
- outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
- | ECNTRL_START, E33G_CNTRL);
-
-/*
- * Here I also try to get data as fast as possible. I am betting that I
- * can read one extra byte without clobbering anything in the kernel because
- * this would only occur on an odd byte-count and allocation of skb->data
- * is word-aligned. Variable 'count' is NOT checked. Caller must check
- * for a valid count.
- * [This is currently quite safe.... but if one day the 3c503 explodes
- * you know where to come looking ;)]
- */
-
- buf = (unsigned short int *) skb->data;
- count = (count + 1) >> 1;
- for(;;)
- {
- boguscount = 0x1000;
- while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
- {
- if(!boguscount--)
- {
- pr_notice("%s: FIFO blocked in el2_block_input.\n", dev->name);
- el2_reset_8390(dev);
- goto blocked;
- }
- }
- if(count > WRD_COUNT)
- {
- insw(E33G_FIFOH, buf, WRD_COUNT);
- buf += WRD_COUNT;
- count -= WRD_COUNT;
- }
- else
- {
- insw(E33G_FIFOH, buf, count);
- break;
- }
- }
- blocked:;
- outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-}
-
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
-#ifdef MODULE
-#define MAX_EL2_CARDS 4 /* Max number of EL2 cards per module */
-
-static struct net_device *dev_el2[MAX_EL2_CARDS];
-static int io[MAX_EL2_CARDS];
-static int irq[MAX_EL2_CARDS];
-static int xcvr[MAX_EL2_CARDS]; /* choose int. or ext. xcvr */
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(xcvr, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)");
-MODULE_DESCRIPTION("3Com ISA EtherLink II, II/16 (3c503, 3c503/16) driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-int __init
-init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- pr_notice("3c503.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_eip_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
- if (do_el2_probe(dev) == 0) {
- dev_el2[found++] = dev;
- continue;
- }
- free_netdev(dev);
- pr_warning("3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: el2_close() handles free_irq */
- release_region(dev->base_addr, EL2_IO_EXTENT);
- if (ei_status.mem)
- iounmap(ei_status.mem);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
- struct net_device *dev = dev_el2[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/3c503.h b/drivers/net/ethernet/8390/3c503.h
deleted file mode 100644
index e2367b82a2ec..000000000000
--- a/drivers/net/ethernet/8390/3c503.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* Definitions for the 3Com 3c503 Etherlink 2. */
-/* This file is distributed under the GPL.
- Many of these names and comments are directly from the Crynwr packet
- drivers, which are released under the GPL. */
-
-#define EL2H (dev->base_addr + 0x400)
-#define EL2L (dev->base_addr)
-
-/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran
- out of available addresses on the first one... */
-
-#define OLD_3COM_ID 0x02608c
-#define NEW_3COM_ID 0x0020af
-
-/* Shared memory management parameters. NB: The 8 bit cards have only
- one bank (MB1) which serves both Tx and Rx packet space. The 16bit
- cards have 2 banks, MB0 for Tx packets, and MB1 for Rx packets.
- You choose which bank appears in the sh. mem window with EGACFR_MBSn */
-
-#define EL2_MB0_START_PG (0x00) /* EL2/16 Tx packets go in bank 0 */
-#define EL2_MB1_START_PG (0x20) /* First page of bank 1 */
-#define EL2_MB1_STOP_PG (0x40) /* Last page +1 of bank 1 */
-
-/* 3Com 3c503 ASIC registers */
-#define E33G_STARTPG (EL2H+0) /* Start page, matching EN0_STARTPG */
-#define E33G_STOPPG (EL2H+1) /* Stop page, must match EN0_STOPPG */
-#define E33G_DRQCNT (EL2H+2) /* DMA burst count */
-#define E33G_IOBASE (EL2H+3) /* Read of I/O base jumpers. */
- /* (non-useful, but it also appears at the end of EPROM space) */
-#define E33G_ROMBASE (EL2H+4) /* Read of memory base jumpers. */
-#define E33G_GACFR (EL2H+5) /* Config/setup bits for the ASIC GA */
-#define E33G_CNTRL (EL2H+6) /* Board's main control register */
-#define E33G_STATUS (EL2H+7) /* Status on completions. */
-#define E33G_IDCFR (EL2H+8) /* Interrupt/DMA config register */
- /* (Which IRQ to assert, DMA chan to use) */
-#define E33G_DMAAH (EL2H+9) /* High byte of DMA address reg */
-#define E33G_DMAAL (EL2H+10) /* Low byte of DMA address reg */
-/* "Vector pointer" - if this address matches a read, the EPROM (rather than
- shared RAM) is mapped into memory space. */
-#define E33G_VP2 (EL2H+11)
-#define E33G_VP1 (EL2H+12)
-#define E33G_VP0 (EL2H+13)
-#define E33G_FIFOH (EL2H+14) /* FIFO for programmed I/O moves */
-#define E33G_FIFOL (EL2H+15) /* ... low byte of above. */
-
-/* Bits in E33G_CNTRL register: */
-
-#define ECNTRL_RESET (0x01) /* Software reset of the ASIC and 8390 */
-#define ECNTRL_THIN (0x02) /* Onboard xcvr enable, AUI disable */
-#define ECNTRL_AUI (0x00) /* Onboard xcvr disable, AUI enable */
-#define ECNTRL_SAPROM (0x04) /* Map the station address prom */
-#define ECNTRL_DBLBFR (0x20) /* FIFO configuration bit */
-#define ECNTRL_OUTPUT (0x40) /* PC-to-3C503 direction if 1 */
-#define ECNTRL_INPUT (0x00) /* 3C503-to-PC direction if 0 */
-#define ECNTRL_START (0x80) /* Start the DMA logic */
-
-/* Bits in E33G_STATUS register: */
-
-#define ESTAT_DPRDY (0x80) /* Data port (of FIFO) ready */
-#define ESTAT_UFLW (0x40) /* Tried to read FIFO when it was empty */
-#define ESTAT_OFLW (0x20) /* Tried to write FIFO when it was full */
-#define ESTAT_DTC (0x10) /* Terminal Count from PC bus DMA logic */
-#define ESTAT_DIP (0x08) /* DMA In Progress */
-
-/* Bits in E33G_GACFR register: */
-
-#define EGACFR_NIM (0x80) /* NIC interrupt mask */
-#define EGACFR_TCM (0x40) /* DMA term. count interrupt mask */
-#define EGACFR_RSEL (0x08) /* Map a bank of card mem into system mem */
-#define EGACFR_MBS2 (0x04) /* Memory bank select, bit 2. */
-#define EGACFR_MBS1 (0x02) /* Memory bank select, bit 1. */
-#define EGACFR_MBS0 (0x01) /* Memory bank select, bit 0. */
-
-#define EGACFR_NORM (0x49) /* TCM | RSEL | MBS0 */
-#define EGACFR_IRQOFF (0xc9) /* TCM | RSEL | MBS0 | NIM */
-
-/*
- MBS2 MBS1 MBS0 Sh. mem windows card mem at:
- ---- ---- ---- -----------------------------
- 0 0 0 0x0000 -- bank 0
- 0 0 1 0x2000 -- bank 1 (only choice for 8bit card)
- 0 1 0 0x4000 -- bank 2, not used
- 0 1 1 0x6000 -- bank 3, not used
-
-There was going to be a 32k card that used bank 2 and 3, but it
-never got produced.
-
-*/
-
-
-/* End of 3C503 parameter definitions */
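The EGACFR_NORM and EGACFR_IRQOFF values defined above are simply OR-combinations of the individual GACFR bits. A quick self-contained check of that arithmetic -- the constants are copied from the removed header purely for illustration:

#include <assert.h>

#define EGACFR_NIM   0x80   /* NIC interrupt mask */
#define EGACFR_TCM   0x40   /* DMA terminal-count interrupt mask */
#define EGACFR_RSEL  0x08   /* map a bank of card memory into system memory */
#define EGACFR_MBS0  0x01   /* memory bank select, bit 0 (bank 1) */

int main(void)
{
        assert((EGACFR_TCM | EGACFR_RSEL | EGACFR_MBS0) == 0x49);              /* EGACFR_NORM   */
        assert((EGACFR_TCM | EGACFR_RSEL | EGACFR_MBS0 | EGACFR_NIM) == 0xc9); /* EGACFR_IRQOFF */
        return 0;
}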
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index e1219e037c04..a5f91e1e8fe3 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -5,10 +5,7 @@
config NET_VENDOR_8390
bool "National Semi-conductor 8390 devices"
default y
- depends on NET_VENDOR_NATSEMI && (AMIGA_PCMCIA || PCI || SUPERH || \
- ISA || MCA || EISA || MAC || M32R || MACH_TX49XX || \
- MCA_LEGACY || H8300 || ARM || MIPS || ZORRO || PCMCIA || \
- EXPERIMENTAL)
+ depends on NET_VENDOR_NATSEMI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -21,30 +18,6 @@ config NET_VENDOR_8390
if NET_VENDOR_8390
-config EL2
- tristate "3c503 \"EtherLink II\" support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c503.
-
-config AC3200
- tristate "Ansel Communications EISA 3200 support (EXPERIMENTAL)"
- depends on PCI && (ISA || EISA) && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ac3200.
-
config PCMCIA_AXNET
tristate "Asix AX88190 PCMCIA support"
depends on PCMCIA
@@ -74,54 +47,6 @@ config AX88796_93CX6
---help---
Select this if your platform comes with an external 93CX6 eeprom.
-config E2100
- tristate "Cabletron E21xx support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called e2100.
-
-config ES3210
- tristate "Racal-Interlan EISA ES3210 support (EXPERIMENTAL)"
- depends on PCI && EISA && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called es3210.
-
-config HPLAN_PLUS
- tristate "HP PCLAN+ (27247B and 27252A) support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called hp-plus.
-
-config HPLAN
- tristate "HP PCLAN (27245 and other 27xxx series) support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called hp.
-
config HYDRA
tristate "Hydra support"
depends on ZORRO
@@ -140,18 +65,6 @@ config ARM_ETHERH
If you have an Acorn system with one of these network cards, you
should say Y to this option if you wish to use it with Linux.
-config LNE390
- tristate "Mylex EISA LNE390A/B support (EXPERIMENTAL)"
- depends on PCI && EISA && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called lne390.
-
config MAC8390
bool "Macintosh NS 8390 based ethernet cards"
depends on MAC
@@ -187,11 +100,7 @@ config NE2000
without a specific driver are compatible with NE2000.
If you have a PCI NE2000 card however, say N here and Y to "PCI
- NE2000 and clone support" under "EISA, VLB, PCI and on board
- controllers" below. If you have a NE2000 card and are running on
- an MCA system (a bus system used on some IBM PS/2 computers and
- laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
- below.
+ NE2000 and clone support" below.
To compile this driver as a module, choose M here. The module
will be called ne.
@@ -226,19 +135,6 @@ config APNE
To compile this driver as a module, choose M here: the module
will be called apne.
-config NE3210
- tristate "Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)"
- depends on PCI && EISA && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that this driver
- will NOT WORK for NE3200 cards as they are completely different.
-
- To compile this driver as a module, choose M here. The module
- will be called ne3210.
-
config PCMCIA_PCNET
tristate "NE2000 compatible PCMCIA support"
depends on PCMCIA
@@ -288,18 +184,6 @@ config ULTRA
To compile this driver as a module, choose M here. The module
will be called smc-ultra.
-config ULTRA32
- tristate "SMC Ultra32 EISA support"
- depends on EISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called smc-ultra32.
-
config WD80x3
tristate "WD80*3 support"
depends on ISA
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index f43038babf86..588954a79b2a 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -3,27 +3,17 @@
#
obj-$(CONFIG_MAC8390) += mac8390.o
-obj-$(CONFIG_AC3200) += ac3200.o 8390.o
obj-$(CONFIG_APNE) += apne.o 8390.o
obj-$(CONFIG_ARM_ETHERH) += etherh.o
obj-$(CONFIG_AX88796) += ax88796.o
-obj-$(CONFIG_E2100) += e2100.o 8390.o
-obj-$(CONFIG_EL2) += 3c503.o 8390p.o
-obj-$(CONFIG_ES3210) += es3210.o 8390.o
-obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
-obj-$(CONFIG_HPLAN) += hp.o 8390p.o
obj-$(CONFIG_HYDRA) += hydra.o 8390.o
-obj-$(CONFIG_LNE390) += lne390.o 8390.o
obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
-obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
-obj-$(CONFIG_NE3210) += ne3210.o 8390.o
obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o
obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
obj-$(CONFIG_STNIC) += stnic.o 8390.o
obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
-obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
obj-$(CONFIG_WD80x3) += wd.o 8390.o
obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
diff --git a/drivers/net/ethernet/8390/ac3200.c b/drivers/net/ethernet/8390/ac3200.c
deleted file mode 100644
index ccf07942ff6e..000000000000
--- a/drivers/net/ethernet/8390/ac3200.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/* ac3200.c: A driver for the Ansel Communications EISA ethernet adaptor. */
-/*
- Written 1993, 1994 by Donald Becker.
- Copyright 1993 United States Government as represented by the Director,
- National Security Agency. This software may only be used and distributed
- according to the terms of the GNU General Public License as modified by SRC,
- incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- This is driver for the Ansel Communications Model 3200 EISA Ethernet LAN
- Adapter. The programming information is from the users manual, as related
- by glee@ardnassak.math.clemson.edu.
-
- Changelog:
-
- Paul Gortmaker 05/98 : add support for shared mem above 1MB.
-
- */
-
-static const char version[] =
- "ac3200.c:v1.01 7/1/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#include "8390.h"
-
-#define DRV_NAME "ac3200"
-
-/* Offsets from the base address. */
-#define AC_NIC_BASE 0x00
-#define AC_SA_PROM 0x16 /* The station address PROM. */
-#define AC_ADDR0 0x00 /* Prefix station address values. */
-#define AC_ADDR1 0x40
-#define AC_ADDR2 0x90
-#define AC_ID_PORT 0xC80
-#define AC_EISA_ID 0x0110d305
-#define AC_RESET_PORT 0xC84
-#define AC_RESET 0x00
-#define AC_ENABLE 0x01
-#define AC_CONFIG 0xC90 /* The configuration port. */
-
-#define AC_IO_EXTENT 0x20
- /* Actually accessed is:
- * AC_NIC_BASE (0-15)
- * AC_SA_PROM (0-5)
- * AC_ID_PORT (0-3)
- * AC_RESET_PORT
- * AC_CONFIG
- */
-
-/* Decoding of the configuration register. */
-static unsigned char config2irqmap[8] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
-static int addrmap[8] =
-{0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0 };
-static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2"};
-
-#define config2irq(configval) config2irqmap[((configval) >> 3) & 7]
-#define config2mem(configval) addrmap[(configval) & 7]
-#define config2name(configval) port_name[((configval) >> 6) & 3]
-
-/* First and last 8390 pages. */
-#define AC_START_PG 0x00 /* First page of 8390 TX buffer */
-#define AC_STOP_PG 0x80 /* Last page +1 of the 8390 RX ring */
-
-static int ac_probe1(int ioaddr, struct net_device *dev);
-
-static int ac_open(struct net_device *dev);
-static void ac_reset_8390(struct net_device *dev);
-static void ac_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void ac_block_output(struct net_device *dev, const int count,
- const unsigned char *buf, const int start_page);
-static void ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-
-static int ac_close_card(struct net_device *dev);
-
-
-/* Probe for the AC3200.
-
- The AC3200 can be identified by either the EISA configuration registers,
- or the unique value in the station address PROM.
- */
-
-static int __init do_ac3200_probe(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- int irq = dev->irq;
- int mem_start = dev->mem_start;
-
- if (ioaddr > 0x1ff) /* Check a single specified location. */
- return ac_probe1(ioaddr, dev);
- else if (ioaddr > 0) /* Don't probe at all. */
- return -ENXIO;
-
- if ( ! EISA_bus)
- return -ENXIO;
-
- for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- if (ac_probe1(ioaddr, dev) == 0)
- return 0;
- dev->irq = irq;
- dev->mem_start = mem_start;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init ac3200_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_ac3200_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops ac_netdev_ops = {
- .ndo_open = ac_open,
- .ndo_stop = ac_close_card,
-
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_rx_mode = ei_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
-#endif
-};
-
-static int __init ac_probe1(int ioaddr, struct net_device *dev)
-{
- int i, retval;
-
- if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- if (inb_p(ioaddr + AC_ID_PORT) == 0xff) {
- retval = -ENODEV;
- goto out;
- }
-
- if (inl(ioaddr + AC_ID_PORT) != AC_EISA_ID) {
- retval = -ENODEV;
- goto out;
- }
-
-#ifndef final_version
- printk(KERN_DEBUG "AC3200 ethercard configuration register is %#02x,"
- " EISA ID %02x %02x %02x %02x.\n", inb(ioaddr + AC_CONFIG),
- inb(ioaddr + AC_ID_PORT + 0), inb(ioaddr + AC_ID_PORT + 1),
- inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3));
-#endif
-
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i);
-
- printk(KERN_DEBUG "AC3200 in EISA slot %d, node %pM",
- ioaddr/0x1000, dev->dev_addr);
-#if 0
- /* Check the vendor ID/prefix. Redundant after checking the EISA ID */
- if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0
- || inb(ioaddr + AC_SA_PROM + 1) != AC_ADDR1
- || inb(ioaddr + AC_SA_PROM + 2) != AC_ADDR2 ) {
- printk(", not found (invalid prefix).\n");
- retval = -ENODEV;
- goto out;
- }
-#endif
-
- /* Assign and allocate the interrupt now. */
- if (dev->irq == 0) {
- dev->irq = config2irq(inb(ioaddr + AC_CONFIG));
- printk(", using");
- } else {
- dev->irq = irq_canonicalize(dev->irq);
- printk(", assigning");
- }
-
- retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
- if (retval) {
- printk (" nothing! Unable to get IRQ %d.\n", dev->irq);
- goto out;
- }
-
- printk(" IRQ %d, %s port\n", dev->irq, port_name[dev->if_port]);
-
- dev->base_addr = ioaddr;
-
-#ifdef notyet
- if (dev->mem_start) { /* Override the value from the board. */
- for (i = 0; i < 7; i++)
- if (addrmap[i] == dev->mem_start)
- break;
- if (i >= 7)
- i = 0;
- outb((inb(ioaddr + AC_CONFIG) & ~7) | i, ioaddr + AC_CONFIG);
- }
-#endif
-
- dev->if_port = inb(ioaddr + AC_CONFIG) >> 6;
- dev->mem_start = config2mem(inb(ioaddr + AC_CONFIG));
-
- printk("%s: AC3200 at %#3x with %dkB memory at physical address %#lx.\n",
- dev->name, ioaddr, AC_STOP_PG/4, dev->mem_start);
-
- /*
- * BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
- * the card mem within the region covered by `normal' RAM !!!
- *
- * ioremap() will fail in that case.
- */
- ei_status.mem = ioremap(dev->mem_start, AC_STOP_PG*0x100);
- if (!ei_status.mem) {
- printk(KERN_ERR "ac3200.c: Unable to remap card memory above 1MB !!\n");
- printk(KERN_ERR "ac3200.c: Try using EISA SCU to set memory below 1MB.\n");
- printk(KERN_ERR "ac3200.c: Driver NOT installed.\n");
- retval = -EINVAL;
- goto out1;
- }
- printk("ac3200.c: remapped %dkB card memory to virtual address %p\n",
- AC_STOP_PG/4, ei_status.mem);
-
- dev->mem_start = (unsigned long)ei_status.mem;
- dev->mem_end = dev->mem_start + (AC_STOP_PG - AC_START_PG)*256;
-
- ei_status.name = "AC3200";
- ei_status.tx_start_page = AC_START_PG;
- ei_status.rx_start_page = AC_START_PG + TX_PAGES;
- ei_status.stop_page = AC_STOP_PG;
- ei_status.word16 = 1;
-
- if (ei_debug > 0)
- printk(version);
-
- ei_status.reset_8390 = &ac_reset_8390;
- ei_status.block_input = &ac_block_input;
- ei_status.block_output = &ac_block_output;
- ei_status.get_8390_hdr = &ac_get_8390_hdr;
-
- dev->netdev_ops = &ac_netdev_ops;
- NS8390_init(dev, 0);
-
- retval = register_netdev(dev);
- if (retval)
- goto out2;
- return 0;
-out2:
- if (ei_status.reg0)
- iounmap(ei_status.mem);
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, AC_IO_EXTENT);
- return retval;
-}
-
-static int ac_open(struct net_device *dev)
-{
-#ifdef notyet
- /* Someday we may enable the IRQ and shared memory here. */
- int ioaddr = dev->base_addr;
-#endif
-
- ei_open(dev);
- return 0;
-}
-
-static void ac_reset_8390(struct net_device *dev)
-{
- ushort ioaddr = dev->base_addr;
-
- outb(AC_RESET, ioaddr + AC_RESET_PORT);
- if (ei_debug > 1) printk("resetting AC3200, t=%ld...", jiffies);
-
- ei_status.txing = 0;
- outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
-
-static void
-ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - AC_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
-}
-
-/* Block input and output are easy on shared memory ethercards, the only
- complication is when the ring buffer wraps. */
-
-static void ac_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *start = ei_status.mem + ring_offset - AC_START_PG*256;
-
- if (ring_offset + count > AC_STOP_PG*256) {
- /* We must wrap the input move. */
- int semi_count = AC_STOP_PG*256 - ring_offset;
- memcpy_fromio(skb->data, start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count,
- ei_status.mem + TX_PAGES*256, count);
- } else {
- memcpy_fromio(skb->data, start, count);
- }
-}
-
-static void ac_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - AC_START_PG)<<8);
-
- memcpy_toio(shmem, buf, count);
-}
-
-static int ac_close_card(struct net_device *dev)
-{
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
-
-#ifdef notyet
- /* We should someday disable shared memory and interrupts. */
- outb(0x00, ioaddr + 6); /* Disable interrupts. */
- free_irq(dev->irq, dev);
-#endif
-
- ei_close(dev);
- return 0;
-}
-
-#ifdef MODULE
-#define MAX_AC32_CARDS 4 /* Max number of AC32 cards per module */
-static struct net_device *dev_ac32[MAX_AC32_CARDS];
-static int io[MAX_AC32_CARDS];
-static int irq[MAX_AC32_CARDS];
-static int mem[MAX_AC32_CARDS];
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, "Memory base address(es)");
-MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-static int __init ac3200_module_init(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
- if (io[this_dev] == 0 && this_dev != 0)
- break;
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev]; /* Currently ignored by driver */
- if (do_ac3200_probe(dev) == 0) {
- dev_ac32[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "ac3200.c: No ac3200 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* Someday free_irq may be in ac_close_card() */
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, AC_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-static void __exit ac3200_module_exit(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
- struct net_device *dev = dev_ac32[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-module_init(ac3200_module_init);
-module_exit(ac3200_module_exit);
-#endif /* MODULE */
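The config2irq()/config2mem()/config2name() macros in the file removed above decode a single AC3200 configuration byte: bits 0-2 index addrmap[], bits 3-5 index config2irqmap[], and bits 6-7 index port_name[]. A standalone sketch of that decoding, with the lookup tables copied from the removed driver and a hypothetical register value chosen for illustration:

#include <stdio.h>

static const unsigned char config2irqmap[8] = { 15, 12, 11, 10, 9, 7, 5, 3 };
static const int addrmap[8] = {
        0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0
};
static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2" };

int main(void)
{
        unsigned char cfg = 0xd1;       /* hypothetical AC_CONFIG value */

        printf("IRQ %d, mem %#x, %s port\n",
               config2irqmap[(cfg >> 3) & 7],          /* config2irq()  */
               (unsigned int)addrmap[cfg & 7],         /* config2mem()  */
               port_name[(cfg >> 6) & 3]);             /* config2name() */
        return 0;
}

With 0xd1 this prints "IRQ 11, mem 0xfe0000, 10base2 port", which is what ac_probe1() would have derived from the same byte.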
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 70dba5d01ad3..cab306a9888e 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -358,7 +358,7 @@ static int ax_mii_probe(struct net_device *dev)
return -ENODEV;
}
- ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
+ ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change,
PHY_INTERFACE_MODE_MII);
if (ret) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -469,9 +469,9 @@ static void ax_get_drvinfo(struct net_device *dev,
{
struct platform_device *pdev = to_platform_device(dev->dev.parent);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pdev->name);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
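The second ax88796.c hunk above swaps strcpy() for strlcpy() when filling the fixed-size driver/version/bus_info arrays in struct ethtool_drvinfo. The point of strlcpy(dst, src, sizeof(dst)) is that it can never write past the destination and always leaves it NUL-terminated, truncating the source if needed. A small user-space sketch of that behaviour -- the helper below is a minimal stand-in for the kernel's strlcpy(), and the 8-byte buffer is deliberately undersized for illustration:

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = (len >= size) ? size - 1 : len;

                memcpy(dst, src, n);
                dst[n] = '\0';          /* always NUL-terminated */
        }
        return len;                     /* full source length, handy for truncation checks */
}

int main(void)
{
        char bus_info[8];               /* stand-in for the 32-byte ethtool field */

        my_strlcpy(bus_info, "platform bus name that is far too long", sizeof(bus_info));
        printf("'%s'\n", bus_info);     /* prints 'platfor' -- truncated, never overflowed */
        return 0;
}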
diff --git a/drivers/net/ethernet/8390/e2100.c b/drivers/net/ethernet/8390/e2100.c
deleted file mode 100644
index ed55ce85ebbf..000000000000
--- a/drivers/net/ethernet/8390/e2100.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/* e2100.c: A Cabletron E2100 series ethernet driver for linux. */
-/*
- Written 1993-1994 by Donald Becker.
-
- Copyright 1994 by Donald Becker.
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- This is a driver for the Cabletron E2100 series ethercards.
-
- The Author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- The E2100 series ethercard is a fairly generic shared memory 8390
- implementation. The only unusual aspect is the way the shared memory
- registers are set: first you do an inb() in what is normally the
- station address region, and the low three bits of the next outb() *address*
- are used as the write value for that register. Either someone wasn't
- too used to dem bit en bites, or they were trying to obfuscate the
- programming interface.
-
- There is an additional complication when setting the window on the packet
- buffer. You must first do a read into the packet buffer region with the
- low 8 address bits the address setting the page for the start of the packet
- buffer window, and then do the above operation. See mem_on() for details.
-
- One bug on the chip is that even a hard reset won't disable the memory
- window, usually resulting in a hung machine if mem_off() isn't called.
- If this happens, you must power down the machine for about 30 seconds.
-*/
-
-static const char version[] =
- "e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "e2100"
-
-static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0};
-
-/* Offsets from the base_addr.
- Read from the ASIC register, and the low three bits of the next outb()
- address is used to set the corresponding register. */
-#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */
-#define E21_ASIC 0x10
-#define E21_MEM_ENABLE 0x10
-#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */
-#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */
-#define E21_MEM_BASE 0x11
-#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */
-#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */
-#define E21_MEDIA 0x14 /* (alias). */
-#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */
-#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */
-#define E21_SAPROM 0x10 /* Offset to station address data. */
-#define E21_IO_EXTENT 0x20
-
-static inline void mem_on(short port, volatile char __iomem *mem_base,
- unsigned char start_page )
-{
- /* This is a little weird: set the shared memory window by doing a
- read. The low address bits specify the starting page. */
- readb(mem_base+start_page);
- inb(port + E21_MEM_ENABLE);
- outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON);
-}
-
-static inline void mem_off(short port)
-{
- inb(port + E21_MEM_ENABLE);
- outb(0x00, port + E21_MEM_ENABLE);
-}
-
-/* In other drivers I put the TX pages first, but the E2100 window circuitry
- is designed to have a 4K Tx region last. The windowing circuitry wraps the
- window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring
- appear contiguously in the window. */
-#define E21_RX_START_PG 0x00 /* First page of RX buffer */
-#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
-#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */
-#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */
-
-static int e21_probe1(struct net_device *dev, int ioaddr);
-
-static int e21_open(struct net_device *dev);
-static void e21_reset_8390(struct net_device *dev);
-static void e21_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void e21_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static int e21_open(struct net_device *dev);
-static int e21_close(struct net_device *dev);
-
-
-/* Probe for the E2100 series ethercards. These cards have an 8390 at the
- base address and the station address at both offset 0x10 and 0x18. I read
- the station address from offset 0x18 to avoid the dataport of NE2000
- ethercards, and look for Ctron's unique ID (first three octets of the
- station address).
- */
-
-static int __init do_e2100_probe(struct net_device *dev)
-{
- int *port;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return e21_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (port = e21_probe_list; *port; port++) {
- dev->irq = irq;
- if (e21_probe1(dev, *port) == 0)
- return 0;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init e2100_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_e2100_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops e21_netdev_ops = {
- .ndo_open = e21_open,
- .ndo_stop = e21_close,
-
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_rx_mode = ei_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
-#endif
-};
-
-static int __init e21_probe1(struct net_device *dev, int ioaddr)
-{
- int i, status, retval;
- unsigned char *station_addr = dev->dev_addr;
- static unsigned version_printed;
-
- if (!request_region(ioaddr, E21_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* First check the station address for the Ctron prefix. */
- if (inb(ioaddr + E21_SAPROM + 0) != 0x00 ||
- inb(ioaddr + E21_SAPROM + 1) != 0x00 ||
- inb(ioaddr + E21_SAPROM + 2) != 0x1d) {
- retval = -ENODEV;
- goto out;
- }
-
- /* Verify by making certain that there is a 8390 at there. */
- outb(E8390_NODMA + E8390_STOP, ioaddr);
- udelay(1); /* we want to delay one I/O cycle - which is 2MHz */
- status = inb(ioaddr);
- if (status != 0x21 && status != 0x23) {
- retval = -ENODEV;
- goto out;
- }
-
- /* Read the station address PROM. */
- for (i = 0; i < 6; i++)
- station_addr[i] = inb(ioaddr + E21_SAPROM + i);
-
- inb(ioaddr + E21_MEDIA); /* Point to media selection. */
- outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- for (i = 0; i < 6; i++)
- printk(" %02X", station_addr[i]);
-
- if (dev->irq < 2) {
- static const int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4};
- for (i = 0; i < ARRAY_SIZE(irqlist); i++)
- if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) {
- dev->irq = irqlist[i];
- break;
- }
- if (i >= ARRAY_SIZE(irqlist)) {
- printk(" unable to get IRQ %d.\n", dev->irq);
- retval = -EAGAIN;
- goto out;
- }
- } else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */
- dev->irq = 9;
-
- /* The 8390 is at the base address. */
- dev->base_addr = ioaddr;
-
- ei_status.name = "E2100";
- ei_status.word16 = 1;
- ei_status.tx_start_page = E21_TX_START_PG;
- ei_status.rx_start_page = E21_RX_START_PG;
- ei_status.stop_page = E21_RX_STOP_PG;
- ei_status.saved_irq = dev->irq;
-
- /* Check the media port used. The port can be passed in on the
- low mem_end bits. */
- if (dev->mem_end & 15)
- dev->if_port = dev->mem_end & 7;
- else {
- dev->if_port = 0;
- inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */
- for(i = 0; i < 6; i++)
- if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) {
- dev->if_port = 1;
- break;
- }
- }
-
- /* Never map in the E21 shared memory unless you are actively using it.
- Also, the shared memory has effectively only one setting -- spread all
- over the 128K region! */
- if (dev->mem_start == 0)
- dev->mem_start = 0xd0000;
-
- ei_status.mem = ioremap(dev->mem_start, 2*1024);
- if (!ei_status.mem) {
- printk("unable to remap memory\n");
- retval = -EAGAIN;
- goto out;
- }
-
-#ifdef notdef
- /* These values are unused. The E2100 has a 2K window into the packet
- buffer. The window can be set to start on any page boundary. */
- ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
- dev->mem_end = ei_status.rmem_end = dev->mem_start + 2*1024;
-#endif
-
- printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq,
- dev->if_port ? "secondary" : "primary", dev->mem_start);
-
- ei_status.reset_8390 = &e21_reset_8390;
- ei_status.block_input = &e21_block_input;
- ei_status.block_output = &e21_block_output;
- ei_status.get_8390_hdr = &e21_get_8390_hdr;
-
- dev->netdev_ops = &e21_netdev_ops;
- NS8390_init(dev, 0);
-
- retval = register_netdev(dev);
- if (retval)
- goto out;
- return 0;
-out:
- release_region(ioaddr, E21_IO_EXTENT);
- return retval;
-}
-
-static int
-e21_open(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
- int retval;
-
- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
- return retval;
-
- /* Set the interrupt line and memory base on the hardware. */
- inb(ioaddr + E21_IRQ_LOW);
- outb(0, ioaddr + E21_ASIC + (dev->irq & 7));
- inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
- outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 1:0)
- + (dev->if_port ? E21_ALT_IFPORT : 0));
- inb(ioaddr + E21_MEM_BASE);
- outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7));
-
- ei_open(dev);
- return 0;
-}
-
-static void
-e21_reset_8390(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
-
- outb(0x01, ioaddr);
- if (ei_debug > 1) printk("resetting the E2180x3 t=%ld...", jiffies);
- ei_status.txing = 0;
-
- /* Set up the ASIC registers, just in case something changed them. */
-
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/* Grab the 8390 specific header. We put the 2k window so the header page
- appears at the start of the shared memory. */
-
-static void
-e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
-
- short ioaddr = dev->base_addr;
- char __iomem *shared_mem = ei_status.mem;
-
- mem_on(ioaddr, shared_mem, ring_page);
-
-#ifdef notdef
- /* Officially this is what we are doing, but the readl() is faster */
- memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr));
-#else
- ((unsigned int*)hdr)[0] = readl(shared_mem);
-#endif
-
- /* Turn off memory access: we would need to reprogram the window anyway. */
- mem_off(ioaddr);
-
-}
-
-/* Block input and output are easy on shared memory ethercards.
- The E21xx makes block_input() especially easy by wrapping the top
- ring buffer to the bottom automatically. */
-static void
-e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- short ioaddr = dev->base_addr;
- char __iomem *shared_mem = ei_status.mem;
-
- mem_on(ioaddr, shared_mem, (ring_offset>>8));
-
- memcpy_fromio(skb->data, ei_status.mem + (ring_offset & 0xff), count);
-
- mem_off(ioaddr);
-}
-
-static void
-e21_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
-{
- short ioaddr = dev->base_addr;
- volatile char __iomem *shared_mem = ei_status.mem;
-
- /* Set the shared memory window start by doing a read, with the low address
- bits specifying the starting page. */
- readb(shared_mem + start_page);
- mem_on(ioaddr, shared_mem, start_page);
-
- memcpy_toio(shared_mem, buf, count);
- mem_off(ioaddr);
-}
-
-static int
-e21_close(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
-
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
-
- free_irq(dev->irq, dev);
- dev->irq = ei_status.saved_irq;
-
- /* Shut off the interrupt line and secondary interface. */
- inb(ioaddr + E21_IRQ_LOW);
- outb(0, ioaddr + E21_ASIC);
- inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
- outb(0, ioaddr + E21_ASIC);
-
- ei_close(dev);
-
- /* Double-check that the memory has been turned off, because really
- really bad things happen if it isn't. */
- mem_off(ioaddr);
-
- return 0;
-}
-
-
-#ifdef MODULE
-#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */
-static struct net_device *dev_e21[MAX_E21_CARDS];
-static int io[MAX_E21_CARDS];
-static int irq[MAX_E21_CARDS];
-static int mem[MAX_E21_CARDS];
-static int xcvr[MAX_E21_CARDS]; /* choose int. or ext. xcvr */
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-module_param_array(xcvr, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, " memory base address(es)");
-MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)");
-MODULE_DESCRIPTION("Cabletron E2100 ISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev];
- dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
- if (do_e2100_probe(dev) == 0) {
- dev_e21[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: e21_close() handles free_irq */
- iounmap(ei_status.mem);
- release_region(dev->base_addr, E21_IO_EXTENT);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
- struct net_device *dev = dev_e21[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
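The long comment at the top of the removed e2100.c describes the board's unusual register interface: you first read the register you want, and the value to be written is then encoded in the low bits of the *address* of the following outb(); the data byte itself is ignored. Spelled out as code, this is the idiom e21_open() used to program the IRQ line. The sketch below assumes kernel context (<asm/io.h>) and repeats the two register offsets from the removed file:

#include <asm/io.h>

#define E21_ASIC        0x10
#define E21_IRQ_LOW     0x12    /* low three bits of the IRQ number */

static inline void e21_set_irq_low(unsigned short ioaddr, unsigned int irq)
{
        inb(ioaddr + E21_IRQ_LOW);                /* select the register by reading it  */
        outb(0, ioaddr + E21_ASIC + (irq & 7));   /* the address low bits carry the value */
}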
diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c
deleted file mode 100644
index ba1b5c95531f..000000000000
--- a/drivers/net/ethernet/8390/es3210.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- es3210.c
-
- Linux driver for Racal-Interlan ES3210 EISA Network Adapter
-
- Copyright (C) 1996, Paul Gortmaker.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- Information and Code Sources:
-
- 1) The existing myriad of Linux 8390 drivers written by Donald Becker.
-
- 2) Once again Russ Nelson's asm packet driver provided additional info.
-
- 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg files.
- Too bad it doesn't work -- see below.
-
- The ES3210 is an EISA shared memory NS8390 implementation. Note
- that all memory copies to/from the board must be 32bit transfers.
- Which rules out using eth_io_copy_and_sum() in this driver.
-
- Apparently there are two slightly different revisions of the
- card, since there are two distinct EISA cfg files (!rii0101.cfg
- and !rii0102.cfg) One has media select in the cfg file and the
- other doesn't. Hopefully this will work with either.
-
- That is about all I can tell you about it, having never actually
- even seen one of these cards. :) Try http://www.interlan.com
- if you want more info.
-
- Thanks go to Mark Salazar for testing v0.02 of this driver.
-
- Bugs, to-fix, etc:
-
- 1) The EISA cfg ports that are *supposed* to have the IRQ and shared
- mem values just read 0xff all the time. Hrrmpf. Apparently the
- same happens with the packet driver as the code for reading
- these registers is disabled there. In the meantime, boot with:
- ether=<IRQ>,0,0x<shared_mem_addr>,eth0 to override the IRQ and
- shared memory detection. (The i/o port detection is okay.)
-
- 2) Module support currently untested. Probably works though.
-
-*/
-
-static const char version[] =
- "es3210.c: Driver revision v0.03, 14/09/96\n";
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-static int es_probe1(struct net_device *dev, int ioaddr);
-
-static void es_reset_8390(struct net_device *dev);
-
-static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
-static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
-static void es_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page);
-
-#define ES_START_PG 0x00 /* First page of TX buffer */
-#define ES_STOP_PG 0x40 /* Last page +1 of RX ring */
-
-#define ES_IO_EXTENT 0x37 /* The cfg file says 0xc90 -> 0xcc7 */
-#define ES_ID_PORT 0xc80 /* Same for all EISA cards */
-#define ES_SA_PROM 0xc90 /* Start of e'net addr. */
-#define ES_RESET_PORT 0xc84 /* From the packet driver source */
-#define ES_NIC_OFFSET 0xca0 /* Hello, the 8390 is *here* */
-
-#define ES_ADDR0 0x02 /* 3 byte vendor prefix */
-#define ES_ADDR1 0x07
-#define ES_ADDR2 0x01
-
-/*
- * Two card revisions. EISA IDs are always rev. minor, rev. major, and
- * then the three vendor letters stored in 5 bits each, with an "a" = 1.
- * For eg: "rii" = 10010 01001 01001 = 0x4929, which is how the EISA
- * config utility determines automagically what config file(s) to use.
- */
-#define ES_EISA_ID1 0x01012949 /* !rii0101.cfg */
-#define ES_EISA_ID2 0x02012949 /* !rii0102.cfg */
-
-#define ES_CFG1 0xcc0 /* IOPORT(1) --> IOPORT(6) in cfg file */
-#define ES_CFG2 0xcc1
-#define ES_CFG3 0xcc2
-#define ES_CFG4 0xcc3
-#define ES_CFG5 0xcc4
-#define ES_CFG6 0xc84 /* NB: 0xc84 is also "reset" port. */
-
-/*
- * You can OR any of the following bits together and assign it
- * to ES_DEBUG to get verbose driver info during operation.
- * Some of these don't do anything yet.
- */
-
-#define ES_D_PROBE 0x01
-#define ES_D_RX_PKT 0x02
-#define ES_D_TX_PKT 0x04
-#define ED_D_IRQ 0x08
-
-#define ES_DEBUG 0
-
-static unsigned char lo_irq_map[] __initdata = {3, 4, 5, 6, 7, 9, 10};
-static unsigned char hi_irq_map[] __initdata = {11, 12, 0, 14, 0, 0, 0, 15};
-
-/*
- * Probe for the card. The best way is to read the EISA ID if it
- * is known. Then we check the prefix of the station address
- * PROM for a match against the Racal-Interlan assigned value.
- */
-
-static int __init do_es_probe(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- int irq = dev->irq;
- int mem_start = dev->mem_start;
-
- if (ioaddr > 0x1ff) /* Check a single specified location. */
- return es_probe1(dev, ioaddr);
- else if (ioaddr > 0) /* Don't probe at all. */
- return -ENXIO;
-
- if (!EISA_bus) {
-#if ES_DEBUG & ES_D_PROBE
- printk("es3210.c: Not EISA bus. Not probing high ports.\n");
-#endif
- return -ENXIO;
- }
-
- /* EISA spec allows for up to 16 slots, but 8 is typical. */
- for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- if (es_probe1(dev, ioaddr) == 0)
- return 0;
- dev->irq = irq;
- dev->mem_start = mem_start;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init es_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_es_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static int __init es_probe1(struct net_device *dev, int ioaddr)
-{
- int i, retval;
- unsigned long eisa_id;
-
- if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210"))
- return -ENODEV;
-
-#if ES_DEBUG & ES_D_PROBE
- printk("es3210.c: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + ES_ID_PORT));
- printk("es3210.c: config regs: %#x %#x %#x %#x %#x %#x\n",
- inb(ioaddr + ES_CFG1), inb(ioaddr + ES_CFG2), inb(ioaddr + ES_CFG3),
- inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6));
-#endif
-
-/* Check the EISA ID of the card. */
- eisa_id = inl(ioaddr + ES_ID_PORT);
- if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) {
- retval = -ENODEV;
- goto out;
- }
-
- for (i = 0; i < ETH_ALEN ; i++)
- dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
-
-/* Check the Racal vendor ID as well. */
- if (dev->dev_addr[0] != ES_ADDR0 ||
- dev->dev_addr[1] != ES_ADDR1 ||
- dev->dev_addr[2] != ES_ADDR2) {
- printk("es3210.c: card not found %pM (invalid_prefix).\n",
- dev->dev_addr);
- retval = -ENODEV;
- goto out;
- }
-
- printk("es3210.c: ES3210 rev. %ld at %#x, node %pM",
- eisa_id>>24, ioaddr, dev->dev_addr);
-
- /* Snarf the interrupt now. */
- if (dev->irq == 0) {
- unsigned char hi_irq = inb(ioaddr + ES_CFG2) & 0x07;
- unsigned char lo_irq = inb(ioaddr + ES_CFG1) & 0xfe;
-
- if (hi_irq != 0) {
- dev->irq = hi_irq_map[hi_irq - 1];
- } else {
- int i = 0;
- while (lo_irq > (1<<i)) i++;
- dev->irq = lo_irq_map[i];
- }
- printk(" using IRQ %d", dev->irq);
-#if ES_DEBUG & ES_D_PROBE
- printk("es3210.c: hi_irq %#x, lo_irq %#x, dev->irq = %d\n",
- hi_irq, lo_irq, dev->irq);
-#endif
- } else {
- if (dev->irq == 2)
- dev->irq = 9; /* Doh! */
- printk(" assigning IRQ %d", dev->irq);
- }
-
- if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- retval = -EAGAIN;
- goto out;
- }
-
- if (dev->mem_start == 0) {
- unsigned char mem_enabled = inb(ioaddr + ES_CFG2) & 0xc0;
- unsigned char mem_bits = inb(ioaddr + ES_CFG3) & 0x07;
-
- if (mem_enabled != 0x80) {
- printk(" shared mem disabled - giving up\n");
- retval = -ENXIO;
- goto out1;
- }
- dev->mem_start = 0xC0000 + mem_bits*0x4000;
- printk(" using ");
- } else {
- printk(" assigning ");
- }
-
- ei_status.mem = ioremap(dev->mem_start, (ES_STOP_PG - ES_START_PG)*256);
- if (!ei_status.mem) {
- printk("ioremap failed - giving up\n");
- retval = -ENXIO;
- goto out1;
- }
-
- dev->mem_end = dev->mem_start + (ES_STOP_PG - ES_START_PG)*256;
-
- printk("mem %#lx-%#lx\n", dev->mem_start, dev->mem_end-1);
-
-#if ES_DEBUG & ES_D_PROBE
- if (inb(ioaddr + ES_CFG5))
- printk("es3210: Warning - DMA channel enabled, but not used here.\n");
-#endif
- /* Note, point at the 8390, and not the card... */
- dev->base_addr = ioaddr + ES_NIC_OFFSET;
-
- ei_status.name = "ES3210";
- ei_status.tx_start_page = ES_START_PG;
- ei_status.rx_start_page = ES_START_PG + TX_PAGES;
- ei_status.stop_page = ES_STOP_PG;
- ei_status.word16 = 1;
-
- if (ei_debug > 0)
- printk(version);
-
- ei_status.reset_8390 = &es_reset_8390;
- ei_status.block_input = &es_block_input;
- ei_status.block_output = &es_block_output;
- ei_status.get_8390_hdr = &es_get_8390_hdr;
-
- dev->netdev_ops = &ei_netdev_ops;
- NS8390_init(dev, 0);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT);
- return retval;
-}
-
-/*
- * Reset as per the packet driver method. Judging by the EISA cfg
- * file, this just toggles the "Board Enable" bits (bit 2 and 0).
- */
-
-static void es_reset_8390(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- unsigned long end;
-
- outb(0x04, ioaddr + ES_RESET_PORT);
- if (ei_debug > 1) printk("%s: resetting the ES3210...", dev->name);
-
- end = jiffies + 2*HZ/100;
- while ((signed)(end - jiffies) > 0) continue;
-
- ei_status.txing = 0;
- outb(0x01, ioaddr + ES_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/*
- * Note: In the following three functions is the implicit assumption
- * that the associated memcpy will only use "rep; movsl" as long as
- * we keep the counts as some multiple of doublewords. This is a
- * requirement of the hardware, and also prevents us from using
- * eth_io_copy_and_sum() since we can't guarantee it will limit
- * itself to doubleword access.
- */
-
-/*
- * Grab the 8390 specific header. Similar to the block_input routine, but
- * we don't need to be concerned with ring wrap as the header will be at
- * the start of a page, so we optimize accordingly. (A single doubleword.)
- */
-
-static void
-es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - ES_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
-}
-
-/*
- * Block input and output are easy on shared memory ethercards, the only
- * complication is when the ring buffer wraps. The count will already
- * be rounded up to a doubleword value via es_get_8390_hdr() above.
- */
-
-static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *xfer_start = ei_status.mem + ring_offset - ES_START_PG*256;
-
- if (ring_offset + count > ES_STOP_PG*256) {
- /* Packet wraps over end of ring buffer. */
- int semi_count = ES_STOP_PG*256 - ring_offset;
- memcpy_fromio(skb->data, xfer_start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
- } else {
- /* Packet is in one chunk. */
- memcpy_fromio(skb->data, xfer_start, count);
- }
-}
-
-static void es_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - ES_START_PG)<<8);
-
- count = (count + 3) & ~3; /* Round up to doubleword */
- memcpy_toio(shmem, buf, count);
-}
-
-#ifdef MODULE
-#define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */
-#define NAMELEN 8 /* # of chars for storing dev->name */
-static struct net_device *dev_es3210[MAX_ES_CARDS];
-static int io[MAX_ES_CARDS];
-static int irq[MAX_ES_CARDS];
-static int mem[MAX_ES_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, "memory base address(es)");
-MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
- if (io[this_dev] == 0 && this_dev != 0)
- break;
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev];
- if (do_es_probe(dev) == 0) {
- dev_es3210[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "es3210.c: No es3210 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, ES_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
- struct net_device *dev = dev_es3210[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
-
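The comment block above ES_EISA_ID1/ES_EISA_ID2 explains the EISA vendor-ID packing: each of the three manufacturer letters is stored as (letter - 'a' + 1) in 5 bits, most significant letter first, so "rii" becomes 0x4929. A quick standalone check of that arithmetic, plus the byte order seen when the ID is read as one 32-bit inl() on a little-endian machine (which is why 0x4929 shows up byte-swapped inside 0x01012949):

#include <assert.h>

static unsigned int eisa_vendor_id(char a, char b, char c)
{
        return ((a - 'a' + 1) << 10) | ((b - 'a' + 1) << 5) | (c - 'a' + 1);
}

int main(void)
{
        unsigned int id = 0x01012949;   /* ES_EISA_ID1, as the driver read it with inl() */

        /* "rii": r=18, i=9, i=9 -> 10010 01001 01001 -> 0x4929 */
        assert(eisa_vendor_id('r', 'i', 'i') == 0x4929);

        /* the two vendor bytes sit in the low half of the dword, byte-swapped */
        assert((((id & 0xff) << 8) | ((id >> 8) & 0xff)) == 0x4929);
        return 0;
}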
diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c
deleted file mode 100644
index 52f70f999c00..000000000000
--- a/drivers/net/ethernet/8390/hp-plus.c
+++ /dev/null
@@ -1,505 +0,0 @@
-/* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */
-/*
- Written 1994 by Donald Becker.
-
- This driver is for the Hewlett Packard PC LAN (27***) plus ethercards.
- These cards are sold under several model numbers, usually 2724*.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- As is often the case, a great deal of credit is owed to Russ Nelson.
- The Crynwr packet driver was my primary source of HP-specific
- programming information.
-*/
-
-static const char version[] =
-"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include <linux/module.h>
-
-#include <linux/string.h> /* Important -- this inlines word moves. */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "hp-plus"
-
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int hpplus_portlist[] __initdata =
-{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
-
-/*
- The HP EtherTwist chip implementation is a fairly routine DP8390
- implementation. It allows both shared memory and programmed-I/O buffer
- access, using a custom interface for both. The programmed-I/O mode is
- entirely implemented in the HP EtherTwist chip, bypassing the problem
- ridden built-in 8390 facilities used on NE2000 designs. The shared
- memory mode is likewise special, with an offset register used to make
- packets appear at the shared memory base. Both modes use a base and bounds
- page register to hide the Rx ring buffer wrap -- a packet that spans the
- end of physical buffer memory appears continuous to the driver. (c.f. the
- 3c503 and Cabletron E2100)
-
- A special note: the internal buffer of the board is only 8 bits wide.
- This lays several nasty traps for the unaware:
- - the 8390 must be programmed for byte-wide operations
- - all I/O and memory operations must work on whole words (the access
- latches are serially preloaded and have no byte-swapping ability).
-
- This board is laid out in I/O space much like the earlier HP boards:
- the first 16 locations are for the board registers, and the second 16 are
- for the 8390. The board is easy to identify, with both a dedicated 16 bit
- ID register and a constant 0x530* value in the upper bits of the paging
- register.
-*/
-
-#define HP_ID 0x00 /* ID register, always 0x4850. */
-#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */
-#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */
-#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */
-#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */
-#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. */
-#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */
-#define HP_IO_EXTENT 32
-
-#define HP_START_PG 0x00 /* First page of TX buffer */
-#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-/* The register set selected in HP_PAGING. */
-enum PageName {
- Perf_Page = 0, /* Normal operation. */
- MAC_Page = 1, /* The ethernet address (+checksum). */
- HW_Page = 2, /* EEPROM-loaded hardware parameters. */
- LAN_Page = 4, /* Transceiver selection, testing, etc. */
- ID_Page = 6 };
-
-/* The bit definitions for the HPP_OPTION register. */
-enum HP_Option {
- NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */
- EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20,
- MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, };
-
-static int hpp_probe1(struct net_device *dev, int ioaddr);
-
-static void hpp_reset_8390(struct net_device *dev);
-static int hpp_open(struct net_device *dev);
-static int hpp_close(struct net_device *dev);
-static void hpp_mem_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void hpp_mem_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void hpp_io_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void hpp_io_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-
-
-/* Probe a list of addresses for an HP LAN+ adaptor.
- This routine is almost boilerplate. */
-
-static int __init do_hpp_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return hpp_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; hpplus_portlist[i]; i++) {
- if (hpp_probe1(dev, hpplus_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init hp_plus_probe(int unit)
-{
- struct net_device *dev = alloc_eip_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_hpp_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops hpp_netdev_ops = {
- .ndo_open = hpp_open,
- .ndo_stop = hpp_close,
- .ndo_start_xmit = eip_start_xmit,
- .ndo_tx_timeout = eip_tx_timeout,
- .ndo_get_stats = eip_get_stats,
- .ndo_set_rx_mode = eip_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = eip_poll,
-#endif
-};
-
-
-/* Do the interesting part of the probe at a single address. */
-static int __init hpp_probe1(struct net_device *dev, int ioaddr)
-{
- int i, retval;
- unsigned char checksum = 0;
- const char name[] = "HP-PC-LAN+";
- int mem_start;
- static unsigned version_printed;
-
- if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* Check for the HP+ signature, 50 48 0x 53. */
- if (inw(ioaddr + HP_ID) != 0x4850 ||
- (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) {
- retval = -ENODEV;
- goto out;
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- printk("%s: %s at %#3x, ", dev->name, name, ioaddr);
-
- /* Retrieve and checksum the station address. */
- outw(MAC_Page, ioaddr + HP_PAGING);
-
- for(i = 0; i < ETH_ALEN; i++) {
- unsigned char inval = inb(ioaddr + 8 + i);
- dev->dev_addr[i] = inval;
- checksum += inval;
- }
- checksum += inb(ioaddr + 14);
-
- printk("%pM", dev->dev_addr);
-
- if (checksum != 0xff) {
- printk(" bad checksum %2.2x.\n", checksum);
- retval = -ENODEV;
- goto out;
- } else {
- /* Point at the Software Configuration Flags. */
- outw(ID_Page, ioaddr + HP_PAGING);
- printk(" ID %4.4x", inw(ioaddr + 12));
- }
-
- /* Read the IRQ line. */
- outw(HW_Page, ioaddr + HP_PAGING);
- {
- int irq = inb(ioaddr + 13) & 0x0f;
- int option = inw(ioaddr + HPP_OPTION);
-
- dev->irq = irq;
- if (option & MemEnable) {
- mem_start = inw(ioaddr + 9) << 8;
- printk(", IRQ %d, memory address %#x.\n", irq, mem_start);
- } else {
- mem_start = 0;
- printk(", IRQ %d, programmed-I/O mode.\n", irq);
- }
- }
-
- /* Set the wrap registers for string I/O reads. */
- outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
-
- /* Set the base address to point to the NIC, not the "real" base! */
- dev->base_addr = ioaddr + NIC_OFFSET;
-
- dev->netdev_ops = &hpp_netdev_ops;
-
- ei_status.name = name;
- ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
- ei_status.tx_start_page = HP_START_PG;
- ei_status.rx_start_page = HP_START_PG + TX_PAGES/2;
- ei_status.stop_page = HP_STOP_PG;
-
- ei_status.reset_8390 = &hpp_reset_8390;
- ei_status.block_input = &hpp_io_block_input;
- ei_status.block_output = &hpp_io_block_output;
- ei_status.get_8390_hdr = &hpp_io_get_8390_hdr;
-
- /* Check if the memory_enable flag is set in the option register. */
- if (mem_start) {
- ei_status.block_input = &hpp_mem_block_input;
- ei_status.block_output = &hpp_mem_block_output;
- ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr;
- dev->mem_start = mem_start;
- ei_status.mem = ioremap(mem_start,
- (HP_STOP_PG - HP_START_PG)*256);
- if (!ei_status.mem) {
- retval = -ENOMEM;
- goto out;
- }
- ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256;
- dev->mem_end = ei_status.rmem_end
- = dev->mem_start + (HP_STOP_PG - HP_START_PG)*256;
- }
-
- outw(Perf_Page, ioaddr + HP_PAGING);
- NS8390p_init(dev, 0);
- /* Leave the 8390 and HP chip reset. */
- outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- iounmap(ei_status.mem);
-out:
- release_region(ioaddr, HP_IO_EXTENT);
- return retval;
-}
-
-static int
-hpp_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg;
- int retval;
-
- if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) {
- return retval;
- }
-
- /* Reset the 8390 and HP chip. */
- option_reg = inw(ioaddr + HPP_OPTION);
- outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
- udelay(5);
- /* Unreset the board and enable interrupts. */
- outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
-
- /* Set the wrap registers for programmed-I/O operation. */
- outw(HW_Page, ioaddr + HP_PAGING);
- outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
-
- /* Select the operational page. */
- outw(Perf_Page, ioaddr + HP_PAGING);
-
- return eip_open(dev);
-}
-
-static int
-hpp_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- free_irq(dev->irq, dev);
- eip_close(dev);
- outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset,
- ioaddr + HPP_OPTION);
-
- return 0;
-}
-
-static void
-hpp_reset_8390(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
-
- outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
- /* Pause a few cycles for the hardware reset to take place. */
- udelay(5);
- ei_status.txing = 0;
- outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
-
- udelay(5);
-
-
- if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
- printk("%s: hp_reset_8390() did not complete.\n", dev->name);
-
- if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
-}
-
-/* The programmed-I/O version of reading the 4 byte 8390 specific header.
- Note that transfer with the EtherTwist+ must be on word boundaries. */
-
-static void
-hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
-
- outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
- insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
-}
-
-/* Block input and output, similar to the Crynwr packet driver. */
-
-static void
-hpp_io_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- char *buf = skb->data;
-
- outw(ring_offset, ioaddr + HPP_IN_ADDR);
- insw(ioaddr + HP_DATAPORT, buf, count>>1);
- if (count & 0x01)
- buf[count-1] = inw(ioaddr + HP_DATAPORT);
-}
-
-/* The corresponding shared memory versions of the above 2 functions. */
-
-static void
-hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
- outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
- memcpy_fromio(hdr, ei_status.mem, sizeof(struct e8390_pkt_hdr));
- outw(option_reg, ioaddr + HPP_OPTION);
- hdr->count = (le16_to_cpu(hdr->count) + 3) & ~3; /* Round up allocation. */
-}
-
-static void
-hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- outw(ring_offset, ioaddr + HPP_IN_ADDR);
-
- outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
-
- /* Caution: this relies on get_8390_hdr() rounding up count!
- Also note that we *can't* use eth_io_copy_and_sum() because
- it will not always copy "count" bytes (e.g. padded IP). */
-
- memcpy_fromio(skb->data, ei_status.mem, count);
- outw(option_reg, ioaddr + HPP_OPTION);
-}
-
-/* A special note: we *must* always transfer >=16 bit words.
- It's always safe to round up, so we do. */
-static void
-hpp_io_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
- outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
-}
-
-static void
-hpp_mem_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
- outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
- memcpy_toio(ei_status.mem, buf, (count + 3) & ~3);
- outw(option_reg, ioaddr + HPP_OPTION);
-}
-
-
-#ifdef MODULE
-#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */
-static struct net_device *dev_hpp[MAX_HPP_CARDS];
-static int io[MAX_HPP_CARDS];
-static int irq[MAX_HPP_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O port address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s); ignored if properly detected");
-MODULE_DESCRIPTION("HP PC-LAN+ ISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-int __init
-init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_eip_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (do_hpp_probe(dev) == 0) {
- dev_hpp[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: hpp_close() handles free_irq */
- iounmap(ei_status.mem);
- release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
- struct net_device *dev = dev_hpp[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c
deleted file mode 100644
index 37fa89aa4578..000000000000
--- a/drivers/net/ethernet/8390/hp.c
+++ /dev/null
@@ -1,438 +0,0 @@
-/* hp.c: A HP LAN ethernet driver for linux. */
-/*
- Written 1993-94 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- This is a driver for the HP PC-LAN adaptors.
-
- Sources:
- The Crynwr packet driver.
-*/
-
-static const char version[] =
- "hp.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "hp"
-
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int hppclan_portlist[] __initdata =
-{ 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240, 0};
-
-#define HP_IO_EXTENT 32
-
-#define HP_DATAPORT 0x0c /* "Remote DMA" data port. */
-#define HP_ID 0x07
-#define HP_CONFIGURE 0x08 /* Configuration register. */
-#define HP_RUN 0x01 /* 1 == Run, 0 == reset. */
-#define HP_IRQ 0x0E /* Mask for software-configured IRQ line. */
-#define HP_DATAON 0x10 /* Turn on dataport */
-#define NIC_OFFSET 0x10 /* Offset the 8390 registers. */
-
-#define HP_START_PG 0x00 /* First page of TX buffer */
-#define HP_8BSTOP_PG 0x80 /* Last page +1 of RX ring */
-#define HP_16BSTOP_PG 0xFF /* Same, for 16 bit cards. */
-
-static int hp_probe1(struct net_device *dev, int ioaddr);
-
-static void hp_reset_8390(struct net_device *dev);
-static void hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void hp_block_input(struct net_device *dev, int count,
- struct sk_buff *skb , int ring_offset);
-static void hp_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-
-static void hp_init_card(struct net_device *dev);
-
-/* The map from IRQ number to HP_CONFIGURE register setting. */
-/* My default is IRQ5 0 1 2 3 4 5 6 7 8 9 10 11 */
-static char irqmap[16] __initdata= { 0, 0, 4, 6, 8,10, 0,14, 0, 4, 2,12,0,0,0,0};
-
-
-/* Probe for an HP LAN adaptor.
- Also initialize the card and fill in STATION_ADDR with the station
- address. */
-
-static int __init do_hp_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return hp_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; hppclan_portlist[i]; i++) {
- if (hp_probe1(dev, hppclan_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init hp_probe(int unit)
-{
- struct net_device *dev = alloc_eip_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_hp_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static int __init hp_probe1(struct net_device *dev, int ioaddr)
-{
- int i, retval, board_id, wordmode;
- const char *name;
- static unsigned version_printed;
-
- if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* Check for the HP physical address, 08 00 09 xx xx xx. */
- /* This really isn't good enough: we may pick up HP LANCE boards
- also! Avoid the lance 0x5757 signature. */
- if (inb(ioaddr) != 0x08
- || inb(ioaddr+1) != 0x00
- || inb(ioaddr+2) != 0x09
- || inb(ioaddr+14) == 0x57) {
- retval = -ENODEV;
- goto out;
- }
-
- /* Set up the parameters based on the board ID.
- If you have additional mappings, please mail them to me -djb. */
- if ((board_id = inb(ioaddr + HP_ID)) & 0x80) {
- name = "HP27247";
- wordmode = 1;
- } else {
- name = "HP27250";
- wordmode = 0;
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
-
- for(i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
-
- printk(" %pM", dev->dev_addr);
-
- /* Snarf the interrupt now. Someday this could be moved to open(). */
- if (dev->irq < 2) {
- static const int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
- static const int irq_8list[] = { 7, 5, 3, 4, 9, 0};
- const int *irqp = wordmode ? irq_16list : irq_8list;
- do {
- int irq = *irqp;
- if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
- unsigned long cookie = probe_irq_on();
- /* Twinkle the interrupt, and check if it's seen. */
- outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE);
- outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE);
- if (irq == probe_irq_off(cookie) /* It's a good IRQ line! */
- && request_irq (irq, eip_interrupt, 0, DRV_NAME, dev) == 0) {
- printk(" selecting IRQ %d.\n", irq);
- dev->irq = *irqp;
- break;
- }
- }
- } while (*++irqp);
- if (*irqp == 0) {
- printk(" no free IRQ lines.\n");
- retval = -EBUSY;
- goto out;
- }
- } else {
- if (dev->irq == 2)
- dev->irq = 9;
- if ((retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev))) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- goto out;
- }
- }
-
- /* Set the base address to point to the NIC, not the "real" base! */
- dev->base_addr = ioaddr + NIC_OFFSET;
- dev->netdev_ops = &eip_netdev_ops;
-
- ei_status.name = name;
- ei_status.word16 = wordmode;
- ei_status.tx_start_page = HP_START_PG;
- ei_status.rx_start_page = HP_START_PG + TX_PAGES;
- ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
-
- ei_status.reset_8390 = hp_reset_8390;
- ei_status.get_8390_hdr = hp_get_8390_hdr;
- ei_status.block_input = hp_block_input;
- ei_status.block_output = hp_block_output;
- hp_init_card(dev);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, HP_IO_EXTENT);
- return retval;
-}
-
-static void
-hp_reset_8390(struct net_device *dev)
-{
- int hp_base = dev->base_addr - NIC_OFFSET;
- int saved_config = inb_p(hp_base + HP_CONFIGURE);
-
- if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
- outb_p(0x00, hp_base + HP_CONFIGURE);
- ei_status.txing = 0;
- /* Pause just a few cycles for the hardware reset to take place. */
- udelay(5);
-
- outb_p(saved_config, hp_base + HP_CONFIGURE);
- udelay(5);
-
- if ((inb_p(hp_base+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
- printk("%s: hp_reset_8390() did not complete.\n", dev->name);
-
- if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
-}
-
-static void
-hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int nic_base = dev->base_addr;
- int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
-
- outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
- outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
- outb_p(0, nic_base + EN0_RCNTHI);
- outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
- outb_p(ring_page, nic_base + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, nic_base);
-
- if (ei_status.word16)
- insw(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
- else
- insb(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
-
- outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
-}
-
-/* Block input and output, similar to the Crynwr packet driver. If you are
- porting to a new ethercard look at the packet driver source for hints.
- The HP LAN doesn't use shared memory -- we put the packet
- out through the "remote DMA" dataport. */
-
-static void
-hp_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int nic_base = dev->base_addr;
- int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
- int xfer_count = count;
- char *buf = skb->data;
-
- outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
- outb_p(count & 0xff, nic_base + EN0_RCNTLO);
- outb_p(count >> 8, nic_base + EN0_RCNTHI);
- outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
- outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, nic_base);
- if (ei_status.word16) {
- insw(nic_base - NIC_OFFSET + HP_DATAPORT,buf,count>>1);
- if (count & 0x01)
- buf[count-1] = inb(nic_base - NIC_OFFSET + HP_DATAPORT), xfer_count++;
- } else {
- insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
- }
- /* This is for the ALPHA version only, remove for later releases. */
- if (ei_debug > 0) { /* DMA termination address check... */
- int high = inb_p(nic_base + EN0_RSARHI);
- int low = inb_p(nic_base + EN0_RSARLO);
- int addr = (high << 8) + low;
- /* Check only the lower 8 bits so we can ignore ring wrap. */
- if (((ring_offset + xfer_count) & 0xff) != (addr & 0xff))
- printk("%s: RX transfer address mismatch, %#4.4x vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
- }
- outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
-}
-
-static void
-hp_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- int nic_base = dev->base_addr;
- int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
-
- outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
- /* Round the count up for word writes. Do we need to do this?
- What effect will an odd byte count have on the 8390?
- I should check someday. */
- if (ei_status.word16 && (count & 0x01))
- count++;
- /* We should already be in page 0, but to be safe... */
- outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base);
-
-#ifdef NE8390_RW_BUGFIX
- /* Handle the read-before-write bug the same way as the
- Crynwr packet driver -- the NatSemi method doesn't work. */
- outb_p(0x42, nic_base + EN0_RCNTLO);
- outb_p(0, nic_base + EN0_RCNTHI);
- outb_p(0xff, nic_base + EN0_RSARLO);
- outb_p(0x00, nic_base + EN0_RSARHI);
-#define NE_CMD 0x00
- outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
- /* Make certain that the dummy read has occurred. */
- inb_p(0x61);
- inb_p(0x61);
-#endif
-
- outb_p(count & 0xff, nic_base + EN0_RCNTLO);
- outb_p(count >> 8, nic_base + EN0_RCNTHI);
- outb_p(0x00, nic_base + EN0_RSARLO);
- outb_p(start_page, nic_base + EN0_RSARHI);
-
- outb_p(E8390_RWRITE+E8390_START, nic_base);
- if (ei_status.word16) {
- /* Use the 'rep' sequence for 16 bit boards. */
- outsw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count>>1);
- } else {
- outsb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
- }
-
- /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken! */
-
- /* This is for the ALPHA version only, remove for later releases. */
- if (ei_debug > 0) { /* DMA termination address check... */
- int high = inb_p(nic_base + EN0_RSARHI);
- int low = inb_p(nic_base + EN0_RSARLO);
- int addr = (high << 8) + low;
- if ((start_page << 8) + count != addr)
- printk("%s: TX Transfer address mismatch, %#4.4x vs. %#4.4x.\n",
- dev->name, (start_page << 8) + count, addr);
- }
- outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
-}
-
-/* This function resets the ethercard if something screws up. */
-static void __init
-hp_init_card(struct net_device *dev)
-{
- int irq = dev->irq;
- NS8390p_init(dev, 0);
- outb_p(irqmap[irq&0x0f] | HP_RUN,
- dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
-}
-
-#ifdef MODULE
-#define MAX_HP_CARDS 4 /* Max number of HP cards per module */
-static struct net_device *dev_hp[MAX_HP_CARDS];
-static int io[MAX_HP_CARDS];
-static int irq[MAX_HP_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-MODULE_DESCRIPTION("HP PC-LAN ISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-int __init
-init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_eip_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (do_hp_probe(dev) == 0) {
- dev_hp[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "hp.c: No HP card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
- struct net_device *dev = dev_hp[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c
deleted file mode 100644
index 479409bf2e3c..000000000000
--- a/drivers/net/ethernet/8390/lne390.c
+++ /dev/null
@@ -1,433 +0,0 @@
-/*
- lne390.c
-
- Linux driver for Mylex LNE390 EISA Network Adapter
-
- Copyright (C) 1996-1998, Paul Gortmaker.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- Information and Code Sources:
-
- 1) Based upon framework of es3210 driver.
- 2) The existing myriad of other Linux 8390 drivers by Donald Becker.
- 3) Russ Nelson's asm packet driver provided additional info.
- 4) Info for getting IRQ and sh-mem gleaned from the EISA cfg files.
-
- The LNE390 is an EISA shared memory NS8390 implementation. Note
- that all memory copies to/from the board must be 32bit transfers.
- There are two versions of the card: the lne390a and the lne390b.
- Going by the EISA cfg files, the "a" has jumpers to select between
- BNC/AUI, but the "b" also has RJ-45 and selection is via the SCU.
- The shared memory address selection is also slightly different.
- Note that shared memory address > 1MB are supported with this driver.
-
- You can try <http://www.mylex.com> if you want more info, as I've
- never even seen one of these cards. :)
-
- Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 2000/09/01
- - get rid of check_region
- - no need to check if dev == NULL in lne390_probe1
-*/
-
-static const char *version =
- "lne390.c: Driver revision v0.99.1, 01/09/2000\n";
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "lne390"
-
-static int lne390_probe1(struct net_device *dev, int ioaddr);
-
-static void lne390_reset_8390(struct net_device *dev);
-
-static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
-static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
-static void lne390_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page);
-
-#define LNE390_START_PG 0x00 /* First page of TX buffer */
-#define LNE390_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-#define LNE390_ID_PORT 0xc80 /* Same for all EISA cards */
-#define LNE390_IO_EXTENT 0x20
-#define LNE390_SA_PROM 0x16 /* Start of e'net addr. */
-#define LNE390_RESET_PORT 0xc84 /* From the pkt driver source */
-#define LNE390_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */
-
-#define LNE390_ADDR0 0x00 /* 3 byte vendor prefix */
-#define LNE390_ADDR1 0x80
-#define LNE390_ADDR2 0xe5
-
-#define LNE390_ID0 0x10009835 /* 0x3598 = 01101 01100 11000 = mlx */
-#define LNE390_ID1 0x11009835 /* above is the 390A, this is 390B */
-
-#define LNE390_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */
-#define LNE390_CFG2 0xc90
-
-/*
- * You can OR any of the following bits together and assign it
- * to LNE390_DEBUG to get verbose driver info during operation.
- * Currently only the probe one is implemented.
- */
-
-#define LNE390_D_PROBE 0x01
-#define LNE390_D_RX_PKT 0x02
-#define LNE390_D_TX_PKT 0x04
-#define LNE390_D_IRQ 0x08
-
-#define LNE390_DEBUG 0
-
-static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
-static unsigned int shmem_mapA[] __initdata = {0xff, 0xfe, 0xfd, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};
-static unsigned int shmem_mapB[] __initdata = {0xff, 0xfe, 0x0e, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};
-
-/*
- * Probe for the card. The best way is to read the EISA ID if it
- * is known. Then we can check the prefix of the station address
- * PROM for a match against the value assigned to Mylex.
- */
-
-static int __init do_lne390_probe(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- int irq = dev->irq;
- int mem_start = dev->mem_start;
- int ret;
-
- if (ioaddr > 0x1ff) { /* Check a single specified location. */
- if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME))
- return -EBUSY;
- ret = lne390_probe1(dev, ioaddr);
- if (ret)
- release_region(ioaddr, LNE390_IO_EXTENT);
- return ret;
- }
- else if (ioaddr > 0) /* Don't probe at all. */
- return -ENXIO;
-
- if (!EISA_bus) {
-#if LNE390_DEBUG & LNE390_D_PROBE
- printk("lne390-debug: Not an EISA bus. Not probing high ports.\n");
-#endif
- return -ENXIO;
- }
-
- /* EISA spec allows for up to 16 slots, but 8 is typical. */
- for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME))
- continue;
- if (lne390_probe1(dev, ioaddr) == 0)
- return 0;
- release_region(ioaddr, LNE390_IO_EXTENT);
- dev->irq = irq;
- dev->mem_start = mem_start;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init lne390_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_lne390_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static int __init lne390_probe1(struct net_device *dev, int ioaddr)
-{
- int i, revision, ret;
- unsigned long eisa_id;
-
- if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV;
-
-#if LNE390_DEBUG & LNE390_D_PROBE
- printk("lne390-debug: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + LNE390_ID_PORT));
- printk("lne390-debug: config regs: %#x %#x\n",
- inb(ioaddr + LNE390_CFG1), inb(ioaddr + LNE390_CFG2));
-#endif
-
-
-/* Check the EISA ID of the card. */
- eisa_id = inl(ioaddr + LNE390_ID_PORT);
- if ((eisa_id != LNE390_ID0) && (eisa_id != LNE390_ID1)) {
- return -ENODEV;
- }
-
- revision = (eisa_id >> 24) & 0x01; /* 0 = rev A, 1 rev B */
-
-#if 0
-/* Check the Mylex vendor ID as well. Not really required. */
- if (inb(ioaddr + LNE390_SA_PROM + 0) != LNE390_ADDR0
- || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
- || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
- printk("lne390.c: card not found");
- for (i = 0; i < ETH_ALEN; i++)
- printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
- printk(" (invalid prefix).\n");
- return -ENODEV;
- }
-#endif
-
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
- printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
- 0xa+revision, ioaddr/0x1000, dev->dev_addr);
-
- printk("lne390.c: ");
-
- /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */
- if (dev->irq == 0) {
- unsigned char irq_reg = inb(ioaddr + LNE390_CFG2) >> 3;
- dev->irq = irq_map[irq_reg & 0x07];
- printk("using");
- } else {
- /* This is useless unless we reprogram the card here too */
- if (dev->irq == 2) dev->irq = 9; /* Doh! */
- printk("assigning");
- }
- printk(" IRQ %d,", dev->irq);
-
- if ((ret = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev))) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- return ret;
- }
-
- if (dev->mem_start == 0) {
- unsigned char mem_reg = inb(ioaddr + LNE390_CFG2) & 0x07;
-
- if (revision) /* LNE390B */
- dev->mem_start = shmem_mapB[mem_reg] * 0x10000;
- else /* LNE390A */
- dev->mem_start = shmem_mapA[mem_reg] * 0x10000;
- printk(" using ");
- } else {
- /* Should check for value in shmem_map and reprogram the card to use it */
- dev->mem_start &= 0xfff0000;
- printk(" assigning ");
- }
-
- printk("%dkB memory at physical address %#lx\n",
- LNE390_STOP_PG/4, dev->mem_start);
-
- /*
- BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
- the card mem within the region covered by `normal' RAM !!!
-
- ioremap() will fail in that case.
- */
- ei_status.mem = ioremap(dev->mem_start, LNE390_STOP_PG*0x100);
- if (!ei_status.mem) {
- printk(KERN_ERR "lne390.c: Unable to remap card memory above 1MB !!\n");
- printk(KERN_ERR "lne390.c: Try using EISA SCU to set memory below 1MB.\n");
- printk(KERN_ERR "lne390.c: Driver NOT installed.\n");
- ret = -EAGAIN;
- goto cleanup;
- }
- printk("lne390.c: remapped %dkB card memory to virtual address %p\n",
- LNE390_STOP_PG/4, ei_status.mem);
-
- dev->mem_start = (unsigned long)ei_status.mem;
- dev->mem_end = dev->mem_start + (LNE390_STOP_PG - LNE390_START_PG)*256;
-
- /* The 8390 offset is zero for the LNE390 */
- dev->base_addr = ioaddr;
-
- ei_status.name = "LNE390";
- ei_status.tx_start_page = LNE390_START_PG;
- ei_status.rx_start_page = LNE390_START_PG + TX_PAGES;
- ei_status.stop_page = LNE390_STOP_PG;
- ei_status.word16 = 1;
-
- if (ei_debug > 0)
- printk(version);
-
- ei_status.reset_8390 = &lne390_reset_8390;
- ei_status.block_input = &lne390_block_input;
- ei_status.block_output = &lne390_block_output;
- ei_status.get_8390_hdr = &lne390_get_8390_hdr;
-
- dev->netdev_ops = &ei_netdev_ops;
- NS8390_init(dev, 0);
-
- ret = register_netdev(dev);
- if (ret)
- goto unmap;
- return 0;
-unmap:
- if (ei_status.reg0)
- iounmap(ei_status.mem);
-cleanup:
- free_irq(dev->irq, dev);
- return ret;
-}
-
-/*
- * Reset as per the packet driver method. Judging by the EISA cfg
- * file, this just toggles the "Board Enable" bits (bit 2 and 0).
- */
-
-static void lne390_reset_8390(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
-
- outb(0x04, ioaddr + LNE390_RESET_PORT);
- if (ei_debug > 1) printk("%s: resetting the LNE390...", dev->name);
-
- mdelay(2);
-
- ei_status.txing = 0;
- outb(0x01, ioaddr + LNE390_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/*
- * Note: In the following three functions is the implicit assumption
- * that the associated memcpy will only use "rep; movsl" as long as
- * we keep the counts as some multiple of doublewords. This is a
- * requirement of the hardware, and also prevents us from using
- * eth_io_copy_and_sum() since we can't guarantee it will limit
- * itself to doubleword access.
- */
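In practice the doubleword constraint just means rounding each byte count up to a
multiple of four before the copy, exactly as the routines below do; a trivial
standalone check with an illustrative length:

#include <assert.h>

int main(void)
{
        int count = 61;                    /* illustrative frame length */
        int rounded = (count + 3) & ~3;    /* round up to a doubleword multiple */

        assert(rounded == 64);             /* at most three pad bytes are copied */
        return 0;
}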
-
-/*
- * Grab the 8390 specific header. Similar to the block_input routine, but
- * we don't need to be concerned with ring wrap as the header will be at
- * the start of a page, so we optimize accordingly. (A single doubleword.)
- */
-
-static void
-lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - LNE390_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
-}
-
-/*
- * Block input and output are easy on shared memory ethercards, the only
- * complication is when the ring buffer wraps. The count will already
- * be rounded up to a doubleword value via lne390_get_8390_hdr() above.
- */
-
-static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *xfer_start = ei_status.mem + ring_offset - (LNE390_START_PG<<8);
-
- if (ring_offset + count > (LNE390_STOP_PG<<8)) {
- /* Packet wraps over end of ring buffer. */
- int semi_count = (LNE390_STOP_PG<<8) - ring_offset;
- memcpy_fromio(skb->data, xfer_start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count,
- ei_status.mem + (TX_PAGES<<8), count);
- } else {
- /* Packet is in one chunk. */
- memcpy_fromio(skb->data, xfer_start, count);
- }
-}
-
-static void lne390_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - LNE390_START_PG)<<8);
-
- count = (count + 3) & ~3; /* Round up to doubleword */
- memcpy_toio(shmem, buf, count);
-}
-
-
-#ifdef MODULE
-#define MAX_LNE_CARDS 4 /* Max number of LNE390 cards per module */
-static struct net_device *dev_lne[MAX_LNE_CARDS];
-static int io[MAX_LNE_CARDS];
-static int irq[MAX_LNE_CARDS];
-static int mem[MAX_LNE_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, "memory base address(es)");
-MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver");
-MODULE_LICENSE("GPL");
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) {
- if (io[this_dev] == 0 && this_dev != 0)
- break;
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev];
- if (do_lne390_probe(dev) == 0) {
- dev_lne[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "lne390.c: No LNE390 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, LNE390_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) {
- struct net_device *dev = dev_lne[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
-
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index c0c127913dec..587a885de259 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -374,7 +374,6 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
NS8390_init(dev, 0);
memcpy(dev->dev_addr, SA_prom, dev->addr_len);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
i = register_netdev(dev);
if (i)
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
deleted file mode 100644
index ebcdb52ec739..000000000000
--- a/drivers/net/ethernet/8390/ne3210.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- ne3210.c
-
- Linux driver for Novell NE3210 EISA Network Adapter
-
- Copyright (C) 1998, Paul Gortmaker.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- Information and Code Sources:
-
- 1) Based upon my other EISA 8390 drivers (lne390, es3210, smc-ultra32)
- 2) The existing myriad of other Linux 8390 drivers by Donald Becker.
- 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg file
-
- The NE3210 is an EISA shared memory NS8390 implementation. Shared
- memory address > 1MB should work with this driver.
-
- Note that the .cfg file (3/11/93, v1.0) has AUI and BNC switched
- around (or perhaps there are some defective/backwards cards ???)
-
- This driver WILL NOT WORK FOR THE NE3200 - it is completely different
- and does not use an 8390 at all.
-
- Updated to EISA probing API 5/2003 by Marc Zyngier.
-*/
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/mm.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "ne3210"
-
-static void ne3210_reset_8390(struct net_device *dev);
-
-static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
-static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
-static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page);
-
-#define NE3210_START_PG 0x00 /* First page of TX buffer */
-#define NE3210_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-#define NE3210_IO_EXTENT 0x20
-#define NE3210_SA_PROM 0x16 /* Start of e'net addr. */
-#define NE3210_RESET_PORT 0xc84
-#define NE3210_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */
-
-#define NE3210_ADDR0 0x00 /* 3 byte vendor prefix */
-#define NE3210_ADDR1 0x00
-#define NE3210_ADDR2 0x1b
-
-#define NE3210_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */
-#define NE3210_CFG2 0xc90
-#define NE3210_CFG_EXTENT (NE3210_CFG2 - NE3210_CFG1 + 1)
-
-/*
- * You can OR any of the following bits together and assign it
- * to NE3210_DEBUG to get verbose driver info during operation.
- * Currently only the probe one is implemented.
- */
-
-#define NE3210_D_PROBE 0x01
-#define NE3210_D_RX_PKT 0x02
-#define NE3210_D_TX_PKT 0x04
-#define NE3210_D_IRQ 0x08
-
-#define NE3210_DEBUG 0x0
-
-static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
-static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
-static const char * const ifmap[] __initconst = {"UTP", "?", "BNC", "AUI"};
-static int ifmap_val[] __initdata = {
- IF_PORT_10BASET,
- IF_PORT_UNKNOWN,
- IF_PORT_10BASE2,
- IF_PORT_AUI,
-};
-
-static int __init ne3210_eisa_probe (struct device *device)
-{
- unsigned long ioaddr, phys_mem;
- int i, retval, port_index;
- struct eisa_device *edev = to_eisa_device (device);
- struct net_device *dev;
-
- /* Allocate dev->priv and fill in 8390 specific dev fields. */
- if (!(dev = alloc_ei_netdev ())) {
- printk ("ne3210.c: unable to allocate memory for dev!\n");
- return -ENOMEM;
- }
-
- SET_NETDEV_DEV(dev, device);
- dev_set_drvdata(device, dev);
- ioaddr = edev->base_addr;
-
- if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) {
- retval = -EBUSY;
- goto out;
- }
-
- if (!request_region(ioaddr + NE3210_CFG1,
- NE3210_CFG_EXTENT, DRV_NAME)) {
- retval = -EBUSY;
- goto out1;
- }
-
-#if NE3210_DEBUG & NE3210_D_PROBE
- printk("ne3210-debug: probe at %#x, ID %s\n", ioaddr, edev->id.sig);
- printk("ne3210-debug: config regs: %#x %#x\n",
- inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2));
-#endif
-
- port_index = inb(ioaddr + NE3210_CFG2) >> 6;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
- printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
- edev->slot, ifmap[port_index], dev->dev_addr);
-
- /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */
- dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07];
- printk("ne3210.c: using IRQ %d, ", dev->irq);
-
- retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
- if (retval) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- goto out2;
- }
-
- phys_mem = shmem_map[inb(ioaddr + NE3210_CFG2) & 0x07] * 0x1000;
-
- /*
- BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
- the card mem within the region covered by `normal' RAM !!!
- */
- if (phys_mem > 1024*1024) { /* phys addr > 1MB */
- if (phys_mem < virt_to_phys(high_memory)) {
- printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n");
- printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n");
- printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n",
- (u64)virt_to_phys(high_memory));
- printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n");
- retval = -EINVAL;
- goto out3;
- }
- }
-
- if (!request_mem_region (phys_mem, NE3210_STOP_PG*0x100, DRV_NAME)) {
- printk ("ne3210.c: Unable to request shared memory at physical address %#lx\n",
- phys_mem);
- goto out3;
- }
-
- printk("%dkB memory at physical address %#lx\n",
- NE3210_STOP_PG/4, phys_mem);
-
- ei_status.mem = ioremap(phys_mem, NE3210_STOP_PG*0x100);
- if (!ei_status.mem) {
- printk(KERN_ERR "ne3210.c: Unable to remap card memory !!\n");
- printk(KERN_ERR "ne3210.c: Driver NOT installed.\n");
- retval = -EAGAIN;
- goto out4;
- }
- printk("ne3210.c: remapped %dkB card memory to virtual address %p\n",
- NE3210_STOP_PG/4, ei_status.mem);
- dev->mem_start = (unsigned long)ei_status.mem;
- dev->mem_end = dev->mem_start + (NE3210_STOP_PG - NE3210_START_PG)*256;
-
- /* The 8390 offset is zero for the NE3210 */
- dev->base_addr = ioaddr;
-
- ei_status.name = "NE3210";
- ei_status.tx_start_page = NE3210_START_PG;
- ei_status.rx_start_page = NE3210_START_PG + TX_PAGES;
- ei_status.stop_page = NE3210_STOP_PG;
- ei_status.word16 = 1;
- ei_status.priv = phys_mem;
-
- if (ei_debug > 0)
- printk("ne3210 loaded.\n");
-
- ei_status.reset_8390 = &ne3210_reset_8390;
- ei_status.block_input = &ne3210_block_input;
- ei_status.block_output = &ne3210_block_output;
- ei_status.get_8390_hdr = &ne3210_get_8390_hdr;
-
- dev->netdev_ops = &ei_netdev_ops;
-
- dev->if_port = ifmap_val[port_index];
-
- if ((retval = register_netdev (dev)))
- goto out5;
-
- NS8390_init(dev, 0);
- return 0;
-
- out5:
- iounmap(ei_status.mem);
- out4:
- release_mem_region (phys_mem, NE3210_STOP_PG*0x100);
- out3:
- free_irq (dev->irq, dev);
- out2:
- release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT);
- out1:
- release_region (ioaddr, NE3210_IO_EXTENT);
- out:
- free_netdev (dev);
-
- return retval;
-}
-
-static int ne3210_eisa_remove(struct device *device)
-{
- struct net_device *dev = dev_get_drvdata(device);
- unsigned long ioaddr = to_eisa_device (device)->base_addr;
-
- unregister_netdev (dev);
- iounmap(ei_status.mem);
- release_mem_region (ei_status.priv, NE3210_STOP_PG*0x100);
- free_irq (dev->irq, dev);
- release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT);
- release_region (ioaddr, NE3210_IO_EXTENT);
- free_netdev (dev);
-
- return 0;
-}
-
-/*
- * Reset by toggling the "Board Enable" bits (bit 2 and 0).
- */
-
-static void ne3210_reset_8390(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
-
- outb(0x04, ioaddr + NE3210_RESET_PORT);
- if (ei_debug > 1) printk("%s: resetting the NE3210...", dev->name);
-
- mdelay(2);
-
- ei_status.txing = 0;
- outb(0x01, ioaddr + NE3210_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/*
- * Note: In the following three functions is the implicit assumption
- * that the associated memcpy will only use "rep; movsl" as long as
- * we keep the counts as some multiple of doublewords. This is a
- * requirement of the hardware, and also prevents us from using
- * eth_io_copy_and_sum() since we can't guarantee it will limit
- * itself to doubleword access.
- */
-
-/*
- * Grab the 8390 specific header. Similar to the block_input routine, but
- * we don't need to be concerned with ring wrap as the header will be at
- * the start of a page, so we optimize accordingly. (A single doubleword.)
- */
-
-static void
-ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - NE3210_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
-}
-
-/*
- * Block input and output are easy on shared memory ethercards, the only
- * complication is when the ring buffer wraps. The count will already
- * be rounded up to a doubleword value via ne3210_get_8390_hdr() above.
- */
-
-static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *start = ei_status.mem + ring_offset - NE3210_START_PG*256;
-
- if (ring_offset + count > NE3210_STOP_PG*256) {
- /* Packet wraps over end of ring buffer. */
- int semi_count = NE3210_STOP_PG*256 - ring_offset;
- memcpy_fromio(skb->data, start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count,
- ei_status.mem + TX_PAGES*256, count);
- } else {
- /* Packet is in one chunk. */
- memcpy_fromio(skb->data, start, count);
- }
-}
-
-static void ne3210_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - NE3210_START_PG)<<8);
-
- count = (count + 3) & ~3; /* Round up to doubleword */
- memcpy_toio(shmem, buf, count);
-}
-
-static struct eisa_device_id ne3210_ids[] = {
- { "EGL0101" },
- { "NVL1801" },
- { "" },
-};
-MODULE_DEVICE_TABLE(eisa, ne3210_ids);
-
-static struct eisa_driver ne3210_eisa_driver = {
- .id_table = ne3210_ids,
- .driver = {
- .name = "ne3210",
- .probe = ne3210_eisa_probe,
- .remove = ne3210_eisa_remove,
- },
-};
-
-MODULE_DESCRIPTION("NE3210 EISA Ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(eisa, ne3210_ids);
-
-static int ne3210_init(void)
-{
- return eisa_driver_register (&ne3210_eisa_driver);
-}
-
-static void ne3210_cleanup(void)
-{
- eisa_driver_unregister (&ne3210_eisa_driver);
-}
-
-module_init (ne3210_init);
-module_exit (ne3210_cleanup);
diff --git a/drivers/net/ethernet/8390/smc-ultra32.c b/drivers/net/ethernet/8390/smc-ultra32.c
deleted file mode 100644
index 923e42aedcfd..000000000000
--- a/drivers/net/ethernet/8390/smc-ultra32.c
+++ /dev/null
@@ -1,463 +0,0 @@
-/* smc-ultra32.c: An SMC Ultra32 EISA ethernet driver for linux.
-
-Sources:
-
- This driver is based on (cloned from) the ISA SMC Ultra driver
- written by Donald Becker. Modifications to support the EISA
- version of the card by Paul Gortmaker and Leonard N. Zubkoff.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
-Theory of Operation:
-
- The SMC Ultra32C card uses the SMC 83c790 chip which is also
- found on the ISA SMC Ultra cards. It has a shared memory mode of
- operation that makes it similar to the ISA version of the card.
- The main difference is that the EISA card has 32KB of RAM, but
- only an 8KB window into that memory. The EISA card also can be
- set for a bus-mastering mode of operation via the ECU, but that
- is not (and probably will never be) supported by this driver.
- The ECU should be run to enable shared memory and to disable the
- bus-mastering feature for use with linux.
-
- By programming the 8390 to use only 8KB RAM, the modifications
- to the ISA driver can be limited to the probe and initialization
- code. This allows easy integration of EISA support into the ISA
- driver. However, the driver development kit from SMC provided the
- register information for sliding the 8KB window, and hence the 8390
- is programmed to use the full 32KB RAM.
-
- Unfortunately this required code changes outside the probe/init
- routines, and thus we decided to separate the EISA driver from
- the ISA one. In this way, ISA users don't end up with a larger
- driver due to the EISA code, and EISA users don't end up with a
- larger driver due to the ISA EtherEZ PIO code. The driver is
- similar to the 3c503/16 driver, in that the window must be set
- back to the 1st 8KB of space for access to the two 8390 Tx slots.
-
- In testing, using only 8KB RAM (3 Tx / 5 Rx) didn't appear to
- be a limiting factor, since the EISA bus could get packets off
- the card fast enough, but having the use of lots of RAM as Rx
- space is extra insurance if interrupt latencies become excessive.
-
-*/
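The window sliding described above boils down to splitting an 8390 ring page into
a bank number and an offset inside the 8KB window (32 pages of 256 bytes per
bank). A hedged sketch, assuming the RAM-select register simply takes the bank
number; the accessors further down use the low five page bits as the in-window
offset:

/* Illustrative helpers only: 32KB of card RAM seen through an 8KB window. */
static inline unsigned int ultra32_bank(unsigned int ring_page)
{
        return ring_page >> 5;                /* which 8KB bank holds the page */
}

static inline unsigned int ultra32_window_offset(unsigned int ring_page)
{
        return (ring_page & 0x1f) << 8;       /* byte offset inside the window */
}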
-
-static const char *version = "smc-ultra32.c: 06/97 v1.00\n";
-
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "smc-ultra32"
-
-static int ultra32_probe1(struct net_device *dev, int ioaddr);
-static int ultra32_open(struct net_device *dev);
-static void ultra32_reset_8390(struct net_device *dev);
-static void ultra32_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void ultra32_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void ultra32_block_output(struct net_device *dev, int count,
- const unsigned char *buf,
- const int start_page);
-static int ultra32_close(struct net_device *dev);
-
-#define ULTRA32_CMDREG 0 /* Offset to ASIC command register. */
-#define ULTRA32_RESET 0x80 /* Board reset, in ULTRA32_CMDREG. */
-#define ULTRA32_MEMENB 0x40 /* Enable the shared memory. */
-#define ULTRA32_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
-#define ULTRA32_IO_EXTENT 32
-#define EN0_ERWCNT 0x08 /* Early receive warning count. */
-
-/*
- * Defines that apply only to the Ultra32 EISA card. Note that
- * "smc" = 10011 01101 00011 = 0x4da3, and hence !smc8010.cfg translates
- * into an EISA ID of 0x1080A34D
- */
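As a hedged aside on that arithmetic: EISA vendor codes pack three letters at five
bits apiece with 'A' = 1, which is how "smc" compresses to 0x4da3; the low 16 bits
of the inl() value above, 0xa34d, appear to be those same bits byte-swapped. A tiny
standalone check:

#include <assert.h>
#include <stdint.h>

/* Pack a three-letter EISA manufacturer code: five bits per letter, 'A' = 1. */
static uint16_t eisa_mfr(char a, char b, char c)
{
        return (uint16_t)(((a - 'A' + 1) << 10) | ((b - 'A' + 1) << 5) | (c - 'A' + 1));
}

int main(void)
{
        assert(eisa_mfr('S', 'M', 'C') == 0x4da3);
        return 0;
}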
-#define ULTRA32_BASE 0xca0
-#define ULTRA32_ID 0x1080a34d
-#define ULTRA32_IDPORT (-0x20) /* 0xc80 */
-/* Config regs 1->7 from the EISA !SMC8010.CFG file. */
-#define ULTRA32_CFG1 0x04 /* 0xca4 */
-#define ULTRA32_CFG2 0x05 /* 0xca5 */
-#define ULTRA32_CFG3 (-0x18) /* 0xc88 */
-#define ULTRA32_CFG4 (-0x17) /* 0xc89 */
-#define ULTRA32_CFG5 (-0x16) /* 0xc8a */
-#define ULTRA32_CFG6 (-0x15) /* 0xc8b */
-#define ULTRA32_CFG7 0x0d /* 0xcad */
-
-static void cleanup_card(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET;
- /* NB: ultra32_close_card() does free_irq */
- release_region(ioaddr, ULTRA32_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-/* Probe for the Ultra32. This looks like a 8013 with the station
- address PROM at I/O ports <base>+8 to <base>+13, with a checksum
- following.
-*/
-
-struct net_device * __init ultra32_probe(int unit)
-{
- struct net_device *dev;
- int base;
- int irq;
- int err = -ENODEV;
-
- if (!EISA_bus)
- return ERR_PTR(-ENODEV);
-
- dev = alloc_ei_netdev();
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- irq = dev->irq;
-
- /* EISA spec allows for up to 16 slots, but 8 is typical. */
- for (base = 0x1000 + ULTRA32_BASE; base < 0x9000; base += 0x1000) {
- if (ultra32_probe1(dev, base) == 0)
- break;
- dev->irq = irq;
- }
- if (base >= 0x9000)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-
-static const struct net_device_ops ultra32_netdev_ops = {
- .ndo_open = ultra32_open,
- .ndo_stop = ultra32_close,
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_rx_mode = ei_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
-#endif
-};
-
-static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
-{
- int i, edge, media, retval;
- int checksum = 0;
- const char *model_name;
- static unsigned version_printed;
- /* Values from various config regs. */
- unsigned char idreg;
- unsigned char reg4;
- const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"};
-
- if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- if (inb(ioaddr + ULTRA32_IDPORT) == 0xff ||
- inl(ioaddr + ULTRA32_IDPORT) != ULTRA32_ID) {
- retval = -ENODEV;
- goto out;
- }
-
- media = inb(ioaddr + ULTRA32_CFG7) & 0x03;
- edge = inb(ioaddr + ULTRA32_CFG5) & 0x08;
- printk("SMC Ultra32 in EISA Slot %d, Media: %s, %s IRQs.\n",
- ioaddr >> 12, ifmap[media],
- (edge ? "Edge Triggered" : "Level Sensitive"));
-
- idreg = inb(ioaddr + 7);
- reg4 = inb(ioaddr + 4) & 0x7f;
-
- /* Check the ID nibble. */
- if ((idreg & 0xf0) != 0x20) { /* SMC Ultra */
- retval = -ENODEV;
- goto out;
- }
-
- /* Select the station address register set. */
- outb(reg4, ioaddr + 4);
-
- for (i = 0; i < 8; i++)
- checksum += inb(ioaddr + 8 + i);
- if ((checksum & 0xff) != 0xff) {
- retval = -ENODEV;
- goto out;
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- model_name = "SMC Ultra32";
-
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
-
- printk("%s: %s at 0x%X, %pM",
- dev->name, model_name, ioaddr, dev->dev_addr);
-
- /* Switch from the station address to the alternate register set and
- read the useful registers there. */
- outb(0x80 | reg4, ioaddr + 4);
-
- /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */
- outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
-
- /* Reset RAM addr. */
- outb(0x00, ioaddr + 0x0b);
-
- /* Switch back to the station address register set so that the
- MS-DOS driver can find the card after a warm boot. */
- outb(reg4, ioaddr + 4);
-
- if ((inb(ioaddr + ULTRA32_CFG5) & 0x40) == 0) {
- printk("\nsmc-ultra32: Card RAM is disabled! "
- "Run EISA config utility.\n");
- retval = -ENODEV;
- goto out;
- }
- if ((inb(ioaddr + ULTRA32_CFG2) & 0x04) == 0)
- printk("\nsmc-ultra32: Ignoring Bus-Master enable bit. "
- "Run EISA config utility.\n");
-
- if (dev->irq < 2) {
- unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
- int irq = irqmap[inb(ioaddr + ULTRA32_CFG5) & 0x07];
- if (irq == 0) {
- printk(", failed to detect IRQ line.\n");
- retval = -EAGAIN;
- goto out;
- }
- dev->irq = irq;
- }
-
- /* The 8390 isn't at the base address, so fake the offset */
- dev->base_addr = ioaddr + ULTRA32_NIC_OFFSET;
-
- /* Save RAM address in the unused reg0 to avoid excess inb's. */
- ei_status.reg0 = inb(ioaddr + ULTRA32_CFG3) & 0xfc;
-
- dev->mem_start = 0xc0000 + ((ei_status.reg0 & 0x7c) << 11);
-
- ei_status.name = model_name;
- ei_status.word16 = 1;
- ei_status.tx_start_page = 0;
- ei_status.rx_start_page = TX_PAGES;
- /* All Ultra32 cards have 32KB memory with an 8KB window. */
- ei_status.stop_page = 128;
-
- ei_status.mem = ioremap(dev->mem_start, 0x2000);
- if (!ei_status.mem) {
- printk(", failed to ioremap.\n");
- retval = -ENOMEM;
- goto out;
- }
- dev->mem_end = dev->mem_start + 0x1fff;
-
- printk(", IRQ %d, 32KB memory, 8KB window at 0x%lx-0x%lx.\n",
- dev->irq, dev->mem_start, dev->mem_end);
- ei_status.block_input = &ultra32_block_input;
- ei_status.block_output = &ultra32_block_output;
- ei_status.get_8390_hdr = &ultra32_get_8390_hdr;
- ei_status.reset_8390 = &ultra32_reset_8390;
-
- dev->netdev_ops = &ultra32_netdev_ops;
- NS8390_init(dev, 0);
-
- return 0;
-out:
- release_region(ioaddr, ULTRA32_IO_EXTENT);
- return retval;
-}
-
-static int ultra32_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */
- int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : IRQF_SHARED;
- int retval;
-
- retval = request_irq(dev->irq, ei_interrupt, irq_flags, dev->name, dev);
- if (retval)
- return retval;
-
- outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
- outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
- outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
- outb(0x01, ioaddr + 6); /* Enable Interrupts. */
- /* Set the early receive warning level in window 0 high enough not
- to receive ERW interrupts. */
- outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr);
- outb(0xff, dev->base_addr + EN0_ERWCNT);
- ei_open(dev);
- return 0;
-}
-
-static int ultra32_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* CMDREG */
-
- netif_stop_queue(dev);
-
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
-
- outb(0x00, ioaddr + ULTRA32_CFG6); /* Disable Interrupts. */
- outb(0x00, ioaddr + 6); /* Disable interrupts. */
- free_irq(dev->irq, dev);
-
- NS8390_init(dev, 0);
-
- return 0;
-}
-
-static void ultra32_reset_8390(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC base addr */
-
- outb(ULTRA32_RESET, ioaddr);
- if (ei_debug > 1) printk("resetting Ultra32, t=%ld...", jiffies);
- ei_status.txing = 0;
-
- outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
- outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
- outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
- outb(0x01, ioaddr + 6); /* Enable Interrupts. */
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
-
-static void ultra32_get_8390_hdr(struct net_device *dev,
- struct e8390_pkt_hdr *hdr,
- int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page & 0x1f) << 8);
- unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
-
- /* Select correct 8KB Window. */
- outb(ei_status.reg0 | ((ring_page & 0x60) >> 5), RamReg);
-
-#ifdef __BIG_ENDIAN
- /* Officially this is what we are doing, but the readl() is faster */
- /* unfortunately it isn't endian aware of the struct */
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = le16_to_cpu(hdr->count);
-#else
- ((unsigned int*)hdr)[0] = readl(hdr_start);
-#endif
-}
-
-/* Block input and output are easy on shared memory ethercards, the only
- complication is when the ring buffer wraps, or in this case, when a
- packet spans an 8KB boundary. Note that the current 8KB segment is
- already set by the get_8390_hdr routine. */
-
-static void ultra32_block_input(struct net_device *dev,
- int count,
- struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *xfer_start = ei_status.mem + (ring_offset & 0x1fff);
- unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
-
- if ((ring_offset & ~0x1fff) != ((ring_offset + count - 1) & ~0x1fff)) {
- int semi_count = 8192 - (ring_offset & 0x1FFF);
- memcpy_fromio(skb->data, xfer_start, semi_count);
- count -= semi_count;
- if (ring_offset < 96*256) {
- /* Select next 8KB Window. */
- ring_offset += semi_count;
- outb(ei_status.reg0 | ((ring_offset & 0x6000) >> 13), RamReg);
- memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
- } else {
- /* Select first 8KB Window. */
- outb(ei_status.reg0, RamReg);
- memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
- }
- } else {
- memcpy_fromio(skb->data, xfer_start, count);
- }
-}
-
-static void ultra32_block_output(struct net_device *dev,
- int count,
- const unsigned char *buf,
- int start_page)
-{
- void __iomem *xfer_start = ei_status.mem + (start_page<<8);
- unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
-
- /* Select first 8KB Window. */
- outb(ei_status.reg0, RamReg);
-
- memcpy_toio(xfer_start, buf, count);
-}
-
-#ifdef MODULE
-#define MAX_ULTRA32_CARDS 4 /* Max number of Ultra cards per module */
-static struct net_device *dev_ultra[MAX_ULTRA32_CARDS];
-
-MODULE_DESCRIPTION("SMC Ultra32 EISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-int __init init_module(void)
-{
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
- struct net_device *dev = ultra32_probe(-1);
- if (IS_ERR(dev))
- break;
- dev_ultra[found++] = dev;
- }
- if (found)
- return 0;
- printk(KERN_WARNING "smc-ultra32.c: No SMC Ultra32 found.\n");
- return -ENXIO;
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
- struct net_device *dev = dev_ultra[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
-
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index e4ff38949112..ed956e08d38b 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -135,7 +135,6 @@ config ETHOC
source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
-source "drivers/net/ethernet/racal/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index d4473072654a..8268d85f9448 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -53,7 +53,6 @@ obj-$(CONFIG_ETHOC) += ethoc.o
obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
-obj-$(CONFIG_NET_VENDOR_RACAL) += racal/
obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index e49c0eff040b..a9481606bbcd 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -61,6 +61,7 @@ config BFIN_RX_DESC_NUM
config BFIN_MAC_USE_HWSTAMP
bool "Use IEEE 1588 hwstamp"
+ depends on BFIN_MAC && BF518
select PTP_1588_CLOCK
default y
---help---
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index c1fdb8be8bee..a175d0be1ae1 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -425,8 +425,8 @@ static int mii_probe(struct net_device *dev, int phy_mode)
return -EINVAL;
}
- phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
- 0, phy_mode);
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &bfin_mac_adjust_link, phy_mode);
if (IS_ERR(phydev)) {
netdev_err(dev, "could not attach PHY\n");
@@ -498,10 +498,10 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, dev_name(&dev->dev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
static void bfin_mac_ethtool_getwol(struct net_device *dev,
@@ -647,7 +647,6 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
setup_mac_addr(dev->dev_addr);
return 0;
}
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index aa53115bb38b..0be2195e5034 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1127,10 +1127,11 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct greth_private *greth = netdev_priv(dev);
- strncpy(info->driver, dev_driver_string(greth->dev), 32);
- strncpy(info->version, "revision: 1.0", 32);
- strncpy(info->bus_info, greth->dev->bus->name, 32);
- strncpy(info->fw_version, "N/A", 32);
+ strlcpy(info->driver, dev_driver_string(greth->dev),
+ sizeof(info->driver));
+ strlcpy(info->version, "revision: 1.0", sizeof(info->version));
+ strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
info->eedump_len = 0;
info->regdump_len = sizeof(struct greth_regs);
}
@@ -1287,9 +1288,7 @@ static int greth_mdio_probe(struct net_device *dev)
}
ret = phy_connect_direct(dev, phy, &greth_link_change,
- 0, greth->gbit_mac ?
- PHY_INTERFACE_MODE_GMII :
- PHY_INTERFACE_MODE_MII);
+ greth->gbit_mac ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII);
if (ret) {
if (netif_msg_ifup(greth))
dev_err(&dev->dev, "could not attach to PHY\n");
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 8350f4b37a8a..13d74aa4033d 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_AMD
default y
depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
- (ARM && ARCH_EBSA110) || ISA || EISA || MCA || PCMCIA
+ (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
@@ -105,19 +105,6 @@ config DECLANCE
DEC (now Compaq) based on the AMD LANCE chipset, including the
DEPCA series. (This chipset is better known via the NE2100 cards.)
-config DEPCA
- tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support"
- depends on (ISA || EISA || MCA)
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto> as well as
- <file:drivers/net/ethernet/amd/depca.c>.
-
- To compile this driver as a module, choose M here. The module
- will be called depca.
-
config HPLANCE
bool "HP on-board LANCE support"
depends on DIO
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index 175caa5328c9..cdd4301a973d 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
obj-$(CONFIG_ARIADNE) += ariadne.o
obj-$(CONFIG_ATARILANCE) += atarilance.o
obj-$(CONFIG_DECLANCE) += declance.o
-obj-$(CONFIG_DEPCA) += depca.o
obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
obj-$(CONFIG_LANCE) += lance.o
obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 2ea221ed4777..de774d419144 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -437,8 +437,8 @@ static int au1000_mii_probe(struct net_device *dev)
/* now we are supposed to have a proper phydev, to attach to... */
BUG_ON(phydev->attached_dev);
- phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &au1000_adjust_link, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -587,10 +587,10 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct au1000_private *aup = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- info->fw_version[0] = '\0';
- sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
+ aup->mac_id);
info->regdump_len = 0;
}
diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c
deleted file mode 100644
index 34a485363d5b..000000000000
--- a/drivers/net/ethernet/amd/depca.c
+++ /dev/null
@@ -1,1910 +0,0 @@
-/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux.
-
- Written 1994, 1995 by David C. Davies.
-
-
- Copyright 1994 David C. Davies
- and
- United States Government
- (as represented by the Director, National Security Agency).
-
- Copyright 1995 Digital Equipment Corporation.
-
-
- This software may be used and distributed according to the terms of
- the GNU General Public License, incorporated herein by reference.
-
- This driver is written for the Digital Equipment Corporation series
- of DEPCA and EtherWORKS ethernet cards:
-
- DEPCA (the original)
- DE100
- DE101
- DE200 Turbo
- DE201 Turbo
- DE202 Turbo (TP BNC)
- DE210
- DE422 (EISA)
-
- The driver has been tested on DE100, DE200 and DE202 cards in a
- relatively busy network. The DE422 has been tested a little.
-
- This driver will NOT work for the DE203, DE204 and DE205 series of
- cards, since they have a new custom ASIC in place of the AMD LANCE
- chip. See the 'ewrk3.c' driver in the Linux source tree for running
- those cards.
-
- I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from)
- a DECstation 5000/200.
-
- The author may be reached at davies@maniac.ultranet.com
-
- =========================================================================
-
- The driver was originally based on the 'lance.c' driver from Donald
- Becker which is included with the standard driver distribution for
- linux. V0.4 is a complete re-write with only the kernel interface
- remaining from the original code.
-
- 1) Lance.c code in /linux/drivers/net/
- 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook",
- AMD, 1992 [(800) 222-9323].
- 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)",
- AMD, Pub. #17881, May 1993.
- 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA",
- AMD, Pub. #16907, May 1992
- 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual",
- Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003
- 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual",
- Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003
- 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR
- Digital Equipment Corporation, 1989
- 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual",
- Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001
-
-
- Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this
- driver.
-
- The original DEPCA card requires that the ethernet ROM address counter
- be enabled to count and has an 8 bit NICSR. The ROM counter enabling is
- only done when a 0x08 is read as the first address octet (to minimise
- the chances of writing over some other hardware's I/O register). The
- NICSR accesses have been changed to byte accesses for all the cards
- supported by this driver, since there is only one useful bit in the MSB
- (remote boot timeout) and it is not used. Also, there is a maximum of
- only 48kB network RAM for this card. My thanks to Torbjorn Lindh for
- help debugging all this (and holding my feet to the fire until I got it
- right).
-
- The DE200 series boards have on-board 64kB RAM for use as a shared
- memory network buffer. Only the DE100 cards make use of a 2kB buffer
- mode which has not been implemented in this driver (only the 32kB and
- 64kB modes are supported [16kB/48kB for the original DEPCA]).
-
- At the most only 2 DEPCA cards can be supported on the ISA bus because
- there is only provision for two I/O base addresses on each card (0x300
- and 0x200). The I/O address is detected by searching for a byte sequence
- in the Ethernet station address PROM at the expected I/O address for the
- Ethernet PROM. The shared memory base address is 'autoprobed' by
- looking for the self test PROM and detecting the card name. When a
- second DEPCA is detected, information is placed in the base_addr
- variable of the next device structure (which is created if necessary),
- thus enabling ethif_probe initialization for the device. More than 2
- EISA cards can be supported, but care will be needed assigning the
- shared memory to ensure that each slot has the correct IRQ, I/O address
- and shared memory address assigned.
-
- ************************************************************************
-
- NOTE: If you are using two ISA DEPCAs, it is important that you assign
- the base memory addresses correctly. The driver autoprobes I/O 0x300
- then 0x200. The base memory address for the first device must be less
- than that of the second so that the auto probe will correctly assign the
- I/O and memory addresses on the same card. I can't think of a way to do
- this unambiguously at the moment, since there is nothing on the cards to
- tie I/O and memory information together.
-
- I am unable to test 2 cards together for now, so this code is
- unchecked. All reports, good or bad, are welcome.
-
- ************************************************************************
-
- The board IRQ setting must be at an unused IRQ which is auto-probed
- using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are
- {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is
- really IRQ9 in machines with 16 IRQ lines.
-
- No 16MB memory limitation should exist with this driver as DMA is not
- used and the common memory area is in low memory on the network card (my
- current system has 20MB and I've not had problems yet).
-
- The ability to load this driver as a loadable module has been added. To
- utilise this ability, you have to do <8 things:
-
- 0) have a copy of the loadable modules code installed on your system.
- 1) copy depca.c from the /linux/drivers/net directory to your favourite
- temporary directory.
- 2) if you wish, edit the source code near line 1530 to reflect the I/O
- address and IRQ you're using (see also 5).
- 3) compile depca.c, but include -DMODULE in the command line to ensure
- that the correct bits are compiled (see end of source code).
- 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
- kernel with the depca configuration turned off and reboot.
- 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100]
- [Alan Cox: Changed the code to allow command line irq/io assignments]
- [Dave Davies: Changed the code to allow command line mem/name
- assignments]
- 6) run the net startup bits for your eth?? interface manually
- (usually /etc/rc.inet[12] at boot time).
- 7) enjoy!
-
- Note that autoprobing is not allowed in loadable modules - the system is
- already up and running and you're messing with interrupts.
-
- To unload a module, turn off the associated interface
- 'ifconfig eth?? down' then 'rmmod depca'.
-
- To assign a base memory address for the shared memory when running as a
- loadable module, see 5 above. To include the adapter name (if you have
- no PROM but know the card name) also see 5 above. Note that this last
- option will not work with kernel built-in depca's.
-
- The shared memory assignment for a loadable module makes sense to avoid
- the 'memory autoprobe' picking the wrong shared memory (for the case of
- 2 depca's in a PC).
-
- ************************************************************************
- Support for MCA EtherWORKS cards added 11-3-98. (MCA since deleted)
- Verified to work with up to 2 DE212 cards in a system (although not
- fully stress-tested).
-
- Revision History
- ----------------
-
- Version Date Description
-
- 0.1 25-jan-94 Initial writing.
- 0.2 27-jan-94 Added LANCE TX hardware buffer chaining.
- 0.3 1-feb-94 Added multiple DEPCA support.
- 0.31 4-feb-94 Added DE202 recognition.
- 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support.
- 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable.
- Add jabber packet fix from murf@perftech.com
- and becker@super.org
- 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access.
- 0.35 8-mar-94 Added DE201 recognition. Tidied up.
- 0.351 30-apr-94 Added EISA support. Added DE422 recognition.
- 0.36 16-may-94 DE422 fix released.
- 0.37 22-jul-94 Added MODULE support
- 0.38 15-aug-94 Added DBR ROM switch in depca_close().
- Multi DEPCA bug fix.
- 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0.
- 0.381 12-dec-94 Added DE101 recognition, fix multicast bug.
- 0.382 9-feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
- 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by
- <stromain@alf.dec.com>
- 0.384 17-mar-95 Fix a ring full bug reported by <bkm@star.rl.ac.uk>
- 0.385 3-apr-95 Fix a recognition bug reported by
- <ryan.niemi@lastfrontier.com>
- 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility
- 0.40 25-May-95 Rewrite for portability & updated.
- ALPHA support from <jestabro@amt.tay1.dec.com>
- 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from
- suggestion by <heiko@colossus.escape.de>
- 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable
- modules.
- Add 'adapter_name' for loadable modules when no PROM.
- Both above from a suggestion by
- <pchen@woodruffs121.residence.gatech.edu>.
- Add new multicasting code.
- 0.421 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
- 0.422 29-Apr-96 Fix depca_hw_init() bug <jari@markkus2.fimr.fi>
- 0.423 7-Jun-96 Fix module load bug <kmg@barco.be>
- 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
- 0.44 1-Sep-97 Fix *_probe() to test check_region() first - bug
- reported by <mmogilvi@elbert.uccs.edu>
- 0.45 3-Nov-98 Added support for MCA EtherWORKS (DE210/DE212) cards
- by <tymm@computer.org>
- 0.451 5-Nov-98 Fixed mca stuff cuz I'm a dummy. <tymm@computer.org>
- 0.5 14-Nov-98 Re-spin for 2.1.x kernels.
- 0.51 27-Jun-99 Correct received packet length for CRC from
- report by <worm@dkik.dk>
- 0.52 16-Oct-00 Fixes for 2.3 io memory accesses
- Fix show-stopper (ints left masked) in depca_interrupt
- by <peterd@pnd-pc.demon.co.uk>
- 0.53 12-Jan-01 Release resources on failure, bss tidbits
- by acme@conectiva.com.br
- 0.54 08-Nov-01 use library crc32 functions
- by Matt_Domsch@dell.com
- 0.55 01-Mar-03 Use EISA/sysfs framework <maz@wild-wind.fr.eu.org>
-
- =========================================================================
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/ctype.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/bitops.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#ifdef CONFIG_EISA
-#include <linux/eisa.h>
-#endif
-
-#include "depca.h"
-
-static char version[] __initdata = "depca.c:v0.53 2001/1/12 davies@maniac.ultranet.com\n";
-
-#ifdef DEPCA_DEBUG
-static int depca_debug = DEPCA_DEBUG;
-#else
-static int depca_debug = 1;
-#endif
-
-#define DEPCA_NDA 0xffe0 /* No Device Address */
-
-#define TX_TIMEOUT (1*HZ)
-
-/*
-** Ethernet PROM defines
-*/
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-/*
-** Set the number of Tx and Rx buffers. Ensure that the memory requested
-** here is <= to the amount of shared memory set up by the board switches.
-** The number of descriptors MUST BE A POWER OF 2.
-**
-** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ)
-*/
-#define NUM_RX_DESC 8 /* Number of RX descriptors */
-#define NUM_TX_DESC 8 /* Number of TX descriptors */
-#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */
-#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */
-
-/*
-** EISA bus defines
-*/
-#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
-
-/*
-** ISA Bus defines
-*/
-#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000}
-#define DEPCA_TOTAL_SIZE 0x10
-
-static struct {
- u_long iobase;
- struct platform_device *device;
-} depca_io_ports[] = {
- { 0x300, NULL },
- { 0x200, NULL },
- { 0 , NULL },
-};
-
-/*
-** Name <-> Adapter mapping
-*/
-#define DEPCA_SIGNATURE {"DEPCA",\
- "DE100","DE101",\
- "DE200","DE201","DE202",\
- "DE210","DE212",\
- "DE422",\
- ""}
-
-static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
-
-enum depca_type {
- DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
-};
-
-static char depca_string[] = "depca";
-
-static int depca_device_remove (struct device *device);
-
-#ifdef CONFIG_EISA
-static struct eisa_device_id depca_eisa_ids[] = {
- { "DEC4220", de422 },
- { "" }
-};
-MODULE_DEVICE_TABLE(eisa, depca_eisa_ids);
-
-static int depca_eisa_probe (struct device *device);
-
-static struct eisa_driver depca_eisa_driver = {
- .id_table = depca_eisa_ids,
- .driver = {
- .name = depca_string,
- .probe = depca_eisa_probe,
- .remove = depca_device_remove
- }
-};
-#endif
-
-static int depca_isa_probe (struct platform_device *);
-
-static int depca_isa_remove(struct platform_device *pdev)
-{
- return depca_device_remove(&pdev->dev);
-}
-
-static struct platform_driver depca_isa_driver = {
- .probe = depca_isa_probe,
- .remove = depca_isa_remove,
- .driver = {
- .name = depca_string,
- },
-};
-
-/*
-** Miscellaneous info...
-*/
-#define DEPCA_STRLEN 16
-
-/*
-** Memory Alignment. Each descriptor is 4 longwords long. To force a
-** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
-** DESC_ALIGN. DEPCA_ALIGN aligns the start address of the private memory area
-** and hence the RX descriptor ring's first entry.
-*/
-#define DEPCA_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
-#define DEPCA_ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */
-#define DEPCA_ALIGN DEPCA_ALIGN8 /* Keep the LANCE happy... */
-
-/*
-** The DEPCA Rx and Tx ring descriptors.
-*/
-struct depca_rx_desc {
- volatile s32 base;
- s16 buf_length; /* This length is negative 2's complement! */
- s16 msg_length; /* This length is "normal". */
-};
-
-struct depca_tx_desc {
- volatile s32 base;
- s16 length; /* This length is negative 2's complement! */
- s16 misc; /* Errors and TDR info */
-};
-
-#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM
- to LANCE memory address space */
-
-/*
-** The Lance initialization block, described in databook, in common memory.
-*/
-struct depca_init {
- u16 mode; /* Mode register */
- u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */
- u8 mcast_table[8]; /* Multicast Hash Table. */
- u32 rx_ring; /* Rx ring base pointer & ring length */
- u32 tx_ring; /* Tx ring base pointer & ring length */
-};
-
-#define DEPCA_PKT_STAT_SZ 16
-#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you
- increase DEPCA_PKT_STAT_SZ */
-struct depca_private {
- char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */
- enum depca_type adapter; /* Adapter type */
- enum {
- DEPCA_BUS_ISA = 1,
- DEPCA_BUS_EISA,
- } depca_bus; /* type of bus */
- struct depca_init init_block; /* Shadow Initialization block */
-/* CPU address space fields */
- struct depca_rx_desc __iomem *rx_ring; /* Pointer to start of RX descriptor ring */
- struct depca_tx_desc __iomem *tx_ring; /* Pointer to start of TX descriptor ring */
- void __iomem *rx_buff[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */
- void __iomem *tx_buff[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */
- void __iomem *sh_mem; /* CPU mapped virt address of device RAM */
- u_long mem_start; /* Bus address of device RAM (before remap) */
- u_long mem_len; /* device memory size */
-/* Device address space fields */
- u_long device_ram_start; /* Start of RAM in device addr space */
-/* Offsets used in both address spaces */
- u_long rx_ring_offset; /* Offset from start of RAM to rx_ring */
- u_long tx_ring_offset; /* Offset from start of RAM to tx_ring */
- u_long buffs_offset; /* LANCE Rx and Tx buffers start address. */
-/* Kernel-only (not device) fields */
- int rx_new, tx_new; /* The next free ring entry */
- int rx_old, tx_old; /* The ring entries to be free()ed. */
- spinlock_t lock;
- struct { /* Private stats counters */
- u32 bins[DEPCA_PKT_STAT_SZ];
- u32 unicast;
- u32 multicast;
- u32 broadcast;
- u32 excessive_collisions;
- u32 tx_underruns;
- u32 excessive_underruns;
- } pktStats;
- int txRingMask; /* TX ring mask */
- int rxRingMask; /* RX ring mask */
- s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */
- s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */
-};
-
-/*
-** The transmit ring full condition is described by the tx_old and tx_new
-** pointers by:
-** tx_old = tx_new Empty ring
-** tx_old = tx_new+1 Full ring
-** tx_old+txRingMask = tx_new Full ring (wrapped condition)
-*/
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->txRingMask-lp->tx_new:\
- lp->tx_old -lp->tx_new-1)
-
-/*
-** Public Functions
-*/
-static int depca_open(struct net_device *dev);
-static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t depca_interrupt(int irq, void *dev_id);
-static int depca_close(struct net_device *dev);
-static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static void depca_tx_timeout(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-
-/*
-** Private functions
-*/
-static void depca_init_ring(struct net_device *dev);
-static int depca_rx(struct net_device *dev);
-static int depca_tx(struct net_device *dev);
-
-static void LoadCSRs(struct net_device *dev);
-static int InitRestartDepca(struct net_device *dev);
-static int DepcaSignature(char *name, u_long paddr);
-static int DevicePresent(u_long ioaddr);
-static int get_hw_addr(struct net_device *dev);
-static void SetMulticastFilter(struct net_device *dev);
-static int load_packet(struct net_device *dev, struct sk_buff *skb);
-static void depca_dbg_open(struct net_device *dev);
-
-static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
-static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
-static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
-static u_char *depca_irq;
-
-static int irq;
-static int io;
-static char *adapter_name;
-static int mem; /* For loadable module assignment
- use insmod mem=0x????? .... */
-module_param (irq, int, 0);
-module_param (io, int, 0);
-module_param (adapter_name, charp, 0);
-module_param (mem, int, 0);
-MODULE_PARM_DESC(irq, "DEPCA IRQ number");
-MODULE_PARM_DESC(io, "DEPCA I/O base address");
-MODULE_PARM_DESC(adapter_name, "DEPCA adapter name");
-MODULE_PARM_DESC(mem, "DEPCA shared memory address");
-MODULE_LICENSE("GPL");
-
-/*
-** Miscellaneous defines...
-*/
-#define STOP_DEPCA \
- outw(CSR0, DEPCA_ADDR);\
- outw(STOP, DEPCA_DATA)
-
-static const struct net_device_ops depca_netdev_ops = {
- .ndo_open = depca_open,
- .ndo_start_xmit = depca_start_xmit,
- .ndo_stop = depca_close,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_do_ioctl = depca_ioctl,
- .ndo_tx_timeout = depca_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init depca_hw_init (struct net_device *dev, struct device *device)
-{
- struct depca_private *lp;
- int i, j, offset, netRAM, mem_len, status = 0;
- s16 nicsr;
- u_long ioaddr;
- u_long mem_start;
-
- /*
- * We are now supposed to enter this function with the
- * following fields filled with proper values :
- *
- * dev->base_addr
- * lp->mem_start
- * lp->depca_bus
- * lp->adapter
- *
- * dev->irq can be set if known from device configuration (on
- * MCA or EISA) or module option. Otherwise, it will be auto
- * detected.
- */
-
- ioaddr = dev->base_addr;
-
- STOP_DEPCA;
-
- nicsr = inb(DEPCA_NICSR);
- nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM);
- outb(nicsr, DEPCA_NICSR);
-
- if (inw(DEPCA_DATA) != STOP) {
- return -ENXIO;
- }
-
- lp = netdev_priv(dev);
- mem_start = lp->mem_start;
-
- if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown)
- return -ENXIO;
-
- printk("%s: %s at 0x%04lx",
- dev_name(device), depca_signature[lp->adapter], ioaddr);
-
- switch (lp->depca_bus) {
-#ifdef CONFIG_EISA
- case DEPCA_BUS_EISA:
- printk(" (EISA slot %d)", to_eisa_device(device)->slot);
- break;
-#endif
-
- case DEPCA_BUS_ISA:
- break;
-
- default:
- printk("Unknown DEPCA bus %d\n", lp->depca_bus);
- return -ENXIO;
- }
-
- printk(", h/w address ");
- status = get_hw_addr(dev);
- printk("%pM", dev->dev_addr);
- if (status != 0) {
- printk(" which has an Ethernet PROM CRC error.\n");
- return -ENXIO;
- }
-
- /* Set up the maximum amount of network RAM(kB) */
- netRAM = ((lp->adapter != DEPCA) ? 64 : 48);
- if ((nicsr & _128KB) && (lp->adapter == de422))
- netRAM = 128;
-
- /* Shared Memory Base Address */
- if (nicsr & BUF) {
- nicsr &= ~BS; /* DEPCA RAM in top 32k */
- netRAM -= 32;
- mem_start += 0x8000;
- }
-
- if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init)))
- > (netRAM << 10)) {
- printk(",\n requests %dkB RAM: only %dkB is available!\n", (mem_len >> 10), netRAM);
- return -ENXIO;
- }
-
- printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start);
-
- /* Enable the shadow RAM. */
- if (lp->adapter != DEPCA) {
- nicsr |= SHE;
- outb(nicsr, DEPCA_NICSR);
- }
-
- spin_lock_init(&lp->lock);
- sprintf(lp->adapter_name, "%s (%s)",
- depca_signature[lp->adapter], dev_name(device));
- status = -EBUSY;
-
- /* Initialisation Block */
- if (!request_mem_region (mem_start, mem_len, lp->adapter_name)) {
- printk(KERN_ERR "depca: cannot request ISA memory, aborting\n");
- goto out_priv;
- }
-
- status = -EIO;
- lp->sh_mem = ioremap(mem_start, mem_len);
- if (lp->sh_mem == NULL) {
- printk(KERN_ERR "depca: cannot remap ISA memory, aborting\n");
- goto out1;
- }
-
- lp->mem_start = mem_start;
- lp->mem_len = mem_len;
- lp->device_ram_start = mem_start & LA_MASK;
-
- offset = 0;
- offset += sizeof(struct depca_init);
-
- /* Tx & Rx descriptors (aligned to a quadword boundary) */
- offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN;
- lp->rx_ring = lp->sh_mem + offset;
- lp->rx_ring_offset = offset;
-
- offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
- lp->tx_ring = lp->sh_mem + offset;
- lp->tx_ring_offset = offset;
-
- offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
-
- lp->buffs_offset = offset;
-
- /* Finish initialising the ring information. */
- lp->rxRingMask = NUM_RX_DESC - 1;
- lp->txRingMask = NUM_TX_DESC - 1;
-
- /* Calculate Tx/Rx RLEN size for the descriptors. */
- for (i = 0, j = lp->rxRingMask; j > 0; i++) {
- j >>= 1;
- }
- lp->rx_rlen = (s32) (i << 29);
- for (i = 0, j = lp->txRingMask; j > 0; i++) {
- j >>= 1;
- }
- lp->tx_rlen = (s32) (i << 29);
-
- /* Load the initialisation block */
- depca_init_ring(dev);
-
- /* Initialise the control and status registers */
- LoadCSRs(dev);
-
- /* Enable DEPCA board interrupts for autoprobing */
- nicsr = ((nicsr & ~IM) | IEN);
- outb(nicsr, DEPCA_NICSR);
-
- /* To auto-IRQ we enable the initialization-done and DMA err,
- interrupts. For now we will always get a DMA error. */
- if (dev->irq < 2) {
- unsigned char irqnum;
- unsigned long irq_mask, delay;
-
- irq_mask = probe_irq_on();
-
- /* Assign the correct irq list */
- switch (lp->adapter) {
- case DEPCA:
- case de100:
- case de101:
- depca_irq = de1xx_irq;
- break;
- case de200:
- case de201:
- case de202:
- case de210:
- case de212:
- depca_irq = de2xx_irq;
- break;
- case de422:
- depca_irq = de422_irq;
- break;
-
- default:
- break; /* Not reached */
- }
-
- /* Trigger an initialization just for the interrupt. */
- outw(INEA | INIT, DEPCA_DATA);
-
- delay = jiffies + HZ/50;
- while (time_before(jiffies, delay))
- yield();
-
- irqnum = probe_irq_off(irq_mask);
-
- status = -ENXIO;
- if (!irqnum) {
- printk(" and failed to detect IRQ line.\n");
- goto out2;
- } else {
- for (dev->irq = 0, i = 0; (depca_irq[i]) && (!dev->irq); i++)
- if (irqnum == depca_irq[i]) {
- dev->irq = irqnum;
- printk(" and uses IRQ%d.\n", dev->irq);
- }
-
- if (!dev->irq) {
- printk(" but incorrect IRQ line detected.\n");
- goto out2;
- }
- }
- } else {
- printk(" and assigned IRQ%d.\n", dev->irq);
- }
-
- if (depca_debug > 1) {
- printk(version);
- }
-
- /* The DEPCA-specific entries in the device structure. */
- dev->netdev_ops = &depca_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- dev->mem_start = 0;
-
- dev_set_drvdata(device, dev);
- SET_NETDEV_DEV (dev, device);
-
- status = register_netdev(dev);
- if (status == 0)
- return 0;
-out2:
- iounmap(lp->sh_mem);
-out1:
- release_mem_region (mem_start, mem_len);
-out_priv:
- return status;
-}
-
-
-static int depca_open(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- s16 nicsr;
- int status = 0;
-
- STOP_DEPCA;
- nicsr = inb(DEPCA_NICSR);
-
- /* Make sure the shadow RAM is enabled */
- if (lp->adapter != DEPCA) {
- nicsr |= SHE;
- outb(nicsr, DEPCA_NICSR);
- }
-
- /* Re-initialize the DEPCA... */
- depca_init_ring(dev);
- LoadCSRs(dev);
-
- depca_dbg_open(dev);
-
- if (request_irq(dev->irq, depca_interrupt, 0, lp->adapter_name, dev)) {
- printk("depca_open(): Requested IRQ%d is busy\n", dev->irq);
- status = -EAGAIN;
- } else {
-
- /* Enable DEPCA board interrupts and turn off LED */
- nicsr = ((nicsr & ~IM & ~LED) | IEN);
- outb(nicsr, DEPCA_NICSR);
- outw(CSR0, DEPCA_ADDR);
-
- netif_start_queue(dev);
-
- status = InitRestartDepca(dev);
-
- if (depca_debug > 1) {
- printk("CSR0: 0x%4.4x\n", inw(DEPCA_DATA));
- printk("nicsr: 0x%02x\n", inb(DEPCA_NICSR));
- }
- }
- return status;
-}
-
-/* Initialize the lance Rx and Tx descriptor rings. */
-static void depca_init_ring(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_int i;
- u_long offset;
-
- /* Lock out other processes whilst setting up the hardware */
- netif_stop_queue(dev);
-
- lp->rx_new = lp->tx_new = 0;
- lp->rx_old = lp->tx_old = 0;
-
- /* Initialize the base address and length of each buffer in the ring */
- for (i = 0; i <= lp->rxRingMask; i++) {
- offset = lp->buffs_offset + i * RX_BUFF_SZ;
- writel((lp->device_ram_start + offset) | R_OWN, &lp->rx_ring[i].base);
- writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length);
- lp->rx_buff[i] = lp->sh_mem + offset;
- }
-
- for (i = 0; i <= lp->txRingMask; i++) {
- offset = lp->buffs_offset + (i + lp->rxRingMask + 1) * TX_BUFF_SZ;
- writel((lp->device_ram_start + offset) & 0x00ffffff, &lp->tx_ring[i].base);
- lp->tx_buff[i] = lp->sh_mem + offset;
- }
-
- /* Set up the initialization block */
- lp->init_block.rx_ring = (lp->device_ram_start + lp->rx_ring_offset) | lp->rx_rlen;
- lp->init_block.tx_ring = (lp->device_ram_start + lp->tx_ring_offset) | lp->tx_rlen;
-
- SetMulticastFilter(dev);
-
- for (i = 0; i < ETH_ALEN; i++) {
- lp->init_block.phys_addr[i] = dev->dev_addr[i];
- }
-
- lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */
-}
-
-
-static void depca_tx_timeout(struct net_device *dev)
-{
- u_long ioaddr = dev->base_addr;
-
- printk("%s: transmit timed out, status %04x, resetting.\n", dev->name, inw(DEPCA_DATA));
-
- STOP_DEPCA;
- depca_init_ring(dev);
- LoadCSRs(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
- InitRestartDepca(dev);
-}
-
-
-/*
-** Writes a socket buffer to TX descriptor ring and starts transmission
-*/
-static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- int status = 0;
-
- /* Transmitter timeout, serious problems. */
- if (skb->len < 1)
- goto out;
-
- if (skb_padto(skb, ETH_ZLEN))
- goto out;
-
- netif_stop_queue(dev);
-
- if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */
- status = load_packet(dev, skb);
-
- if (!status) {
- /* Trigger an immediate send demand. */
- outw(CSR0, DEPCA_ADDR);
- outw(INEA | TDMD, DEPCA_DATA);
-
- dev_kfree_skb(skb);
- }
- if (TX_BUFFS_AVAIL)
- netif_start_queue(dev);
- } else
- status = NETDEV_TX_LOCKED;
-
- out:
- return status;
-}
-
-/*
-** The DEPCA interrupt handler.
-*/
-static irqreturn_t depca_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct depca_private *lp;
- s16 csr0, nicsr;
- u_long ioaddr;
-
- if (dev == NULL) {
- printk("depca_interrupt(): irq %d for unknown device.\n", irq);
- return IRQ_NONE;
- }
-
- lp = netdev_priv(dev);
- ioaddr = dev->base_addr;
-
- spin_lock(&lp->lock);
-
- /* mask the DEPCA board interrupts and turn on the LED */
- nicsr = inb(DEPCA_NICSR);
- nicsr |= (IM | LED);
- outb(nicsr, DEPCA_NICSR);
-
- outw(CSR0, DEPCA_ADDR);
- csr0 = inw(DEPCA_DATA);
-
- /* Acknowledge all of the current interrupt sources ASAP. */
- outw(csr0 & INTE, DEPCA_DATA);
-
- if (csr0 & RINT) /* Rx interrupt (packet arrived) */
- depca_rx(dev);
-
- if (csr0 & TINT) /* Tx interrupt (packet sent) */
- depca_tx(dev);
-
- /* Any resources available? */
- if ((TX_BUFFS_AVAIL >= 0) && netif_queue_stopped(dev)) {
- netif_wake_queue(dev);
- }
-
- /* Unmask the DEPCA board interrupts and turn off the LED */
- nicsr = (nicsr & ~IM & ~LED);
- outb(nicsr, DEPCA_NICSR);
-
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-/* Called with lp->lock held */
-static int depca_rx(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- int i, entry;
- s32 status;
-
- for (entry = lp->rx_new; !(readl(&lp->rx_ring[entry].base) & R_OWN); entry = lp->rx_new) {
- status = readl(&lp->rx_ring[entry].base) >> 16;
- if (status & R_STP) { /* Remember start of frame */
- lp->rx_old = entry;
- }
- if (status & R_ENP) { /* Valid frame status */
- if (status & R_ERR) { /* There was an error. */
- dev->stats.rx_errors++; /* Update the error stats. */
- if (status & R_FRAM)
- dev->stats.rx_frame_errors++;
- if (status & R_OFLO)
- dev->stats.rx_over_errors++;
- if (status & R_CRC)
- dev->stats.rx_crc_errors++;
- if (status & R_BUFF)
- dev->stats.rx_fifo_errors++;
- } else {
- short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4;
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb != NULL) {
- unsigned char *buf;
- skb_reserve(skb, 2); /* 16 byte align the IP header */
- buf = skb_put(skb, pkt_len);
- if (entry < lp->rx_old) { /* Wrapped buffer */
- len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
- memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len);
- memcpy_fromio(buf + len, lp->rx_buff[0], pkt_len - len);
- } else { /* Linear buffer */
- memcpy_fromio(buf, lp->rx_buff[lp->rx_old], pkt_len);
- }
-
- /*
- ** Notify the upper protocol layers that there is another
- ** packet to handle
- */
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
-
- /*
- ** Update stats
- */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) {
- if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) {
- lp->pktStats.bins[i]++;
- i = DEPCA_PKT_STAT_SZ;
- }
- }
- if (is_multicast_ether_addr(buf)) {
- if (is_broadcast_ether_addr(buf)) {
- lp->pktStats.broadcast++;
- } else {
- lp->pktStats.multicast++;
- }
- } else if (ether_addr_equal(buf,
- dev->dev_addr)) {
- lp->pktStats.unicast++;
- }
-
- lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
- if (lp->pktStats.bins[0] == 0) { /* Reset counters */
- memset((char *) &lp->pktStats, 0, sizeof(lp->pktStats));
- }
- } else {
- printk("%s: Memory squeeze, deferring packet.\n", dev->name);
- dev->stats.rx_dropped++; /* Really, deferred. */
- break;
- }
- }
- /* Change buffer ownership for this last frame, back to the adapter */
- for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
- writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
- }
- writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
- }
-
- /*
- ** Update entry information
- */
- lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
- }
-
- return 0;
-}
-
-/*
-** Buffer sent - check for buffer errors.
-** Called with lp->lock held
-*/
-static int depca_tx(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- int entry;
- s32 status;
- u_long ioaddr = dev->base_addr;
-
- for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
- status = readl(&lp->tx_ring[entry].base) >> 16;
-
- if (status < 0) { /* Packet not yet sent! */
- break;
- } else if (status & T_ERR) { /* An error occurred. */
- status = readl(&lp->tx_ring[entry].misc);
- dev->stats.tx_errors++;
- if (status & TMD3_RTRY)
- dev->stats.tx_aborted_errors++;
- if (status & TMD3_LCAR)
- dev->stats.tx_carrier_errors++;
- if (status & TMD3_LCOL)
- dev->stats.tx_window_errors++;
- if (status & TMD3_UFLO)
- dev->stats.tx_fifo_errors++;
- if (status & (TMD3_BUFF | TMD3_UFLO)) {
- /* Trigger an immediate send demand. */
- outw(CSR0, DEPCA_ADDR);
- outw(INEA | TDMD, DEPCA_DATA);
- }
- } else if (status & (T_MORE | T_ONE)) {
- dev->stats.collisions++;
- } else {
- dev->stats.tx_packets++;
- }
-
- /* Update all the pointers */
- lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
- }
-
- return 0;
-}
-
-static int depca_close(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- s16 nicsr;
- u_long ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
-
- outw(CSR0, DEPCA_ADDR);
-
- if (depca_debug > 1) {
- printk("%s: Shutting down ethercard, status was %2.2x.\n", dev->name, inw(DEPCA_DATA));
- }
-
- /*
- ** We stop the DEPCA here -- it occasionally polls
- ** memory if we don't.
- */
- outw(STOP, DEPCA_DATA);
-
- /*
- ** Give back the ROM in case the user wants to go to DOS
- */
- if (lp->adapter != DEPCA) {
- nicsr = inb(DEPCA_NICSR);
- nicsr &= ~SHE;
- outb(nicsr, DEPCA_NICSR);
- }
-
- /*
- ** Free the associated irq
- */
- free_irq(dev->irq, dev);
- return 0;
-}
-
-static void LoadCSRs(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
-
- outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */
- outw((u16) lp->device_ram_start, DEPCA_DATA);
- outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */
- outw((u16) (lp->device_ram_start >> 16), DEPCA_DATA);
- outw(CSR3, DEPCA_ADDR); /* ALE control */
- outw(ACON, DEPCA_DATA);
-
- outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
-}
-
-static int InitRestartDepca(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- int i, status = 0;
-
- /* Copy the shadow init_block to shared memory */
- memcpy_toio(lp->sh_mem, &lp->init_block, sizeof(struct depca_init));
-
- outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */
- outw(INIT, DEPCA_DATA); /* initialize DEPCA */
-
- /* wait for lance to complete initialisation */
- for (i = 0; (i < 100) && !(inw(DEPCA_DATA) & IDON); i++);
-
- if (i != 100) {
- /* clear IDON by writing a "1", enable interrupts and start lance */
- outw(IDON | INEA | STRT, DEPCA_DATA);
- if (depca_debug > 2) {
- printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
- }
- } else {
- printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
- status = -1;
- }
-
- return status;
-}
-
-/*
-** Set or clear the multicast filter for this adaptor.
-*/
-static void set_multicast_list(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
-
- if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */
- lp->init_block.mode |= PROM;
- } else {
- SetMulticastFilter(dev);
- lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */
- }
-
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
-}
-
-/*
-** Calculate the hash code and update the logical address filter
-** from a list of ethernet multicast addresses.
-** Big endian crc one liner is mine, all mine, ha ha ha ha!
-** LANCE calculates its hash codes big endian.
-*/
-static void SetMulticastFilter(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- int i, j, bit, byte;
- u16 hashcode;
- u32 crc;
-
- if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
- lp->init_block.mcast_table[i] = (char) 0xff;
- }
- } else {
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { /* Clear the multicast table */
- lp->init_block.mcast_table[i] = 0;
- }
- /* Add multicast addresses */
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc(ETH_ALEN, ha->addr);
- hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
- for (j = 0; j < 5; j++) { /* ... in reverse order. */
- hashcode = (hashcode << 1) | ((crc >>= 1) & 1);
- }
-
- byte = hashcode >> 3; /* bit[3-5] -> byte in filter */
- bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
- lp->init_block.mcast_table[byte] |= bit;
- }
- }
-}
-
-static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
-{
- int status = 0;
-
- if (!request_region (ioaddr, DEPCA_TOTAL_SIZE, depca_string)) {
- status = -EBUSY;
- goto out;
- }
-
- if (DevicePresent(ioaddr)) {
- status = -ENODEV;
- goto out_release;
- }
-
- if (!(*devp = alloc_etherdev (sizeof (struct depca_private)))) {
- status = -ENOMEM;
- goto out_release;
- }
-
- return 0;
-
- out_release:
- release_region (ioaddr, DEPCA_TOTAL_SIZE);
- out:
- return status;
-}
-
-/*
-** ISA bus I/O device probe
-*/
-
-static void __init depca_platform_probe (void)
-{
- int i;
- struct platform_device *pldev;
-
- for (i = 0; depca_io_ports[i].iobase; i++) {
- depca_io_ports[i].device = NULL;
-
- /* if an address has been specified on the command
- * line, use it (if valid) */
- if (io && io != depca_io_ports[i].iobase)
- continue;
-
- pldev = platform_device_alloc(depca_string, i);
- if (!pldev)
- continue;
-
- pldev->dev.platform_data = (void *) depca_io_ports[i].iobase;
- depca_io_ports[i].device = pldev;
-
- if (platform_device_add(pldev)) {
- depca_io_ports[i].device = NULL;
- pldev->dev.platform_data = NULL;
- platform_device_put(pldev);
- continue;
- }
-
- if (!pldev->dev.driver) {
- /* The driver was not bound to this device, there was
- * no hardware at this address. Unregister it, as the
- * release function will take care of freeing the
- * allocated structure */
-
- depca_io_ports[i].device = NULL;
- pldev->dev.platform_data = NULL;
- platform_device_unregister (pldev);
- }
- }
-}
-
-static enum depca_type __init depca_shmem_probe (ulong *mem_start)
-{
- u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
- enum depca_type adapter = unknown;
- int i;
-
- for (i = 0; mem_base[i]; i++) {
- *mem_start = mem ? mem : mem_base[i];
- adapter = DepcaSignature (adapter_name, *mem_start);
- if (adapter != unknown)
- break;
- }
-
- return adapter;
-}
-
-static int depca_isa_probe(struct platform_device *device)
-{
- struct net_device *dev;
- struct depca_private *lp;
- u_long ioaddr, mem_start = 0;
- enum depca_type adapter = unknown;
- int status = 0;
-
- ioaddr = (u_long) device->dev.platform_data;
-
- if ((status = depca_common_init (ioaddr, &dev)))
- goto out;
-
- adapter = depca_shmem_probe (&mem_start);
-
- if (adapter == unknown) {
- status = -ENODEV;
- goto out_free;
- }
-
- dev->base_addr = ioaddr;
- dev->irq = irq; /* Use whatever value the user gave
- * us, and 0 if he didn't. */
- lp = netdev_priv(dev);
- lp->depca_bus = DEPCA_BUS_ISA;
- lp->adapter = adapter;
- lp->mem_start = mem_start;
-
- if ((status = depca_hw_init(dev, &device->dev)))
- goto out_free;
-
- return 0;
-
- out_free:
- free_netdev (dev);
- release_region (ioaddr, DEPCA_TOTAL_SIZE);
- out:
- return status;
-}
-
-/*
-** EISA callbacks from sysfs.
-*/
-
-#ifdef CONFIG_EISA
-static int __init depca_eisa_probe (struct device *device)
-{
- enum depca_type adapter = unknown;
- struct eisa_device *edev;
- struct net_device *dev;
- struct depca_private *lp;
- u_long ioaddr, mem_start;
- int status = 0;
-
- edev = to_eisa_device (device);
- ioaddr = edev->base_addr + DEPCA_EISA_IO_PORTS;
-
- if ((status = depca_common_init (ioaddr, &dev)))
- goto out;
-
- /* It would have been nice to get card configuration from the
- * card. Unfortunately, this register is write-only (shares
-	 * its address with the ethernet prom)... As we don't parse
- * the EISA configuration structures (yet... :-), just rely on
- * the ISA probing to sort it out... */
-
- adapter = depca_shmem_probe (&mem_start);
- if (adapter == unknown) {
- status = -ENODEV;
- goto out_free;
- }
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
- lp = netdev_priv(dev);
- lp->depca_bus = DEPCA_BUS_EISA;
- lp->adapter = edev->id.driver_data;
- lp->mem_start = mem_start;
-
- if ((status = depca_hw_init(dev, device)))
- goto out_free;
-
- return 0;
-
- out_free:
- free_netdev (dev);
- release_region (ioaddr, DEPCA_TOTAL_SIZE);
- out:
- return status;
-}
-#endif
-
-static int depca_device_remove(struct device *device)
-{
- struct net_device *dev;
- struct depca_private *lp;
- int bus;
-
- dev = dev_get_drvdata(device);
- lp = netdev_priv(dev);
-
- unregister_netdev (dev);
- iounmap (lp->sh_mem);
- release_mem_region (lp->mem_start, lp->mem_len);
- release_region (dev->base_addr, DEPCA_TOTAL_SIZE);
- bus = lp->depca_bus;
- free_netdev (dev);
-
- return 0;
-}
-
-/*
-** Look for a particular board name in the on-board Remote Diagnostics
-** and Boot (readb) ROM. This will also give us a clue to the network RAM
-** base address.
-*/
-static int __init DepcaSignature(char *name, u_long base_addr)
-{
- u_int i, j, k;
- void __iomem *ptr;
- char tmpstr[16];
- u_long prom_addr = base_addr + 0xc000;
- u_long mem_addr = base_addr + 0x8000; /* 32KB */
-
- /* Can't reserve the prom region, it is already marked as
- * used, at least on x86. Instead, reserve a memory region a
- * board would certainly use. If it works, go ahead. If not,
- * run like hell... */
-
- if (!request_mem_region (mem_addr, 16, depca_string))
- return unknown;
-
- /* Copy the first 16 bytes of ROM */
-
- ptr = ioremap(prom_addr, 16);
- if (ptr == NULL) {
- printk(KERN_ERR "depca: I/O remap failed at %lx\n", prom_addr);
- return unknown;
- }
- for (i = 0; i < 16; i++) {
- tmpstr[i] = readb(ptr + i);
- }
- iounmap(ptr);
-
- release_mem_region (mem_addr, 16);
-
- /* Check if PROM contains a valid string */
- for (i = 0; *depca_signature[i] != '\0'; i++) {
- for (j = 0, k = 0; j < 16 && k < strlen(depca_signature[i]); j++) {
- if (depca_signature[i][k] == tmpstr[j]) { /* track signature */
- k++;
- } else { /* lost signature; begin search again */
- k = 0;
- }
- }
- if (k == strlen(depca_signature[i]))
- break;
- }
-
- /* Check if name string is valid, provided there's no PROM */
- if (name && *name && (i == unknown)) {
- for (i = 0; *depca_signature[i] != '\0'; i++) {
- if (strcmp(name, depca_signature[i]) == 0)
- break;
- }
- }
-
- return i;
-}
-
-/*
-** Look for a special sequence in the Ethernet station address PROM that
-** is common across all DEPCA products. Note that the original DEPCA needs
-** its ROM address counter to be initialized and enabled. Only enable
-** if the first address octet is a 0x08 - this minimises the chances of
-** messing around with some other hardware, but it assumes that this DEPCA
-** card initialized itself correctly.
-**
-** Search the Ethernet address ROM for the signature. Since the ROM address
-** counter can start at an arbitrary point, the search must include the entire
-** probe sequence length plus the (length_of_the_signature - 1).
-** Stop the search IMMEDIATELY after the signature is found so that the
-** PROM address counter is correctly positioned at the start of the
-** ethernet address for later read out.
-*/
-static int __init DevicePresent(u_long ioaddr)
-{
- union {
- struct {
- u32 a;
- u32 b;
- } llsig;
- char Sig[sizeof(u32) << 1];
- }
- dev;
- short sigLength = 0;
- s8 data;
- s16 nicsr;
- int i, j, status = 0;
-
- data = inb(DEPCA_PROM); /* clear counter on DEPCA */
- data = inb(DEPCA_PROM); /* read data */
-
- if (data == 0x08) { /* Enable counter on DEPCA */
- nicsr = inb(DEPCA_NICSR);
- nicsr |= AAC;
- outb(nicsr, DEPCA_NICSR);
- }
-
- dev.llsig.a = ETH_PROM_SIG;
- dev.llsig.b = ETH_PROM_SIG;
- sigLength = sizeof(u32) << 1;
-
- for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
- data = inb(DEPCA_PROM);
- if (dev.Sig[j] == data) { /* track signature */
- j++;
- } else { /* lost signature; begin search again */
- if (data == dev.Sig[0]) { /* rare case.... */
- j = 1;
- } else {
- j = 0;
- }
- }
- }
-
- if (j != sigLength) {
- status = -ENODEV; /* search failed */
- }
-
- return status;
-}
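For reference, the loop removed above is a plain streaming signature match over the address-ROM bytes; a minimal sketch of the same idea, assuming a hypothetical read_next() callback in place of inb(DEPCA_PROM):

static int find_signature(const unsigned char *sig, int sig_len,
			  unsigned char (*read_next)(void), int max_reads)
{
	int i, j = 0;

	for (i = 0; j < sig_len && i < max_reads + sig_len - 1; i++) {
		unsigned char data = read_next();

		if (data == sig[j])
			j++;				/* still tracking the signature */
		else
			j = (data == sig[0]) ? 1 : 0;	/* lost it; restart the match */
	}
	return (j == sig_len) ? 0 : -1;	/* 0: found, ROM counter now at the address */
}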
-
-/*
-** The DE100 and DE101 PROM accesses were made non-standard for some bizarre
-** reason: access the upper half of the PROM with x=0; access the lower half
-** with x=1.
-*/
-static int __init get_hw_addr(struct net_device *dev)
-{
- u_long ioaddr = dev->base_addr;
- struct depca_private *lp = netdev_priv(dev);
- int i, k, tmp, status = 0;
- u_short j, x, chksum;
-
- x = (((lp->adapter == de100) || (lp->adapter == de101)) ? 1 : 0);
-
- for (i = 0, k = 0, j = 0; j < 3; j++) {
- k <<= 1;
- if (k > 0xffff)
- k -= 0xffff;
-
- k += (u_char) (tmp = inb(DEPCA_PROM + x));
- dev->dev_addr[i++] = (u_char) tmp;
- k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8);
- dev->dev_addr[i++] = (u_char) tmp;
-
- if (k > 0xffff)
- k -= 0xffff;
- }
- if (k == 0xffff)
- k = 0;
-
- chksum = (u_char) inb(DEPCA_PROM + x);
- chksum |= (u_short) (inb(DEPCA_PROM + x) << 8);
- if (k != chksum)
- status = -1;
-
- return status;
-}
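The rotate-and-add arithmetic above is the DEPCA address-ROM checksum: the three 16-bit words of the station address are folded into a running sum with an end-around carry modulo 0xffff and compared against the word stored right after the address. A self-contained sketch of the same calculation (the function name is illustrative, not driver API):

static int depca_addr_checksum_ok(const unsigned char addr[6],
				  unsigned short stored)
{
	unsigned int k = 0;
	int j;

	for (j = 0; j < 3; j++) {
		k <<= 1;			/* rotate the running sum */
		if (k > 0xffff)
			k -= 0xffff;		/* end-around carry */
		k += addr[2 * j];		/* low byte of this word */
		k += addr[2 * j + 1] << 8;	/* high byte of this word */
		if (k > 0xffff)
			k -= 0xffff;
	}
	if (k == 0xffff)
		k = 0;
	return k == stored;
}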
-
-/*
-** Load a packet into the shared memory
-*/
-static int load_packet(struct net_device *dev, struct sk_buff *skb)
-{
- struct depca_private *lp = netdev_priv(dev);
- int i, entry, end, len, status = NETDEV_TX_OK;
-
- entry = lp->tx_new; /* Ring around buffer number. */
- end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask;
- if (!(readl(&lp->tx_ring[end].base) & T_OWN)) { /* Enough room? */
- /*
- ** Caution: the write order is important here... don't set up the
- ** ownership rights until all the other information is in place.
- */
- if (end < entry) { /* wrapped buffer */
- len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ;
- memcpy_toio(lp->tx_buff[entry], skb->data, len);
- memcpy_toio(lp->tx_buff[0], skb->data + len, skb->len - len);
- } else { /* linear buffer */
- memcpy_toio(lp->tx_buff[entry], skb->data, skb->len);
- }
-
- /* set up the buffer descriptors */
- len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
- for (i = entry; i != end; i = (i+1) & lp->txRingMask) {
- /* clean out flags */
- writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base);
- writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */
- writew(-TX_BUFF_SZ, &lp->tx_ring[i].length); /* packet length in buffer */
- len -= TX_BUFF_SZ;
- }
- /* clean out flags */
- writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base);
- writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */
- writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */
-
- /* start of packet */
- writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base);
- /* end of packet */
- writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base);
-
- for (i = end; i != entry; --i) {
- /* ownership of packet */
- writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base);
- if (i == 0)
- i = lp->txRingMask + 1;
- }
- writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base);
-
- lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
- } else {
- status = NETDEV_TX_LOCKED;
- }
-
- return status;
-}
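The ownership hand-off above is the load-bearing detail: buffers, lengths and flags are written first, then T_OWN is granted from the last descriptor backwards, so the chip never sees a half-built chain. A compilable sketch of just that ordering, modelled on a bare array of descriptor flag words (illustrative only, not driver code):

#include <stdint.h>

#define T_OWN 0x80000000u
#define T_STP 0x02000000u
#define T_ENP 0x01000000u

static void arm_tx_chain(uint32_t *desc, unsigned int mask,
			 unsigned int entry, unsigned int end)
{
	unsigned int i;

	/* payload and length fields are assumed to be written already */
	desc[entry] |= T_STP;			/* start of packet */
	desc[end] |= T_ENP;			/* end of packet */

	/* ownership last, walking backwards, so the first descriptor is
	 * released only once the rest of the chain is complete */
	for (i = end; i != entry; i = (i - 1) & mask)
		desc[i] |= T_OWN;
	desc[entry] |= T_OWN;
}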
-
-static void depca_dbg_open(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- struct depca_init *p = &lp->init_block;
- int i;
-
- if (depca_debug > 1) {
- /* Do not copy the shadow init block into shared memory */
- /* Debugging should not affect normal operation! */
- /* The shadow init block will get copied across during InitRestartDepca */
- printk("%s: depca open with irq %d\n", dev->name, dev->irq);
- printk("Descriptor head addresses (CPU):\n");
- printk(" 0x%lx 0x%lx\n", (u_long) lp->rx_ring, (u_long) lp->tx_ring);
- printk("Descriptor addresses (CPU):\nRX: ");
- for (i = 0; i < lp->rxRingMask; i++) {
- if (i < 3) {
- printk("%p ", &lp->rx_ring[i].base);
- }
- }
- printk("...%p\n", &lp->rx_ring[i].base);
- printk("TX: ");
- for (i = 0; i < lp->txRingMask; i++) {
- if (i < 3) {
- printk("%p ", &lp->tx_ring[i].base);
- }
- }
- printk("...%p\n", &lp->tx_ring[i].base);
- printk("\nDescriptor buffers (Device):\nRX: ");
- for (i = 0; i < lp->rxRingMask; i++) {
- if (i < 3) {
- printk("0x%8.8x ", readl(&lp->rx_ring[i].base));
- }
- }
- printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base));
- printk("TX: ");
- for (i = 0; i < lp->txRingMask; i++) {
- if (i < 3) {
- printk("0x%8.8x ", readl(&lp->tx_ring[i].base));
- }
- }
- printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base));
- printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start);
- printk(" mode: 0x%4.4x\n", p->mode);
- printk(" physical address: %pM\n", p->phys_addr);
- printk(" multicast hash table: ");
- for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) {
- printk("%2.2x:", p->mcast_table[i]);
- }
- printk("%2.2x\n", p->mcast_table[i]);
- printk(" rx_ring at: 0x%8.8x\n", p->rx_ring);
- printk(" tx_ring at: 0x%8.8x\n", p->tx_ring);
- printk("buffers (Phys): 0x%8.8lx\n", lp->mem_start + lp->buffs_offset);
- printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n", (int) lp->rxRingMask + 1, lp->rx_rlen);
- printk("TX: %d Log2(txRingMask): 0x%8.8x\n", (int) lp->txRingMask + 1, lp->tx_rlen);
- outw(CSR2, DEPCA_ADDR);
- printk("CSR2&1: 0x%4.4x", inw(DEPCA_DATA));
- outw(CSR1, DEPCA_ADDR);
- printk("%4.4x\n", inw(DEPCA_DATA));
- outw(CSR3, DEPCA_ADDR);
- printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA));
- }
-}
-
-/*
-** Perform IOCTL call functions here. Some are privileged operations and the
-** effective uid is checked in those cases.
-** The multicast IOCTLs will not work here; they are for testing purposes only.
-*/
-static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct depca_private *lp = netdev_priv(dev);
- struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru;
- int i, status = 0;
- u_long ioaddr = dev->base_addr;
- union {
- u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
- u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
- u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
- } tmp;
- unsigned long flags;
- void *buf;
-
- switch (ioc->cmd) {
- case DEPCA_GET_HWADDR: /* Get the hardware address */
- for (i = 0; i < ETH_ALEN; i++) {
- tmp.addr[i] = dev->dev_addr[i];
- }
- ioc->len = ETH_ALEN;
- if (copy_to_user(ioc->data, tmp.addr, ioc->len))
- return -EFAULT;
- break;
-
- case DEPCA_SET_HWADDR: /* Set the hardware address */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN))
- return -EFAULT;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = tmp.addr[i];
- }
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new)
- cpu_relax(); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
- break;
-
- case DEPCA_SET_PROM: /* Set Promiscuous Mode */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new)
- cpu_relax(); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
- lp->init_block.mode |= PROM; /* Set promiscuous mode */
-
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
- break;
-
- case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new)
- cpu_relax(); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
- lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */
-
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
- break;
-
- case DEPCA_SAY_BOO: /* Say "Boo!" to the kernel log file */
- if(!capable(CAP_NET_ADMIN))
- return -EPERM;
- printk("%s: Boo!\n", dev->name);
- break;
-
- case DEPCA_GET_MCA: /* Get the multicast address table */
- ioc->len = (HASH_TABLE_LEN >> 3);
- if (copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len))
- return -EFAULT;
- break;
-
- case DEPCA_SET_MCA: /* Set a multicast address */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (ioc->len >= HASH_TABLE_LEN)
- return -EINVAL;
- if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len))
- return -EFAULT;
- set_multicast_list(dev);
- break;
-
- case DEPCA_CLR_MCA: /* Clear all multicast addresses */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- set_multicast_list(dev);
- break;
-
- case DEPCA_MCA_EN: /* Enable pass all multicast addressing */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- set_multicast_list(dev);
- break;
-
- case DEPCA_GET_STATS: /* Get the driver statistics */
- ioc->len = sizeof(lp->pktStats);
- buf = kmalloc(ioc->len, GFP_KERNEL);
- if(!buf)
- return -ENOMEM;
- spin_lock_irqsave(&lp->lock, flags);
- memcpy(buf, &lp->pktStats, ioc->len);
- spin_unlock_irqrestore(&lp->lock, flags);
- if (copy_to_user(ioc->data, buf, ioc->len))
- status = -EFAULT;
- kfree(buf);
- break;
-
- case DEPCA_CLR_STATS: /* Zero out the driver statistics */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- spin_lock_irqsave(&lp->lock, flags);
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->lock, flags);
- break;
-
- case DEPCA_GET_REG: /* Get the DEPCA Registers */
- i = 0;
- tmp.sval[i++] = inw(DEPCA_NICSR);
- outw(CSR0, DEPCA_ADDR); /* status register */
- tmp.sval[i++] = inw(DEPCA_DATA);
- memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init));
- ioc->len = i + sizeof(struct depca_init);
- if (copy_to_user(ioc->data, tmp.addr, ioc->len))
- return -EFAULT;
- break;
-
- default:
- return -EOPNOTSUPP;
- }
-
- return status;
-}
-
-static int __init depca_module_init (void)
-{
- int err = 0;
-
-#ifdef CONFIG_EISA
- err = eisa_driver_register(&depca_eisa_driver);
- if (err)
- goto err_eisa;
-#endif
- err = platform_driver_register(&depca_isa_driver);
- if (err)
- goto err_eisa;
-
- depca_platform_probe();
- return 0;
-
-err_eisa:
-#ifdef CONFIG_EISA
- eisa_driver_unregister(&depca_eisa_driver);
-#endif
- return err;
-}
-
-static void __exit depca_module_exit (void)
-{
- int i;
-#ifdef CONFIG_EISA
- eisa_driver_unregister (&depca_eisa_driver);
-#endif
- platform_driver_unregister (&depca_isa_driver);
-
- for (i = 0; depca_io_ports[i].iobase; i++) {
- if (depca_io_ports[i].device) {
- depca_io_ports[i].device->dev.platform_data = NULL;
- platform_device_unregister (depca_io_ports[i].device);
- depca_io_ports[i].device = NULL;
- }
- }
-}
-
-module_init (depca_module_init);
-module_exit (depca_module_exit);
diff --git a/drivers/net/ethernet/amd/depca.h b/drivers/net/ethernet/amd/depca.h
deleted file mode 100644
index cdcfe4252c16..000000000000
--- a/drivers/net/ethernet/amd/depca.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- Written 1994 by David C. Davies.
-
- Copyright 1994 David C. Davies. This software may be used and distributed
- according to the terms of the GNU General Public License, incorporated herein by
- reference.
-*/
-
-/*
-** I/O addresses. Note that the 2k buffer option is not supported in
-** this driver.
-*/
-#define DEPCA_NICSR ioaddr+0x00 /* Network interface CSR */
-#define DEPCA_RBI ioaddr+0x02 /* RAM buffer index (2k buffer mode) */
-#define DEPCA_DATA ioaddr+0x04 /* LANCE registers' data port */
-#define DEPCA_ADDR ioaddr+0x06 /* LANCE registers' address port */
-#define DEPCA_HBASE ioaddr+0x08 /* EISA high memory base address reg. */
-#define DEPCA_PROM ioaddr+0x0c /* Ethernet address ROM data port */
-#define DEPCA_CNFG ioaddr+0x0c /* EISA Configuration port */
-#define DEPCA_RBSA ioaddr+0x0e /* RAM buffer starting address (2k buff.) */
-
-/*
-** These are LANCE registers addressable through DEPCA_ADDR
-*/
-#define CSR0 0
-#define CSR1 1
-#define CSR2 2
-#define CSR3 3
-
-/*
-** NETWORK INTERFACE CSR (NI_CSR) bit definitions
-*/
-
-#define TO 0x0100 /* Time Out for remote boot */
-#define SHE 0x0080 /* SHadow memory Enable */
-#define BS 0x0040 /* Bank Select */
-#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */
-#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */
-#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */
-#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */
-#define IM 0x0004 /* Interrupt Mask (1->mask) */
-#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */
-#define LED 0x0001 /* LED control */
-
-/*
-** Control and Status Register 0 (CSR0) bit definitions
-*/
-
-#define ERR 0x8000 /* Error summary */
-#define BABL 0x4000 /* Babble transmitter timeout error */
-#define CERR 0x2000 /* Collision Error */
-#define MISS 0x1000 /* Missed packet */
-#define MERR 0x0800 /* Memory Error */
-#define RINT 0x0400 /* Receiver Interrupt */
-#define TINT 0x0200 /* Transmit Interrupt */
-#define IDON 0x0100 /* Initialization Done */
-#define INTR 0x0080 /* Interrupt Flag */
-#define INEA 0x0040 /* Interrupt Enable */
-#define RXON 0x0020 /* Receiver on */
-#define TXON 0x0010 /* Transmitter on */
-#define TDMD 0x0008 /* Transmit Demand */
-#define STOP 0x0004 /* Stop */
-#define STRT 0x0002 /* Start */
-#define INIT 0x0001 /* Initialize */
-#define INTM 0xff00 /* Interrupt Mask */
-#define INTE 0xfff0 /* Interrupt Enable */
-
-/*
-** CONTROL AND STATUS REGISTER 3 (CSR3)
-*/
-
-#define BSWP 0x0004 /* Byte SWaP */
-#define ACON 0x0002 /* ALE control */
-#define BCON 0x0001 /* Byte CONtrol */
-
-/*
-** Initialization Block Mode Register
-*/
-
-#define PROM 0x8000 /* Promiscuous Mode */
-#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */
-#define INTL 0x0040 /* Internal Loopback */
-#define DRTY 0x0020 /* Disable Retry */
-#define COLL 0x0010 /* Force Collision */
-#define DTCR 0x0008 /* Disable Transmit CRC */
-#define LOOP 0x0004 /* Loopback */
-#define DTX 0x0002 /* Disable the Transmitter */
-#define DRX 0x0001 /* Disable the Receiver */
-
-/*
-** Receive Message Descriptor 1 (RMD1) bit definitions.
-*/
-
-#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
-#define R_ERR 0x4000 /* Error Summary */
-#define R_FRAM 0x2000 /* Framing Error */
-#define R_OFLO 0x1000 /* Overflow Error */
-#define R_CRC 0x0800 /* CRC Error */
-#define R_BUFF 0x0400 /* Buffer Error */
-#define R_STP 0x0200 /* Start of Packet */
-#define R_ENP 0x0100 /* End of Packet */
-
-/*
-** Transmit Message Descriptor 1 (TMD1) bit definitions.
-*/
-
-#define T_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
-#define T_ERR 0x4000 /* Error Summary */
-#define T_ADD_FCS 0x2000 /* More than 1 retry needed to Xmit */
-#define T_MORE 0x1000 /* >1 retry to transmit packet */
-#define T_ONE 0x0800 /* 1 try needed to transmit the packet */
-#define T_DEF 0x0400 /* Deferred */
-#define T_STP 0x02000000 /* Start of Packet */
-#define T_ENP 0x01000000 /* End of Packet */
-#define T_FLAGS 0xff000000 /* TX Flags Field */
-
-/*
-** Transmit Message Descriptor 3 (TMD3) bit definitions.
-*/
-
-#define TMD3_BUFF 0x8000 /* BUFFer error */
-#define TMD3_UFLO 0x4000 /* UnderFLOw error */
-#define TMD3_RES 0x2000 /* REServed */
-#define TMD3_LCOL 0x1000 /* Late COLlision */
-#define TMD3_LCAR 0x0800 /* Loss of CARrier */
-#define TMD3_RTRY 0x0400 /* ReTRY error */
-
-/*
-** EISA configuration Register (CNFG) bit definitions
-*/
-
-#define TIMEOUT 0x0100 /* 0:2.5 mins, 1: 30 secs */
-#define REMOTE 0x0080 /* Remote Boot Enable -> 1 */
-#define IRQ11 0x0040 /* Enable -> 1 */
-#define IRQ10 0x0020 /* Enable -> 1 */
-#define IRQ9 0x0010 /* Enable -> 1 */
-#define IRQ5 0x0008 /* Enable -> 1 */
-#define BUFF 0x0004 /* 0: 64kB or 128kB, 1: 32kB */
-#define PADR16 0x0002 /* RAM on 64kB boundary */
-#define PADR17 0x0001 /* RAM on 128kB boundary */
-
-/*
-** Miscellaneous
-*/
-#define HASH_TABLE_LEN 64 /* Bits */
-#define HASH_BITS 0x003f /* 6 LS bits */
-
-#define MASK_INTERRUPTS 1
-#define UNMASK_INTERRUPTS 0
-
-#define EISA_EN 0x0001 /* Enable EISA bus buffers */
-#define EISA_ID iobase+0x0080 /* ID long word for EISA card */
-#define EISA_CTRL iobase+0x0084 /* Control word for EISA card */
-
-/*
-** Include the IOCTL stuff
-*/
-#include <linux/sockios.h>
-
-struct depca_ioctl {
- unsigned short cmd; /* Command to run */
- unsigned short len; /* Length of the data buffer */
- unsigned char __user *data; /* Pointer to the data buffer */
-};
-
-/*
-** Recognised commands for the driver
-*/
-#define DEPCA_GET_HWADDR 0x01 /* Get the hardware address */
-#define DEPCA_SET_HWADDR 0x02 /* Set the hardware address */
-#define DEPCA_SET_PROM 0x03 /* Set Promiscuous Mode */
-#define DEPCA_CLR_PROM 0x04 /* Clear Promiscuous Mode */
-#define DEPCA_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
-#define DEPCA_GET_MCA 0x06 /* Get a multicast address */
-#define DEPCA_SET_MCA 0x07 /* Set a multicast address */
-#define DEPCA_CLR_MCA 0x08 /* Clear a multicast address */
-#define DEPCA_MCA_EN 0x09 /* Enable a multicast address group */
-#define DEPCA_GET_STATS 0x0a /* Get the driver statistics */
-#define DEPCA_CLR_STATS 0x0b /* Zero out the driver statistics */
-#define DEPCA_GET_REG 0x0c /* Get the Register contents */
-#define DEPCA_SET_REG 0x0d /* Set the Register contents */
-#define DEPCA_DUMP 0x0f /* Dump the DEPCA Status */
-
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index a227ccdcb9b5..797f847edf13 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -494,19 +494,15 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
}
memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
- new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
- GFP_ATOMIC);
- if (!new_dma_addr_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!new_dma_addr_list)
goto free_new_tx_ring;
- }
- new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
- GFP_ATOMIC);
- if (!new_skb_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list)
goto free_new_lists;
- }
kfree(lp->tx_skbuff);
kfree(lp->tx_dma_addr);
@@ -564,19 +560,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
}
memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
- new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
- GFP_ATOMIC);
- if (!new_dma_addr_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+ if (!new_dma_addr_list)
goto free_new_rx_ring;
- }
- new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
- GFP_ATOMIC);
- if (!new_skb_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list)
goto free_new_lists;
- }
/* first copy the current receive buffers */
overlap = min(size, lp->rx_ring_size);
@@ -1688,10 +1679,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
memcpy(dev->dev_addr, promaddr, 6);
}
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
- if (!is_valid_ether_addr(dev->perm_addr))
+ if (!is_valid_ether_addr(dev->dev_addr))
memset(dev->dev_addr, 0, ETH_ALEN);
if (pcnet32_debug & NETIF_MSG_PROBE) {
@@ -1934,31 +1924,23 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
- if (!lp->tx_dma_addr) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->tx_dma_addr)
return -ENOMEM;
- }
lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
- if (!lp->rx_dma_addr) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->rx_dma_addr)
return -ENOMEM;
- }
lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
- if (!lp->tx_skbuff) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->tx_skbuff)
return -ENOMEM;
- }
lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
- if (!lp->rx_skbuff) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->rx_skbuff)
return -ENOMEM;
- }
return 0;
}
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index c2d696c88e46..6a40290d3727 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1284,8 +1284,8 @@ static void lance_free_hwresources(struct lance_private *lp)
/* Ethtool support... */
static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "sunlance");
- strcpy(info->version, "2.02");
+ strlcpy(info->driver, "sunlance", sizeof(info->driver));
+ strlcpy(info->version, "2.02", sizeof(info->version));
}
static const struct ethtool_ops sparc_lance_ethtool_ops = {
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 1ed886d421f8..36d6abd1cfff 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -44,8 +44,8 @@ config ATL1
will be called atl1.
config ATL1E
- tristate "Atheros L1E Gigabit Ethernet support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Atheros L1E Gigabit Ethernet support"
+ depends on PCI
select CRC32
select NET_CORE
select MII
@@ -56,8 +56,8 @@ config ATL1E
will be called atl1e.
config ATL1C
- tristate "Atheros L1C Gigabit Ethernet support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Atheros L1C Gigabit Ethernet support"
+ depends on PCI
select CRC32
select NET_CORE
select MII
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 56d3f697e0c7..1f07fc633ab9 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -21,7 +21,7 @@
#include "atl1c.h"
-#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
+#define ATL1C_DRV_VERSION "1.0.1.1-NAPI"
char atl1c_driver_name[] = "atl1c";
char atl1c_driver_version[] = ATL1C_DRV_VERSION;
@@ -472,7 +472,6 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
@@ -983,11 +982,9 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
rfd_ring->count);
tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
- if (unlikely(!tpd_ring->buffer_info)) {
- dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
- size);
+ if (unlikely(!tpd_ring->buffer_info))
goto err_nomem;
- }
+
for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
tpd_ring[i].buffer_info =
(tpd_ring->buffer_info + count);
@@ -1652,6 +1649,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
u16 num_alloc = 0;
u16 rfd_next_to_use, next_next;
struct atl1c_rx_free_desc *rfd_desc;
+ dma_addr_t mapping;
next_next = rfd_next_to_use = rfd_ring->next_to_use;
if (++next_next == rfd_ring->count)
@@ -1678,9 +1676,18 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
- buffer_info->dma = pci_map_single(pdev, vir_addr,
+ mapping = pci_map_single(pdev, vir_addr,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, mapping))) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ buffer_info->length = 0;
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+ netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
+ break;
+ }
+ buffer_info->dma = mapping;
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_FROMDEVICE);
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -2015,7 +2022,29 @@ check_sum:
return 0;
}
-static void atl1c_tx_map(struct atl1c_adapter *adapter,
+static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
+ struct atl1c_tpd_desc *first_tpd,
+ enum atl1c_trans_queue type)
+{
+ struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type];
+ struct atl1c_buffer *buffer_info;
+ struct atl1c_tpd_desc *tpd;
+ u16 first_index, index;
+
+ first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc;
+ index = first_index;
+ while (index != tpd_ring->next_to_use) {
+ tpd = ATL1C_TPD_DESC(tpd_ring, index);
+ buffer_info = &tpd_ring->buffer_info[index];
+ atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
+ memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
+ if (++index == tpd_ring->count)
+ index = 0;
+ }
+ tpd_ring->next_to_use = first_index;
+}
+
+static int atl1c_tx_map(struct atl1c_adapter *adapter,
struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
enum atl1c_trans_queue type)
{
@@ -2040,6 +2069,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info->length = map_len;
buffer_info->dma = pci_map_single(adapter->pdev,
skb->data, hdr_len, PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(adapter->pdev,
+ buffer_info->dma)))
+ goto err_dma;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_TODEVICE);
@@ -2062,6 +2094,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info->dma =
pci_map_single(adapter->pdev, skb->data + mapped_len,
buffer_info->length, PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(adapter->pdev,
+ buffer_info->dma)))
+ goto err_dma;
+
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_TODEVICE);
@@ -2083,6 +2119,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
frag, 0,
buffer_info->length,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+ goto err_dma;
+
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
ATL1C_PCIMAP_TODEVICE);
@@ -2095,6 +2134,13 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
/* The last buffer info contain the skb address,
so it will be free after unmap */
buffer_info->skb = skb;
+
+ return 0;
+
+err_dma:
+ buffer_info->dma = 0;
+ buffer_info->length = 0;
+ return -1;
}
static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
@@ -2157,10 +2203,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
if (skb_network_offset(skb) != ETH_HLEN)
tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
- atl1c_tx_map(adapter, skb, tpd, type);
- atl1c_tx_queue(adapter, skb, tpd, type);
+ if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
+ netif_info(adapter, tx_done, adapter->netdev,
+ "tx-skb droppted due to dma error\n");
+ /* roll back tpd/buffer */
+ atl1c_tx_rollback(adapter, tpd, type);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ dev_kfree_skb(skb);
+ } else {
+ atl1c_tx_queue(adapter, skb, tpd, type);
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ }
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
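The new error paths in this file follow one pattern: every DMA mapping (the linear part and each page fragment) is checked with a mapping-error helper, and on failure the descriptors mapped so far are rolled back before the skb is dropped. A minimal sketch of that map-or-unwind pattern, written against the generic DMA API rather than the pci_* wrappers used here; struct buf_desc and the helper name are hypothetical:

#include <linux/dma-mapping.h>

struct buf_desc {		/* hypothetical, for illustration only */
	void *vaddr;
	size_t len;
	dma_addr_t dma;
};

static int map_all_or_none(struct device *dev, struct buf_desc *buf, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		buf[i].dma = dma_map_single(dev, buf[i].vaddr, buf[i].len,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, buf[i].dma))
			goto unwind;
	}
	return 0;

unwind:					/* undo the mappings done so far */
	while (--i >= 0)
		dma_unmap_single(dev, buf[i].dma, buf[i].len, DMA_TO_DEVICE);
	return -ENOMEM;
}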
@@ -2540,10 +2594,9 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (atl1c_read_mac_addr(&adapter->hw)) {
/* got a random MAC address, set NET_ADDR_RANDOM to netdev */
- netdev->addr_assign_type |= NET_ADDR_RANDOM;
+ netdev->addr_assign_type = NET_ADDR_RANDOM;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
if (netif_msg_probe(adapter))
dev_dbg(&pdev->dev, "mac address : %pM\n",
adapter->hw.mac_addr);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index e4466a36d106..92f4734f860d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -819,8 +819,6 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
if (tx_ring->tx_buffer == NULL) {
- netdev_err(adapter->netdev, "kzalloc failed, size = D%d\n",
- size);
err = -ENOMEM;
goto failed;
}
@@ -2342,7 +2340,6 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 71b3d7daa21d..5b0d9931c720 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3053,7 +3053,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* copy the MAC address out of the EEPROM */
if (atl1_read_mac_addr(&adapter->hw)) {
/* mark random mac */
- netdev->addr_assign_type |= NET_ADDR_RANDOM;
+ netdev->addr_assign_type = NET_ADDR_RANDOM;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index aab83a2d4e07..1278b47022e0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1433,14 +1433,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* copy the MAC address out of the EEPROM */
atl2_read_mac_addr(&adapter->hw);
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
-/* FIXME: do we still need this? */
-#ifdef ETHTOOL_GPERMADDR
- memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
-
- if (!is_valid_ether_addr(netdev->perm_addr)) {
-#else
if (!is_valid_ether_addr(netdev->dev_addr)) {
-#endif
err = -EIO;
goto err_eeprom;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 77ffbc4a5071..f82eb1699464 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -84,7 +84,6 @@ static int atlx_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
atlx_set_mac_addr(&adapter->hw);
return 0;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f55267363f35..3e69b3f88099 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -121,4 +121,22 @@ config BNX2X
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
+config BNX2X_SRIOV
+ bool "Broadcom 578xx and 57712 SR-IOV support"
+ depends on BNX2X && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support in the 578xx and 57712 products. This
+ allows for virtual function acceleration in virtual environments.
+
+config BGMAC
+ tristate "BCMA bus GBit core support"
+ depends on BCMA_HOST_SOC && HAS_DMA
+ ---help---
+ This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
+ They can be found on BCM47xx SoCs and provide gigabit ethernet.
+ When using this driver on a BCM4706, BCMA_DRIVER_GMAC_CMN must also
+ be enabled to make it work.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index b7896051d54e..68efa1a3fb88 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
+obj-$(CONFIG_BGMAC) += bgmac.o
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 219f6226fcb1..a7efec293037 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -809,11 +809,10 @@ static int b44_rx(struct b44 *bp, int budget)
struct sk_buff *copy_skb;
b44_recycle_rx(bp, cons, bp->rx_prod);
- copy_skb = netdev_alloc_skb(bp->dev, len + 2);
+ copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
/* DMA sync done above, copy just the actual packet */
skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
@@ -1518,10 +1517,8 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
u8 pwol_mask[B44_PMASK_SIZE];
pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
- if (!pwol_pattern) {
- pr_err("Memory not available for WOL\n");
+ if (!pwol_pattern)
return;
- }
/* Ipv4 magic packet pattern - pattern 0.*/
memset(pwol_mask, 0, B44_PMASK_SIZE);
@@ -2111,8 +2108,6 @@ static int b44_get_invariants(struct b44 *bp)
return -EINVAL;
}
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
-
bp->imask = IMASK_DEF;
/* XXX - really required?
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 39387d67b722..7d81e059e811 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -799,7 +799,7 @@ static int bcm_enet_open(struct net_device *dev)
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
priv->mii_bus->id, priv->phy_id);
- phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
+ phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
@@ -886,10 +886,9 @@ static int bcm_enet_open(struct net_device *dev)
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
- priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
+ priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->tx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_ring;
}
@@ -900,10 +899,9 @@ static int bcm_enet_open(struct net_device *dev)
spin_lock_init(&priv->tx_lock);
/* init & fill rx ring with skbs */
- priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
+ priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->rx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -1227,10 +1225,11 @@ static const u32 unused_mib_regs[] = {
static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
- strncpy(drvinfo->version, bcm_enet_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "bcm63xx", 32);
+ strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, bcm_enet_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
drvinfo->n_stats = BCM_ENET_STATS_LEN;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
new file mode 100644
index 000000000000..3fd32880e526
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -0,0 +1,1461 @@
+/*
+ * Driver for (BCM4706)? GBit MAC core on BCMA bus.
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bgmac.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <asm/mach-bcm47xx/nvram.h>
+
+static const struct bcma_device_id bgmac_bcma_tbl[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
+
+static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
+ u32 value, int timeout)
+{
+ u32 val;
+ int i;
+
+ for (i = 0; i < timeout / 10; i++) {
+ val = bcma_read32(core, reg);
+ if ((val & mask) == value)
+ return true;
+ udelay(10);
+ }
+ pr_err("Timeout waiting for reg 0x%X\n", reg);
+ return false;
+}
+
+/**************************************************
+ * DMA
+ **************************************************/
+
+static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+ u32 val;
+ int i;
+
+ if (!ring->mmio_base)
+ return;
+
+ /* Suspend DMA TX ring first.
+ * bgmac_wait_value doesn't support waiting for any one of several values,
+ * so implement the whole loop here.
+ */
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
+ BGMAC_DMA_TX_SUSPEND);
+ for (i = 0; i < 10000 / 10; i++) {
+ val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+ val &= BGMAC_DMA_TX_STAT;
+ if (val == BGMAC_DMA_TX_STAT_DISABLED ||
+ val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
+ val == BGMAC_DMA_TX_STAT_STOPPED) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
+ ring->mmio_base, val);
+
+ /* Remove SUSPEND bit */
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
+ if (!bgmac_wait_value(bgmac->core,
+ ring->mmio_base + BGMAC_DMA_TX_STATUS,
+ BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
+ 10000)) {
+ bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
+ ring->mmio_base);
+ udelay(300);
+ val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+ if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
+ bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
+ ring->mmio_base);
+ }
+}
+
+static void bgmac_dma_tx_enable(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ u32 ctl;
+
+ ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
+ ctl |= BGMAC_DMA_TX_ENABLE;
+ ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
+}
+
+static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct net_device *net_dev = bgmac->net_dev;
+ struct bgmac_dma_desc *dma_desc;
+ struct bgmac_slot_info *slot;
+ u32 ctl0, ctl1;
+ int free_slots;
+
+ if (skb->len > BGMAC_DESC_CTL1_LEN) {
+ bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
+ goto err_stop_drop;
+ }
+
+ if (ring->start <= ring->end)
+ free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
+ else
+ free_slots = ring->start - ring->end;
+ if (free_slots == 1) {
+ bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
+ netif_stop_queue(net_dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ slot = &ring->slots[ring->end];
+ slot->skb = skb;
+ slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+ bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
+ ring->mmio_base);
+ goto err_stop_drop;
+ }
+
+ ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
+ if (ring->end == ring->num_slots - 1)
+ ctl0 |= BGMAC_DESC_CTL0_EOT;
+ ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;
+
+ dma_desc = ring->cpu_base;
+ dma_desc += ring->end;
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
+ dma_desc->ctl0 = cpu_to_le32(ctl0);
+ dma_desc->ctl1 = cpu_to_le32(ctl1);
+
+ wmb();
+
+ /* Increase ring->end to point to the empty slot. We tell the hardware
+ * the first slot it should *not* read.
+ */
+ if (++ring->end >= BGMAC_TX_RING_SLOTS)
+ ring->end = 0;
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+ ring->end * sizeof(struct bgmac_dma_desc));
+
+ /* Always keep one slot free to allow detecting bugged calls. */
+ if (--free_slots == 1)
+ netif_stop_queue(net_dev);
+
+ return NETDEV_TX_OK;
+
+err_stop_drop:
+ netif_stop_queue(net_dev);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
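In the ring bookkeeping above, ring->start is the oldest slot not yet reclaimed and ring->end is the first slot the hardware must not read; one slot is always left unused so a full ring (free_slots == 1) can be told apart from an empty one. The occupancy arithmetic, as a small sketch:

/* Illustrative only: free-slot accounting as in bgmac_dma_tx_add().
 * Slots [start, end) are in flight; one slot always stays unused. */
static int tx_free_slots(int start, int end, int num_slots)
{
	if (start <= end)
		return start - end + num_slots;	/* in-flight span is linear */
	return start - end;			/* end has wrapped around zero */
}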
+
+/* Free transmitted packets */
+static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ int empty_slot;
+ bool freed = false;
+
+ /* The last slot that hardware didn't consume yet */
+ empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+ empty_slot &= BGMAC_DMA_TX_STATDPTR;
+ empty_slot /= sizeof(struct bgmac_dma_desc);
+
+ while (ring->start != empty_slot) {
+ struct bgmac_slot_info *slot = &ring->slots[ring->start];
+
+ if (slot->skb) {
+ /* Unmap no longer used buffer */
+ dma_unmap_single(dma_dev, slot->dma_addr,
+ slot->skb->len, DMA_TO_DEVICE);
+ slot->dma_addr = 0;
+
+ /* Free memory! :) */
+ dev_kfree_skb(slot->skb);
+ slot->skb = NULL;
+ } else {
+ bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
+ ring->start, ring->end);
+ }
+
+ if (++ring->start >= BGMAC_TX_RING_SLOTS)
+ ring->start = 0;
+ freed = true;
+ }
+
+ if (freed && netif_queue_stopped(bgmac->net_dev))
+ netif_wake_queue(bgmac->net_dev);
+}
+
+static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+ if (!ring->mmio_base)
+ return;
+
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
+ if (!bgmac_wait_value(bgmac->core,
+ ring->mmio_base + BGMAC_DMA_RX_STATUS,
+ BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
+ 10000))
+ bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
+ ring->mmio_base);
+}
+
+static void bgmac_dma_rx_enable(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ u32 ctl;
+
+ ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+ ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
+ ctl |= BGMAC_DMA_RX_ENABLE;
+ ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
+ ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
+ ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
+}
+
+static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
+ struct bgmac_slot_info *slot)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_rx_header *rx;
+
+ /* Alloc skb */
+ slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
+ if (!slot->skb) {
+ bgmac_err(bgmac, "Allocation of skb failed!\n");
+ return -ENOMEM;
+ }
+
+ /* Poison - if everything goes fine, hardware will overwrite it */
+ rx = (struct bgmac_rx_header *)slot->skb->data;
+ rx->len = cpu_to_le16(0xdead);
+ rx->flags = cpu_to_le16(0xbeef);
+
+ /* Map skb for the DMA */
+ slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+ bgmac_err(bgmac, "DMA mapping error\n");
+ return -ENOMEM;
+ }
+ if (slot->dma_addr & 0xC0000000)
+ bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+
+ return 0;
+}
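The 0xdead/0xbeef values written above act as a poison marker: the RX path later reads the header back and, if the poison is still there, knows the device never filled this buffer and drops the slot rather than parsing garbage (see bgmac_dma_rx_read() below). A one-function sketch of the check, assuming the len/flags layout shown here:

static bool rx_header_is_poisoned(const struct bgmac_rx_header *rx)
{
	return le16_to_cpu(rx->len) == 0xdead &&
	       le16_to_cpu(rx->flags) == 0xbeef;
}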
+
+static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
+ int weight)
+{
+ u32 end_slot;
+ int handled = 0;
+
+ end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
+ end_slot &= BGMAC_DMA_RX_STATDPTR;
+ end_slot /= sizeof(struct bgmac_dma_desc);
+
+ ring->end = end_slot;
+
+ while (ring->start != ring->end) {
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_slot_info *slot = &ring->slots[ring->start];
+ struct sk_buff *skb = slot->skb;
+ struct sk_buff *new_skb;
+ struct bgmac_rx_header *rx;
+ u16 len, flags;
+
+ /* Unmap buffer to make it accessible to the CPU */
+ dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ /* Get info from the header */
+ rx = (struct bgmac_rx_header *)skb->data;
+ len = le16_to_cpu(rx->len);
+ flags = le16_to_cpu(rx->flags);
+
+ /* Check for poison and drop or pass the packet */
+ if (len == 0xdead && flags == 0xbeef) {
+ bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+ ring->start);
+ } else {
+ new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
+ if (new_skb) {
+ skb_put(new_skb, len);
+ skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
+ new_skb->data,
+ len);
+ new_skb->protocol =
+ eth_type_trans(new_skb, bgmac->net_dev);
+ netif_receive_skb(new_skb);
+ handled++;
+ } else {
+ bgmac->net_dev->stats.rx_dropped++;
+ bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+ }
+
+ /* Poison the old skb */
+ rx->len = cpu_to_le16(0xdead);
+ rx->flags = cpu_to_le16(0xbeef);
+ }
+
+ /* Make it back accessible to the hardware */
+ dma_sync_single_for_device(dma_dev, slot->dma_addr,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ if (++ring->start >= BGMAC_RX_RING_SLOTS)
+ ring->start = 0;
+
+ if (handled >= weight) /* Should never be greater */
+ break;
+ }
+
+ return handled;
+}
+
+/* Does ring support unaligned addressing? */
+static bool bgmac_dma_unaligned(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring,
+ enum bgmac_dma_ring_type ring_type)
+{
+ switch (ring_type) {
+ case BGMAC_DMA_RING_TX:
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
+ 0xff0);
+ if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
+ return true;
+ break;
+ case BGMAC_DMA_RING_RX:
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
+ 0xff0);
+ if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
+ return true;
+ break;
+ }
+ return false;
+}
+
+static void bgmac_dma_ring_free(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_slot_info *slot;
+ int size;
+ int i;
+
+ for (i = 0; i < ring->num_slots; i++) {
+ slot = &ring->slots[i];
+ if (slot->skb) {
+ if (slot->dma_addr)
+ dma_unmap_single(dma_dev, slot->dma_addr,
+ slot->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb(slot->skb);
+ }
+ }
+
+ if (ring->cpu_base) {
+ /* Free ring of descriptors */
+ size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ dma_free_coherent(dma_dev, size, ring->cpu_base,
+ ring->dma_base);
+ }
+}
+
+static void bgmac_dma_free(struct bgmac *bgmac)
+{
+ int i;
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
+ bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
+ bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
+}
+
+static int bgmac_dma_alloc(struct bgmac *bgmac)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_dma_ring *ring;
+ static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
+ BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
+ int size; /* ring size: different for Tx and Rx */
+ int err;
+ int i;
+
+ BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
+ BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
+
+ if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
+ bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+ ring = &bgmac->tx_ring[i];
+ ring->num_slots = BGMAC_TX_RING_SLOTS;
+ ring->mmio_base = ring_base[i];
+ if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
+ bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
+ ring->mmio_base);
+
+ /* Alloc ring of descriptors */
+ size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
+ if (!ring->cpu_base) {
+ bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
+ ring->mmio_base);
+ goto err_dma_free;
+ }
+ if (ring->dma_base & 0xC0000000)
+ bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+
+ /* No need to alloc TX slots yet */
+ }
+
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ ring = &bgmac->rx_ring[i];
+ ring->num_slots = BGMAC_RX_RING_SLOTS;
+ ring->mmio_base = ring_base[i];
+ if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
+ bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
+ ring->mmio_base);
+
+ /* Alloc ring of descriptors */
+ size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
+ if (!ring->cpu_base) {
+ bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
+ ring->mmio_base);
+ err = -ENOMEM;
+ goto err_dma_free;
+ }
+ if (ring->dma_base & 0xC0000000)
+ bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+
+ /* Alloc RX slots */
+ for (i = 0; i < ring->num_slots; i++) {
+ err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[i]);
+ if (err) {
+ bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
+ goto err_dma_free;
+ }
+ }
+ }
+
+ return 0;
+
+err_dma_free:
+ bgmac_dma_free(bgmac);
+ return -ENOMEM;
+}
+
+static void bgmac_dma_init(struct bgmac *bgmac)
+{
+ struct bgmac_dma_ring *ring;
+ struct bgmac_dma_desc *dma_desc;
+ u32 ctl0, ctl1;
+ int i;
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+ ring = &bgmac->tx_ring[i];
+
+ /* We don't implement unaligned addressing, so enable first */
+ bgmac_dma_tx_enable(bgmac, ring);
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
+ lower_32_bits(ring->dma_base));
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
+ upper_32_bits(ring->dma_base));
+
+ ring->start = 0;
+ ring->end = 0; /* Points to the slot that should *not* be read */
+ }
+
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ ring = &bgmac->rx_ring[i];
+
+ /* We don't implement unaligned addressing, so enable first */
+ bgmac_dma_rx_enable(bgmac, ring);
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
+ lower_32_bits(ring->dma_base));
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
+ upper_32_bits(ring->dma_base));
+
+ for (i = 0, dma_desc = ring->cpu_base; i < ring->num_slots;
+ i++, dma_desc++) {
+ ctl0 = ctl1 = 0;
+
+ if (i == ring->num_slots - 1)
+ ctl0 |= BGMAC_DESC_CTL0_EOT;
+ ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+ /* Is there any BGMAC device that requires extension? */
+ /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+ * B43_DMA64_DCTL1_ADDREXT_MASK;
+ */
+
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[i].dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[i].dma_addr));
+ dma_desc->ctl0 = cpu_to_le32(ctl0);
+ dma_desc->ctl1 = cpu_to_le32(ctl1);
+ }
+
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+ ring->num_slots * sizeof(struct bgmac_dma_desc));
+
+ ring->start = 0;
+ ring->end = 0;
+ }
+}
+
+/**************************************************
+ * PHY ops
+ **************************************************/
+
+static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
+{
+ struct bcma_device *core;
+ u16 phy_access_addr;
+ u16 phy_ctl_addr;
+ u32 tmp;
+
+ BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
+ BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
+ BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
+ BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
+ BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
+ BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
+ BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
+ BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
+ BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
+ BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
+ BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
+
+ if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->core->bus->drv_gmac_cmn.core;
+ phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+ phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+ } else {
+ core = bgmac->core;
+ phy_access_addr = BGMAC_PHY_ACCESS;
+ phy_ctl_addr = BGMAC_PHY_CNTL;
+ }
+
+ tmp = bcma_read32(core, phy_ctl_addr);
+ tmp &= ~BGMAC_PC_EPA_MASK;
+ tmp |= phyaddr;
+ bcma_write32(core, phy_ctl_addr, tmp);
+
+ tmp = BGMAC_PA_START;
+ tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+ tmp |= reg << BGMAC_PA_REG_SHIFT;
+ bcma_write32(core, phy_access_addr, tmp);
+
+ if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
+ bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
+ phyaddr, reg);
+ return 0xffff;
+ }
+
+ return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
+static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
+{
+ struct bcma_device *core;
+ u16 phy_access_addr;
+ u16 phy_ctl_addr;
+ u32 tmp;
+
+ if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->core->bus->drv_gmac_cmn.core;
+ phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+ phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+ } else {
+ core = bgmac->core;
+ phy_access_addr = BGMAC_PHY_ACCESS;
+ phy_ctl_addr = BGMAC_PHY_CNTL;
+ }
+
+ tmp = bcma_read32(core, phy_ctl_addr);
+ tmp &= ~BGMAC_PC_EPA_MASK;
+ tmp |= phyaddr;
+ bcma_write32(core, phy_ctl_addr, tmp);
+
+ bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
+ if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
+ bgmac_warn(bgmac, "Error setting MDIO int\n");
+
+ tmp = BGMAC_PA_START;
+ tmp |= BGMAC_PA_WRITE;
+ tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+ tmp |= reg << BGMAC_PA_REG_SHIFT;
+ tmp |= value;
+ bcma_write32(core, phy_access_addr, tmp);
+
+ if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
+ bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
+ phyaddr, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
+static void bgmac_phy_force(struct bgmac *bgmac)
+{
+ u16 ctl;
+ u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
+ BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
+
+ if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
+ return;
+
+ if (bgmac->autoneg)
+ return;
+
+ ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
+ ctl &= mask;
+ if (bgmac->full_duplex)
+ ctl |= BGMAC_PHY_CTL_DUPLEX;
+ if (bgmac->speed == BGMAC_SPEED_100)
+ ctl |= BGMAC_PHY_CTL_SPEED_100;
+ else if (bgmac->speed == BGMAC_SPEED_1000)
+ ctl |= BGMAC_PHY_CTL_SPEED_1000;
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
+static void bgmac_phy_advertise(struct bgmac *bgmac)
+{
+ u16 adv;
+
+ if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
+ return;
+
+ if (!bgmac->autoneg)
+ return;
+
+ /* Adv selected 10/100 speeds */
+ adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
+ adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
+ BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
+ if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
+ adv |= BGMAC_PHY_ADV_10HALF;
+ if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
+ adv |= BGMAC_PHY_ADV_100HALF;
+ if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
+ adv |= BGMAC_PHY_ADV_10FULL;
+ if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
+ adv |= BGMAC_PHY_ADV_100FULL;
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
+
+ /* Adv selected 1000 speeds */
+ adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
+ adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
+ if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
+ adv |= BGMAC_PHY_ADV2_1000HALF;
+ if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
+ adv |= BGMAC_PHY_ADV2_1000FULL;
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
+
+ /* Restart */
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
+ bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
+ BGMAC_PHY_CTL_RESTART);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
+static void bgmac_phy_init(struct bgmac *bgmac)
+{
+ struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
+ struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
+ u8 i;
+
+ if (ci->id == BCMA_CHIP_ID_BCM5356) {
+ for (i = 0; i < 5; i++) {
+ bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
+ bgmac_phy_write(bgmac, i, 0x15, 0x0100);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+ bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+ }
+ }
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
+ bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
+ bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
+ for (i = 0; i < 5; i++) {
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+ bgmac_phy_write(bgmac, i, 0x16, 0x5284);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+ bgmac_phy_write(bgmac, i, 0x17, 0x0010);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+ bgmac_phy_write(bgmac, i, 0x16, 0x5296);
+ bgmac_phy_write(bgmac, i, 0x17, 0x1073);
+ bgmac_phy_write(bgmac, i, 0x17, 0x9073);
+ bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
+ bgmac_phy_write(bgmac, i, 0x17, 0x9273);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
+static void bgmac_phy_reset(struct bgmac *bgmac)
+{
+ if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
+ return;
+
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
+ BGMAC_PHY_CTL_RESET);
+ udelay(100);
+ if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
+ BGMAC_PHY_CTL_RESET)
+ bgmac_err(bgmac, "PHY reset failed\n");
+ bgmac_phy_init(bgmac);
+}
+
+/**************************************************
+ * Chip ops
+ **************************************************/
+
+/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
+ * there is nothing to change? Try it after stabilizing the driver.
+ */
+static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
+ bool force)
+{
+ u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+ u32 new_val = (cmdcfg & mask) | set;
+
+ bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
+ udelay(2);
+
+ if (new_val != cmdcfg || force)
+ bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
+
+ bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
+ udelay(2);
+}
+
+static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
+{
+ u32 tmp;
+
+ tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+ bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
+ tmp = (addr[4] << 8) | addr[5];
+ bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+}
+
+static void bgmac_set_rx_mode(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ if (net_dev->flags & IFF_PROMISC)
+ bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
+ else
+ bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
+}
+
+#if 0 /* We don't use these registers yet */
+static void bgmac_chip_stats_update(struct bgmac *bgmac)
+{
+ int i;
+
+ if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
+ for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
+ bgmac->mib_tx_regs[i] =
+ bgmac_read(bgmac,
+ BGMAC_TX_GOOD_OCTETS + (i * 4));
+ for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
+ bgmac->mib_rx_regs[i] =
+ bgmac_read(bgmac,
+ BGMAC_RX_GOOD_OCTETS + (i * 4));
+ }
+
+ /* TODO: what else? how to handle BCM4706? Specs are needed */
+}
+#endif
+
+static void bgmac_clear_mib(struct bgmac *bgmac)
+{
+ int i;
+
+ if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
+ return;
+
+ bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
+ for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
+ bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
+ for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
+ bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
+static void bgmac_speed(struct bgmac *bgmac, int speed)
+{
+ u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
+ u32 set = 0;
+
+ if (speed & BGMAC_SPEED_10)
+ set |= BGMAC_CMDCFG_ES_10;
+ if (speed & BGMAC_SPEED_100)
+ set |= BGMAC_CMDCFG_ES_100;
+ if (speed & BGMAC_SPEED_1000)
+ set |= BGMAC_CMDCFG_ES_1000;
+ if (!bgmac->full_duplex)
+ set |= BGMAC_CMDCFG_HD;
+ bgmac_cmdcfg_maskset(bgmac, mask, set, true);
+}
+
+static void bgmac_miiconfig(struct bgmac *bgmac)
+{
+ u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
+ BGMAC_DS_MM_SHIFT;
+ if (imode == 0 || imode == 1) {
+ if (bgmac->autoneg)
+ bgmac_speed(bgmac, BGMAC_SPEED_100);
+ else
+ bgmac_speed(bgmac, bgmac->speed);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
+static void bgmac_chip_reset(struct bgmac *bgmac)
+{
+ struct bcma_device *core = bgmac->core;
+ struct bcma_bus *bus = core->bus;
+ struct bcma_chipinfo *ci = &bus->chipinfo;
+ u32 flags = 0;
+ u32 iost;
+ int i;
+
+ if (bcma_core_is_enabled(core)) {
+ if (!bgmac->stats_grabbed) {
+ /* bgmac_chip_stats_update(bgmac); */
+ bgmac->stats_grabbed = true;
+ }
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
+ bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
+
+ bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ udelay(1);
+
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
+ bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);
+
+ /* TODO: Clear software multicast filter list */
+ }
+
+ iost = bcma_aread32(core, BCMA_IOST);
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
+ iost &= ~BGMAC_BCMA_IOST_ATTACHED;
+
+ if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+ flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+ if (!bgmac->has_robosw)
+ flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ }
+
+ bcma_core_enable(core, flags);
+
+ if (core->id.rev > 2) {
+ bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
+ bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
+ 1000);
+ }
+
+ if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
+ ci->id == BCMA_CHIP_ID_BCM53572) {
+ struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
+ u8 et_swtype = 0;
+ u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
+ BGMAC_CHIPCTL_1_IF_TYPE_RMII;
+ char buf[2];
+
+ if (nvram_getenv("et_swtype", buf, 1) > 0) {
+ if (kstrtou8(buf, 0, &et_swtype))
+ bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
+ buf);
+ et_swtype &= 0x0f;
+ et_swtype <<= 4;
+ sw_type = et_swtype;
+ } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
+ sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
+ } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
+ sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
+ BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
+ }
+ bcma_chipco_chipctl_maskset(cc, 1,
+ ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
+ BGMAC_CHIPCTL_1_SW_TYPE_MASK),
+ sw_type);
+ }
+
+ if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+ bcma_awrite32(core, BCMA_IOCTL,
+ bcma_aread32(core, BCMA_IOCTL) &
+ ~BGMAC_BCMA_IOCTL_SW_RESET);
+
+	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
+	 * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine
+	 * BGMAC_CMDCFG is read _after_ putting the chip in reset, so it has to
+	 * be kept set until taking the MAC out of reset.
+	 */
+ bgmac_cmdcfg_maskset(bgmac,
+ ~(BGMAC_CMDCFG_TE |
+ BGMAC_CMDCFG_RE |
+ BGMAC_CMDCFG_RPI |
+ BGMAC_CMDCFG_TAI |
+ BGMAC_CMDCFG_HD |
+ BGMAC_CMDCFG_ML |
+ BGMAC_CMDCFG_CFE |
+ BGMAC_CMDCFG_RL |
+ BGMAC_CMDCFG_RED |
+ BGMAC_CMDCFG_PE |
+ BGMAC_CMDCFG_TPI |
+ BGMAC_CMDCFG_PAD_EN |
+ BGMAC_CMDCFG_PF),
+ BGMAC_CMDCFG_PROM |
+ BGMAC_CMDCFG_NLC |
+ BGMAC_CMDCFG_CFE |
+ BGMAC_CMDCFG_SR,
+ false);
+
+ bgmac_clear_mib(bgmac);
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
+ bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
+ BCMA_GMAC_CMN_PC_MTE);
+ else
+ bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
+ bgmac_miiconfig(bgmac);
+ bgmac_phy_init(bgmac);
+
+ bgmac->int_status = 0;
+}
+
+static void bgmac_chip_intrs_on(struct bgmac *bgmac)
+{
+ bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
+}
+
+static void bgmac_chip_intrs_off(struct bgmac *bgmac)
+{
+ bgmac_write(bgmac, BGMAC_INT_MASK, 0);
+ bgmac_read(bgmac, BGMAC_INT_MASK);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
+static void bgmac_enable(struct bgmac *bgmac)
+{
+ struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
+ u32 cmdcfg;
+ u32 mode;
+ u32 rxq_ctl;
+ u32 fl_ctl;
+ u16 bp_clk;
+ u8 mdp;
+
+ cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+ bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
+ BGMAC_CMDCFG_SR, true);
+ udelay(2);
+ cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
+ bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
+
+ mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
+ BGMAC_DS_MM_SHIFT;
+ if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
+ bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
+ if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
+ bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
+ BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
+
+ switch (ci->id) {
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
+ case BCMA_CHIP_ID_BCM53572:
+ case BCMA_CHIP_ID_BCM4716:
+ case BCMA_CHIP_ID_BCM47162:
+ fl_ctl = 0x03cb04cb;
+ if (ci->id == BCMA_CHIP_ID_BCM5357 ||
+ ci->id == BCMA_CHIP_ID_BCM4749 ||
+ ci->id == BCMA_CHIP_ID_BCM53572)
+ fl_ctl = 0x2300e1;
+ bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
+ bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
+ break;
+ }
+
+ rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
+ rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
+ bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
+ mdp = (bp_clk * 128 / 1000) - 3;
+ rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
+ bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
+static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
+{
+ struct bgmac_dma_ring *ring;
+ int i;
+
+ /* 1 interrupt per received frame */
+ bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
+
+ /* Enable 802.3x tx flow control (honor received PAUSE frames) */
+ bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
+
+ bgmac_set_rx_mode(bgmac->net_dev);
+
+ bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
+
+ if (bgmac->loopback)
+ bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ else
+ bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
+
+ bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
+
+ if (!bgmac->autoneg) {
+ bgmac_speed(bgmac, bgmac->speed);
+ bgmac_phy_force(bgmac);
+ } else if (bgmac->speed) { /* if there is anything to adv */
+ bgmac_phy_advertise(bgmac);
+ }
+
+ if (full_init) {
+ bgmac_dma_init(bgmac);
+ if (1) /* FIXME: is there any case we don't want IRQs? */
+ bgmac_chip_intrs_on(bgmac);
+ } else {
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ ring = &bgmac->rx_ring[i];
+ bgmac_dma_rx_enable(bgmac, ring);
+ }
+ }
+
+ bgmac_enable(bgmac);
+}
+
+static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
+{
+ struct bgmac *bgmac = netdev_priv(dev_id);
+
+ u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
+ int_status &= bgmac->int_mask;
+
+ if (!int_status)
+ return IRQ_NONE;
+
+ /* Ack */
+ bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);
+
+ /* Disable new interrupts until handling existing ones */
+ bgmac_chip_intrs_off(bgmac);
+
+ bgmac->int_status = int_status;
+
+ napi_schedule(&bgmac->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int bgmac_poll(struct napi_struct *napi, int weight)
+{
+ struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
+ struct bgmac_dma_ring *ring;
+ int handled = 0;
+
+ if (bgmac->int_status & BGMAC_IS_TX0) {
+ ring = &bgmac->tx_ring[0];
+ bgmac_dma_tx_free(bgmac, ring);
+ bgmac->int_status &= ~BGMAC_IS_TX0;
+ }
+
+ if (bgmac->int_status & BGMAC_IS_RX) {
+ ring = &bgmac->rx_ring[0];
+ handled += bgmac_dma_rx_read(bgmac, ring, weight);
+ bgmac->int_status &= ~BGMAC_IS_RX;
+ }
+
+ if (bgmac->int_status) {
+ bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
+ bgmac->int_status = 0;
+ }
+
+ if (handled < weight)
+ napi_complete(napi);
+
+ bgmac_chip_intrs_on(bgmac);
+
+ return handled;
+}
+
+/**************************************************
+ * net_device_ops
+ **************************************************/
+
+static int bgmac_open(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ int err = 0;
+
+ bgmac_chip_reset(bgmac);
+	/* Specs mention reclaiming rings here, but we do that in DMA init */
+ bgmac_chip_init(bgmac, true);
+
+ err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, net_dev);
+ if (err < 0) {
+ bgmac_err(bgmac, "IRQ request error: %d!\n", err);
+ goto err_out;
+ }
+ napi_enable(&bgmac->napi);
+
+ netif_carrier_on(net_dev);
+
+err_out:
+ return err;
+}
+
+static int bgmac_stop(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ netif_carrier_off(net_dev);
+
+ napi_disable(&bgmac->napi);
+ bgmac_chip_intrs_off(bgmac);
+ free_irq(bgmac->core->irq, net_dev);
+
+ bgmac_chip_reset(bgmac);
+
+ return 0;
+}
+
+static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ struct bgmac_dma_ring *ring;
+
+ /* No QOS support yet */
+ ring = &bgmac->tx_ring[0];
+ return bgmac_dma_tx_add(bgmac, ring, skb);
+}
+
+static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(net_dev, addr);
+ if (ret < 0)
+ return ret;
+ bgmac_write_mac_address(bgmac, (u8 *)addr);
+ eth_commit_mac_addr_change(net_dev, addr);
+ return 0;
+}
+
+static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = bgmac->phyaddr;
+ /* fallthru */
+ case SIOCGMIIREG:
+ if (!netif_running(net_dev))
+ return -EAGAIN;
+ data->val_out = bgmac_phy_read(bgmac, data->phy_id,
+ data->reg_num & 0x1f);
+ return 0;
+ case SIOCSMIIREG:
+ if (!netif_running(net_dev))
+ return -EAGAIN;
+ bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
+ data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops bgmac_netdev_ops = {
+ .ndo_open = bgmac_open,
+ .ndo_stop = bgmac_stop,
+ .ndo_start_xmit = bgmac_start_xmit,
+ .ndo_set_rx_mode = bgmac_set_rx_mode,
+ .ndo_set_mac_address = bgmac_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = bgmac_ioctl,
+};
+
+/**************************************************
+ * ethtool_ops
+ **************************************************/
+
+static int bgmac_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg;
+
+ if (bgmac->autoneg) {
+ WARN_ON(cmd->advertising);
+ if (bgmac->full_duplex) {
+ if (bgmac->speed & BGMAC_SPEED_10)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ if (bgmac->speed & BGMAC_SPEED_100)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ if (bgmac->speed & BGMAC_SPEED_1000)
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+ } else {
+ if (bgmac->speed & BGMAC_SPEED_10)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ if (bgmac->speed & BGMAC_SPEED_100)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ if (bgmac->speed & BGMAC_SPEED_1000)
+ cmd->advertising |= ADVERTISED_1000baseT_Half;
+ }
+ } else {
+ switch (bgmac->speed) {
+ case BGMAC_SPEED_10:
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ break;
+ case BGMAC_SPEED_100:
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+ break;
+ case BGMAC_SPEED_1000:
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ break;
+ }
+ }
+
+ cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+
+ cmd->autoneg = bgmac->autoneg;
+
+ return 0;
+}
+
+#if 0
+static int bgmac_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ return -1;
+}
+#endif
+
+static void bgmac_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops bgmac_ethtool_ops = {
+ .get_settings = bgmac_get_settings,
+ .get_drvinfo = bgmac_get_drvinfo,
+};
+
+/**************************************************
+ * BCMA bus ops
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
+static int bgmac_probe(struct bcma_device *core)
+{
+ struct net_device *net_dev;
+ struct bgmac *bgmac;
+ struct ssb_sprom *sprom = &core->bus->sprom;
+ u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
+ int err;
+
+ /* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
+ if (core->core_unit > 1) {
+ pr_err("Unsupported core_unit %d\n", core->core_unit);
+ return -ENOTSUPP;
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
+ eth_random_addr(mac);
+ dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
+ }
+
+ /* Allocation and references */
+ net_dev = alloc_etherdev(sizeof(*bgmac));
+ if (!net_dev)
+ return -ENOMEM;
+ net_dev->netdev_ops = &bgmac_netdev_ops;
+ net_dev->irq = core->irq;
+ SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
+ bgmac = netdev_priv(net_dev);
+ bgmac->net_dev = net_dev;
+ bgmac->core = core;
+ bcma_set_drvdata(core, bgmac);
+
+ /* Defaults */
+ bgmac->autoneg = true;
+ bgmac->full_duplex = true;
+ bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
+ memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
+
+ /* On BCM4706 we need common core to access PHY */
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
+ !core->bus->drv_gmac_cmn.core) {
+ bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
+ err = -ENODEV;
+ goto err_netdev_free;
+ }
+ bgmac->cmn = core->bus->drv_gmac_cmn.core;
+
+ bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
+ sprom->et0phyaddr;
+ bgmac->phyaddr &= BGMAC_PHY_MASK;
+ if (bgmac->phyaddr == BGMAC_PHY_MASK) {
+ bgmac_err(bgmac, "No PHY found\n");
+ err = -ENODEV;
+ goto err_netdev_free;
+ }
+ bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
+ bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
+
+ if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
+ bgmac_err(bgmac, "PCI setup not implemented\n");
+ err = -ENOTSUPP;
+ goto err_netdev_free;
+ }
+
+ bgmac_chip_reset(bgmac);
+
+ err = bgmac_dma_alloc(bgmac);
+ if (err) {
+ bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
+ goto err_netdev_free;
+ }
+
+ bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
+ if (nvram_getenv("et0_no_txint", NULL, 0) == 0)
+ bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
+
+ /* TODO: reset the external phy. Specs are needed */
+ bgmac_phy_reset(bgmac);
+
+ bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
+ BGMAC_BFL_ENETROBO);
+ if (bgmac->has_robosw)
+ bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
+
+ if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
+ bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+
+ err = register_netdev(bgmac->net_dev);
+ if (err) {
+ bgmac_err(bgmac, "Cannot register net device\n");
+ err = -ENOTSUPP;
+ goto err_dma_free;
+ }
+
+ netif_carrier_off(net_dev);
+
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+
+ return 0;
+
+err_dma_free:
+ bgmac_dma_free(bgmac);
+
+err_netdev_free:
+ bcma_set_drvdata(core, NULL);
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static void bgmac_remove(struct bcma_device *core)
+{
+ struct bgmac *bgmac = bcma_get_drvdata(core);
+
+ netif_napi_del(&bgmac->napi);
+ unregister_netdev(bgmac->net_dev);
+ bgmac_dma_free(bgmac);
+ bcma_set_drvdata(core, NULL);
+ free_netdev(bgmac->net_dev);
+}
+
+static struct bcma_driver bgmac_bcma_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = bgmac_bcma_tbl,
+ .probe = bgmac_probe,
+ .remove = bgmac_remove,
+};
+
+static int __init bgmac_init(void)
+{
+ int err;
+
+ err = bcma_driver_register(&bgmac_bcma_driver);
+ if (err)
+ return err;
+ pr_info("Broadcom 47xx GBit MAC driver loaded\n");
+
+ return 0;
+}
+
+static void __exit bgmac_exit(void)
+{
+ bcma_driver_unregister(&bgmac_bcma_driver);
+}
+
+module_init(bgmac_init)
+module_exit(bgmac_exit)
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
new file mode 100644
index 000000000000..4ede614c81f8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -0,0 +1,453 @@
+#ifndef _BGMAC_H
+#define _BGMAC_H
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define bgmac_err(bgmac, fmt, ...) \
+ dev_err(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+#define bgmac_warn(bgmac, fmt, ...) \
+ dev_warn(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+#define bgmac_info(bgmac, fmt, ...) \
+ dev_info(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+#define bgmac_dbg(bgmac, fmt, ...) \
+ dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+
+#include <linux/bcma/bcma.h>
+#include <linux/netdevice.h>
+
+#define BGMAC_DEV_CTL 0x000
+#define BGMAC_DC_TSM 0x00000002
+#define BGMAC_DC_CFCO 0x00000004
+#define BGMAC_DC_RLSS 0x00000008
+#define BGMAC_DC_MROR 0x00000010
+#define BGMAC_DC_FCM_MASK 0x00000060
+#define BGMAC_DC_FCM_SHIFT 5
+#define BGMAC_DC_NAE 0x00000080
+#define BGMAC_DC_TF 0x00000100
+#define BGMAC_DC_RDS_MASK 0x00030000
+#define BGMAC_DC_RDS_SHIFT 16
+#define BGMAC_DC_TDS_MASK 0x000c0000
+#define BGMAC_DC_TDS_SHIFT 18
+#define BGMAC_DEV_STATUS 0x004 /* Configuration of the interface */
+#define BGMAC_DS_RBF 0x00000001
+#define BGMAC_DS_RDF 0x00000002
+#define BGMAC_DS_RIF 0x00000004
+#define BGMAC_DS_TBF 0x00000008
+#define BGMAC_DS_TDF 0x00000010
+#define BGMAC_DS_TIF 0x00000020
+#define BGMAC_DS_PO 0x00000040
+#define BGMAC_DS_MM_MASK 0x00000300 /* Mode of the interface */
+#define BGMAC_DS_MM_SHIFT 8
+#define BGMAC_BIST_STATUS 0x00c
+#define BGMAC_INT_STATUS 0x020 /* Interrupt status */
+#define BGMAC_IS_MRO 0x00000001
+#define BGMAC_IS_MTO 0x00000002
+#define BGMAC_IS_TFD 0x00000004
+#define BGMAC_IS_LS 0x00000008
+#define BGMAC_IS_MDIO 0x00000010
+#define BGMAC_IS_MR 0x00000020
+#define BGMAC_IS_MT 0x00000040
+#define BGMAC_IS_TO 0x00000080
+#define BGMAC_IS_DESC_ERR 0x00000400 /* Descriptor error */
+#define BGMAC_IS_DATA_ERR 0x00000800 /* Data error */
+#define BGMAC_IS_DESC_PROT_ERR 0x00001000 /* Descriptor protocol error */
+#define BGMAC_IS_RX_DESC_UNDERF 0x00002000 /* Receive descriptor underflow */
+#define BGMAC_IS_RX_F_OVERF 0x00004000 /* Receive FIFO overflow */
+#define BGMAC_IS_TX_F_UNDERF 0x00008000 /* Transmit FIFO underflow */
+#define BGMAC_IS_RX 0x00010000 /* Interrupt for RX queue 0 */
+#define BGMAC_IS_TX0 0x01000000 /* Interrupt for TX queue 0 */
+#define BGMAC_IS_TX1 0x02000000 /* Interrupt for TX queue 1 */
+#define BGMAC_IS_TX2 0x04000000 /* Interrupt for TX queue 2 */
+#define BGMAC_IS_TX3 0x08000000 /* Interrupt for TX queue 3 */
+#define BGMAC_IS_TX_MASK 0x0f000000
+#define BGMAC_IS_INTMASK 0x0f01fcff
+#define BGMAC_IS_ERRMASK 0x0000fc00
+#define BGMAC_INT_MASK 0x024 /* Interrupt mask */
+#define BGMAC_GP_TIMER 0x028
+#define BGMAC_INT_RECV_LAZY 0x100
+#define BGMAC_IRL_TO_MASK 0x00ffffff
+#define BGMAC_IRL_FC_MASK 0xff000000
+#define BGMAC_IRL_FC_SHIFT 24 /* Shift the number of interrupts triggered per received frame */
+#define BGMAC_FLOW_CTL_THRESH 0x104 /* Flow control thresholds */
+#define BGMAC_WRRTHRESH 0x108
+#define BGMAC_GMAC_IDLE_CNT_THRESH 0x10c
+#define BGMAC_PHY_ACCESS 0x180 /* PHY access address */
+#define BGMAC_PA_DATA_MASK 0x0000ffff
+#define BGMAC_PA_ADDR_MASK 0x001f0000
+#define BGMAC_PA_ADDR_SHIFT 16
+#define BGMAC_PA_REG_MASK 0x1f000000
+#define BGMAC_PA_REG_SHIFT 24
+#define BGMAC_PA_WRITE 0x20000000
+#define BGMAC_PA_START 0x40000000
+#define BGMAC_PHY_CNTL 0x188 /* PHY control address */
+#define BGMAC_PC_EPA_MASK 0x0000001f
+#define BGMAC_PC_MCT_MASK 0x007f0000
+#define BGMAC_PC_MCT_SHIFT 16
+#define BGMAC_PC_MTE 0x00800000
+#define BGMAC_TXQ_CTL 0x18c
+#define BGMAC_TXQ_CTL_DBT_MASK 0x00000fff
+#define BGMAC_TXQ_CTL_DBT_SHIFT 0
+#define BGMAC_RXQ_CTL 0x190
+#define BGMAC_RXQ_CTL_DBT_MASK 0x00000fff
+#define BGMAC_RXQ_CTL_DBT_SHIFT 0
+#define BGMAC_RXQ_CTL_PTE 0x00001000
+#define BGMAC_RXQ_CTL_MDP_MASK 0x3f000000
+#define BGMAC_RXQ_CTL_MDP_SHIFT 24
+#define BGMAC_GPIO_SELECT 0x194
+#define BGMAC_GPIO_OUTPUT_EN 0x198
+/* For 0x1e0 see BCMA_CLKCTLST */
+#define BGMAC_HW_WAR 0x1e4
+#define BGMAC_PWR_CTL 0x1e8
+#define BGMAC_DMA_BASE0 0x200 /* Tx and Rx controller */
+#define BGMAC_DMA_BASE1 0x240 /* Tx controller only */
+#define BGMAC_DMA_BASE2 0x280 /* Tx controller only */
+#define BGMAC_DMA_BASE3 0x2C0 /* Tx controller only */
+#define BGMAC_TX_GOOD_OCTETS 0x300
+#define BGMAC_TX_GOOD_OCTETS_HIGH 0x304
+#define BGMAC_TX_GOOD_PKTS 0x308
+#define BGMAC_TX_OCTETS 0x30c
+#define BGMAC_TX_OCTETS_HIGH 0x310
+#define BGMAC_TX_PKTS 0x314
+#define BGMAC_TX_BROADCAST_PKTS 0x318
+#define BGMAC_TX_MULTICAST_PKTS 0x31c
+#define BGMAC_TX_LEN_64 0x320
+#define BGMAC_TX_LEN_65_TO_127 0x324
+#define BGMAC_TX_LEN_128_TO_255 0x328
+#define BGMAC_TX_LEN_256_TO_511 0x32c
+#define BGMAC_TX_LEN_512_TO_1023 0x330
+#define BGMAC_TX_LEN_1024_TO_1522 0x334
+#define BGMAC_TX_LEN_1523_TO_2047 0x338
+#define BGMAC_TX_LEN_2048_TO_4095 0x33c
+#define BGMAC_TX_LEN_4095_TO_8191 0x340
+#define BGMAC_TX_LEN_8192_TO_MAX 0x344
+#define BGMAC_TX_JABBER_PKTS 0x348 /* Error */
+#define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */
+#define BGMAC_TX_FRAGMENT_PKTS 0x350
+#define BGMAC_TX_UNDERRUNS 0x354 /* Error */
+#define BGMAC_TX_TOTAL_COLS 0x358
+#define BGMAC_TX_SINGLE_COLS 0x35c
+#define BGMAC_TX_MULTIPLE_COLS 0x360
+#define BGMAC_TX_EXCESSIVE_COLS 0x364 /* Error */
+#define BGMAC_TX_LATE_COLS 0x368 /* Error */
+#define BGMAC_TX_DEFERED 0x36c
+#define BGMAC_TX_CARRIER_LOST 0x370
+#define BGMAC_TX_PAUSE_PKTS 0x374
+#define BGMAC_TX_UNI_PKTS 0x378
+#define BGMAC_TX_Q0_PKTS 0x37c
+#define BGMAC_TX_Q0_OCTETS 0x380
+#define BGMAC_TX_Q0_OCTETS_HIGH 0x384
+#define BGMAC_TX_Q1_PKTS 0x388
+#define BGMAC_TX_Q1_OCTETS 0x38c
+#define BGMAC_TX_Q1_OCTETS_HIGH 0x390
+#define BGMAC_TX_Q2_PKTS 0x394
+#define BGMAC_TX_Q2_OCTETS 0x398
+#define BGMAC_TX_Q2_OCTETS_HIGH 0x39c
+#define BGMAC_TX_Q3_PKTS 0x3a0
+#define BGMAC_TX_Q3_OCTETS 0x3a4
+#define BGMAC_TX_Q3_OCTETS_HIGH 0x3a8
+#define BGMAC_RX_GOOD_OCTETS 0x3b0
+#define BGMAC_RX_GOOD_OCTETS_HIGH 0x3b4
+#define BGMAC_RX_GOOD_PKTS 0x3b8
+#define BGMAC_RX_OCTETS 0x3bc
+#define BGMAC_RX_OCTETS_HIGH 0x3c0
+#define BGMAC_RX_PKTS 0x3c4
+#define BGMAC_RX_BROADCAST_PKTS 0x3c8
+#define BGMAC_RX_MULTICAST_PKTS 0x3cc
+#define BGMAC_RX_LEN_64 0x3d0
+#define BGMAC_RX_LEN_65_TO_127 0x3d4
+#define BGMAC_RX_LEN_128_TO_255 0x3d8
+#define BGMAC_RX_LEN_256_TO_511 0x3dc
+#define BGMAC_RX_LEN_512_TO_1023 0x3e0
+#define BGMAC_RX_LEN_1024_TO_1522 0x3e4
+#define BGMAC_RX_LEN_1523_TO_2047 0x3e8
+#define BGMAC_RX_LEN_2048_TO_4095 0x3ec
+#define BGMAC_RX_LEN_4095_TO_8191 0x3f0
+#define BGMAC_RX_LEN_8192_TO_MAX 0x3f4
+#define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */
+#define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */
+#define BGMAC_RX_FRAGMENT_PKTS 0x400
+#define BGMAC_RX_MISSED_PKTS 0x404 /* Error */
+#define BGMAC_RX_CRC_ALIGN_ERRS 0x408 /* Error */
+#define BGMAC_RX_UNDERSIZE 0x40c /* Error */
+#define BGMAC_RX_CRC_ERRS 0x410 /* Error */
+#define BGMAC_RX_ALIGN_ERRS 0x414 /* Error */
+#define BGMAC_RX_SYMBOL_ERRS 0x418 /* Error */
+#define BGMAC_RX_PAUSE_PKTS 0x41c
+#define BGMAC_RX_NONPAUSE_PKTS 0x420
+#define BGMAC_RX_SACHANGES 0x424
+#define BGMAC_RX_UNI_PKTS 0x428
+#define BGMAC_UNIMAC_VERSION 0x800
+#define BGMAC_HDBKP_CTL 0x804
+#define BGMAC_CMDCFG 0x808 /* Configuration */
+#define BGMAC_CMDCFG_TE 0x00000001 /* Set to activate TX */
+#define BGMAC_CMDCFG_RE 0x00000002 /* Set to activate RX */
+#define BGMAC_CMDCFG_ES_MASK 0x0000000c /* Ethernet speed see gmac_speed */
+#define BGMAC_CMDCFG_ES_10 0x00000000
+#define BGMAC_CMDCFG_ES_100 0x00000004
+#define BGMAC_CMDCFG_ES_1000 0x00000008
+#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
+#define BGMAC_CMDCFG_PAD_EN 0x00000020
+#define BGMAC_CMDCFG_CF 0x00000040
+#define BGMAC_CMDCFG_PF 0x00000080
+#define BGMAC_CMDCFG_RPI 0x00000100 /* Unset to enable 802.3x tx flow control */
+#define BGMAC_CMDCFG_TAI 0x00000200
+#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
+#define BGMAC_CMDCFG_HD_SHIFT 10
+#define BGMAC_CMDCFG_SR 0x00000800 /* Set to reset mode */
+#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
+#define BGMAC_CMDCFG_AE 0x00400000
+#define BGMAC_CMDCFG_CFE 0x00800000
+#define BGMAC_CMDCFG_NLC 0x01000000
+#define BGMAC_CMDCFG_RL 0x02000000
+#define BGMAC_CMDCFG_RED 0x04000000
+#define BGMAC_CMDCFG_PE 0x08000000
+#define BGMAC_CMDCFG_TPI 0x10000000
+#define BGMAC_CMDCFG_AT 0x20000000
+#define BGMAC_MACADDR_HIGH 0x80c /* High 4 octets of own mac address */
+#define BGMAC_MACADDR_LOW 0x810 /* Low 2 octets of own mac address */
+#define BGMAC_RXMAX_LENGTH 0x814 /* Max receive frame length with vlan tag */
+#define BGMAC_PAUSEQUANTA 0x818
+#define BGMAC_MAC_MODE 0x844
+#define BGMAC_OUTERTAG 0x848
+#define BGMAC_INNERTAG 0x84c
+#define BGMAC_TXIPG 0x85c
+#define BGMAC_PAUSE_CTL 0xb30
+#define BGMAC_TX_FLUSH 0xb34
+#define BGMAC_RX_STATUS 0xb38
+#define BGMAC_TX_STATUS 0xb3c
+
+#define BGMAC_PHY_CTL 0x00
+#define BGMAC_PHY_CTL_SPEED_MSB 0x0040
+#define BGMAC_PHY_CTL_DUPLEX 0x0100 /* duplex mode */
+#define BGMAC_PHY_CTL_RESTART 0x0200 /* restart autonegotiation */
+#define BGMAC_PHY_CTL_ANENAB 0x1000 /* enable autonegotiation */
+#define BGMAC_PHY_CTL_SPEED 0x2000
+#define BGMAC_PHY_CTL_LOOP 0x4000 /* loopback */
+#define BGMAC_PHY_CTL_RESET 0x8000 /* reset */
+/* Helpers */
+#define BGMAC_PHY_CTL_SPEED_10 0
+#define BGMAC_PHY_CTL_SPEED_100 BGMAC_PHY_CTL_SPEED
+#define BGMAC_PHY_CTL_SPEED_1000 BGMAC_PHY_CTL_SPEED_MSB
+#define BGMAC_PHY_ADV 0x04
+#define BGMAC_PHY_ADV_10HALF 0x0020 /* advertise 10MBits/s half duplex */
+#define BGMAC_PHY_ADV_10FULL 0x0040 /* advertise 10MBits/s full duplex */
+#define BGMAC_PHY_ADV_100HALF 0x0080 /* advertise 100MBits/s half duplex */
+#define BGMAC_PHY_ADV_100FULL 0x0100 /* advertise 100MBits/s full duplex */
+#define BGMAC_PHY_ADV2 0x09
+#define BGMAC_PHY_ADV2_1000HALF 0x0100 /* advertise 1000MBits/s half duplex */
+#define BGMAC_PHY_ADV2_1000FULL 0x0200 /* advertise 1000MBits/s full duplex */
+
+/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
+#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
+#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
+
+/* BCMA GMAC core specific IO status (BCMA_IOST) flags */
+#define BGMAC_BCMA_IOST_ATTACHED 0x00000800
+
+#define BGMAC_NUM_MIB_TX_REGS \
+ (((BGMAC_TX_Q3_OCTETS_HIGH - BGMAC_TX_GOOD_OCTETS) / 4) + 1)
+#define BGMAC_NUM_MIB_RX_REGS \
+ (((BGMAC_RX_UNI_PKTS - BGMAC_RX_GOOD_OCTETS) / 4) + 1)
+
+#define BGMAC_DMA_TX_CTL 0x00
+#define BGMAC_DMA_TX_ENABLE 0x00000001
+#define BGMAC_DMA_TX_SUSPEND 0x00000002
+#define BGMAC_DMA_TX_LOOPBACK 0x00000004
+#define BGMAC_DMA_TX_FLUSH 0x00000010
+#define BGMAC_DMA_TX_PARITY_DISABLE 0x00000800
+#define BGMAC_DMA_TX_ADDREXT_MASK 0x00030000
+#define BGMAC_DMA_TX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_TX_INDEX 0x04
+#define BGMAC_DMA_TX_RINGLO 0x08
+#define BGMAC_DMA_TX_RINGHI 0x0C
+#define BGMAC_DMA_TX_STATUS 0x10
+#define BGMAC_DMA_TX_STATDPTR 0x00001FFF
+#define BGMAC_DMA_TX_STAT 0xF0000000
+#define BGMAC_DMA_TX_STAT_DISABLED 0x00000000
+#define BGMAC_DMA_TX_STAT_ACTIVE 0x10000000
+#define BGMAC_DMA_TX_STAT_IDLEWAIT 0x20000000
+#define BGMAC_DMA_TX_STAT_STOPPED 0x30000000
+#define BGMAC_DMA_TX_STAT_SUSP 0x40000000
+#define BGMAC_DMA_TX_ERROR 0x14
+#define BGMAC_DMA_TX_ERRDPTR 0x0001FFFF
+#define BGMAC_DMA_TX_ERR 0xF0000000
+#define BGMAC_DMA_TX_ERR_NOERR 0x00000000
+#define BGMAC_DMA_TX_ERR_PROT 0x10000000
+#define BGMAC_DMA_TX_ERR_UNDERRUN 0x20000000
+#define BGMAC_DMA_TX_ERR_TRANSFER 0x30000000
+#define BGMAC_DMA_TX_ERR_DESCREAD 0x40000000
+#define BGMAC_DMA_TX_ERR_CORE 0x50000000
+#define BGMAC_DMA_RX_CTL 0x20
+#define BGMAC_DMA_RX_ENABLE 0x00000001
+#define BGMAC_DMA_RX_FRAME_OFFSET_MASK 0x000000FE
+#define BGMAC_DMA_RX_FRAME_OFFSET_SHIFT 1
+#define BGMAC_DMA_RX_DIRECT_FIFO 0x00000100
+#define BGMAC_DMA_RX_OVERFLOW_CONT 0x00000400
+#define BGMAC_DMA_RX_PARITY_DISABLE 0x00000800
+#define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000
+#define BGMAC_DMA_RX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_RX_INDEX 0x24
+#define BGMAC_DMA_RX_RINGLO 0x28
+#define BGMAC_DMA_RX_RINGHI 0x2C
+#define BGMAC_DMA_RX_STATUS 0x30
+#define BGMAC_DMA_RX_STATDPTR 0x00001FFF
+#define BGMAC_DMA_RX_STAT 0xF0000000
+#define BGMAC_DMA_RX_STAT_DISABLED 0x00000000
+#define BGMAC_DMA_RX_STAT_ACTIVE 0x10000000
+#define BGMAC_DMA_RX_STAT_IDLEWAIT 0x20000000
+#define BGMAC_DMA_RX_STAT_STOPPED 0x30000000
+#define BGMAC_DMA_RX_STAT_SUSP 0x40000000
+#define BGMAC_DMA_RX_ERROR 0x34
+#define BGMAC_DMA_RX_ERRDPTR 0x0001FFFF
+#define BGMAC_DMA_RX_ERR 0xF0000000
+#define BGMAC_DMA_RX_ERR_NOERR 0x00000000
+#define BGMAC_DMA_RX_ERR_PROT 0x10000000
+#define BGMAC_DMA_RX_ERR_UNDERRUN 0x20000000
+#define BGMAC_DMA_RX_ERR_TRANSFER 0x30000000
+#define BGMAC_DMA_RX_ERR_DESCREAD 0x40000000
+#define BGMAC_DMA_RX_ERR_CORE 0x50000000
+
+#define BGMAC_DESC_CTL0_EOT 0x10000000 /* End of ring */
+#define BGMAC_DESC_CTL0_IOC 0x20000000 /* IRQ on complete */
+#define BGMAC_DESC_CTL0_SOF 0x40000000 /* Start of frame */
+#define BGMAC_DESC_CTL0_EOF 0x80000000 /* End of frame */
+#define BGMAC_DESC_CTL1_LEN 0x00001FFF
+
+#define BGMAC_PHY_NOREGS 0x1E
+#define BGMAC_PHY_MASK 0x1F
+
+#define BGMAC_MAX_TX_RINGS 4
+#define BGMAC_MAX_RX_RINGS 1
+
+#define BGMAC_TX_RING_SLOTS 128
+#define BGMAC_RX_RING_SLOTS		(512 - 1)	/* Why -1? Well, Broadcom does that... */
+
+#define BGMAC_RX_HEADER_LEN 28 /* Last 24 bytes are unused. Well... */
+#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
+#define BGMAC_RX_MAX_FRAME_SIZE 1536 /* Copied from b44/tg3 */
+#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
+
+#define BGMAC_BFL_ENETROBO 0x0010 /* has ephy roboswitch spi */
+#define BGMAC_BFL_ENETADM 0x0080 /* has ADMtek switch */
+#define BGMAC_BFL_ENETVLAN 0x0100 /* can do vlan */
+
+#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030
+#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000
+#define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010
+#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020
+#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYMII 0x00000040
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII 0x00000080
+#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
+#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
+
+#define BGMAC_SPEED_10 0x0001
+#define BGMAC_SPEED_100 0x0002
+#define BGMAC_SPEED_1000 0x0004
+
+#define BGMAC_WEIGHT 64
+
+#define ETHER_MAX_LEN 1518
+
+struct bgmac_slot_info {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+};
+
+struct bgmac_dma_desc {
+ __le32 ctl0;
+ __le32 ctl1;
+ __le32 addr_low;
+ __le32 addr_high;
+} __packed;
+
+enum bgmac_dma_ring_type {
+ BGMAC_DMA_RING_TX,
+ BGMAC_DMA_RING_RX,
+};
+
+/**
+ * bgmac_dma_ring - contains info about DMA ring (either TX or RX one)
+ * @start: index of the first slot containing data
+ * @end: index of a slot that can *not* be read (yet)
+ *
+ * Note the specific meaning of @end: it is the index of the slot *after* the
+ * last one containing data that can be read. If @start equals @end, the ring
+ * is empty.
+ */
+struct bgmac_dma_ring {
+ u16 num_slots;
+ u16 start;
+ u16 end;
+
+ u16 mmio_base;
+ struct bgmac_dma_desc *cpu_base;
+ dma_addr_t dma_base;
+
+ struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
+};
+
+struct bgmac_rx_header {
+ __le16 len;
+ __le16 flags;
+ __le16 pad[12];
+};
+
+struct bgmac {
+ struct bcma_device *core;
+ struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
+ struct net_device *net_dev;
+ struct napi_struct napi;
+
+ /* DMA */
+ struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
+ struct bgmac_dma_ring rx_ring[BGMAC_MAX_RX_RINGS];
+
+ /* Stats */
+ bool stats_grabbed;
+ u32 mib_tx_regs[BGMAC_NUM_MIB_TX_REGS];
+ u32 mib_rx_regs[BGMAC_NUM_MIB_RX_REGS];
+
+ /* Int */
+ u32 int_mask;
+ u32 int_status;
+
+ /* Speed-related */
+ int speed;
+ bool autoneg;
+ bool full_duplex;
+
+ u8 phyaddr;
+ bool has_robosw;
+
+ bool loopback;
+};
+
+static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset)
+{
+ return bcma_read32(bgmac->core, offset);
+}
+
+static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bcma_write32(bgmac->core, offset, value);
+}
+
+static inline void bgmac_maskset(struct bgmac *bgmac, u16 offset, u32 mask,
+ u32 set)
+{
+ bgmac_write(bgmac, offset, (bgmac_read(bgmac, offset) & mask) | set);
+}
+
+static inline void bgmac_mask(struct bgmac *bgmac, u16 offset, u32 mask)
+{
+ bgmac_maskset(bgmac, offset, mask, 0);
+}
+
+static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
+{
+ bgmac_maskset(bgmac, offset, ~0, set);
+}
+
+#endif /* _BGMAC_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a1adfaf87f49..2f0ba8f2fd6c 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8543,7 +8543,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
memcpy(dev->dev_addr, bp->mac_addr, 6);
- memcpy(dev->perm_addr, bp->mac_addr, 6);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile
index 48fbdd48f88f..116762daae09 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/Makefile
+++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile
@@ -4,4 +4,5 @@
obj-$(CONFIG_BNX2X) += bnx2x.o
-bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
+bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
+bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e8d4db10c8f3..e4605a965084 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,6 +1,6 @@
/* bnx2x.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,9 +13,12 @@
#ifndef BNX2X_H
#define BNX2X_H
+
+#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
+#include <linux/pci_regs.h>
/* compilation time flags */
@@ -23,8 +26,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.00-0"
-#define DRV_MODULE_RELDATE "2012/09/27"
+#define DRV_MODULE_VERSION "1.78.02-0"
+#define DRV_MODULE_RELDATE "2013/01/14"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -48,6 +51,13 @@
#include "bnx2x_sp.h"
#include "bnx2x_dcb.h"
#include "bnx2x_stats.h"
+#include "bnx2x_vfpf.h"
+
+enum bnx2x_int_mode {
+ BNX2X_INT_MODE_MSIX,
+ BNX2X_INT_MODE_INTX,
+ BNX2X_INT_MODE_MSI
+};
/* error/debug prints */
@@ -112,29 +122,29 @@ do { \
dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \
} while (0)
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int);
#ifdef BNX2X_STOP_ON_ERROR
-void bnx2x_int_disable(struct bnx2x *bp);
#define bnx2x_panic() \
do { \
bp->panic = 1; \
BNX2X_ERR("driver assert\n"); \
- bnx2x_int_disable(bp); \
- bnx2x_panic_dump(bp); \
+ bnx2x_panic_dump(bp, true); \
} while (0)
#else
#define bnx2x_panic() \
do { \
bp->panic = 1; \
BNX2X_ERR("driver assert\n"); \
- bnx2x_panic_dump(bp); \
+ bnx2x_panic_dump(bp, false); \
} while (0)
#endif
#define bnx2x_mc_addr(ha) ((ha)->addr)
#define bnx2x_uc_addr(ha) ((ha)->addr)
-#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
-#define U64_HI(x) (u32)(((u64)(x)) >> 32)
+#define U64_LO(x) ((u32)(((u64)(x)) & 0xffffffff))
+#define U64_HI(x) ((u32)(((u64)(x)) >> 32))
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
@@ -334,6 +344,9 @@ union db_prod {
#define SGE_PAGE_SIZE PAGE_SIZE
#define SGE_PAGE_SHIFT PAGE_SHIFT
#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
+#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
+ SGE_PAGES), 0xffff)
/* SGE ring related macros */
#define NUM_RX_SGE_PAGES 2
@@ -789,48 +802,63 @@ struct bnx2x_common {
#define CHIP_NUM_57711E 0x1650
#define CHIP_NUM_57712 0x1662
#define CHIP_NUM_57712_MF 0x1663
+#define CHIP_NUM_57712_VF 0x166f
#define CHIP_NUM_57713 0x1651
#define CHIP_NUM_57713E 0x1652
#define CHIP_NUM_57800 0x168a
#define CHIP_NUM_57800_MF 0x16a5
+#define CHIP_NUM_57800_VF 0x16a9
#define CHIP_NUM_57810 0x168e
#define CHIP_NUM_57810_MF 0x16ae
+#define CHIP_NUM_57810_VF 0x16af
#define CHIP_NUM_57811 0x163d
#define CHIP_NUM_57811_MF 0x163e
-#define CHIP_NUM_57840_OBSOLETE 0x168d
+#define CHIP_NUM_57811_VF 0x163f
+#define CHIP_NUM_57840_OBSOLETE 0x168d
#define CHIP_NUM_57840_MF_OBSOLETE 0x16ab
#define CHIP_NUM_57840_4_10 0x16a1
#define CHIP_NUM_57840_2_20 0x16a2
#define CHIP_NUM_57840_MF 0x16a4
+#define CHIP_NUM_57840_VF 0x16ad
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
+#define CHIP_IS_57712_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_VF)
#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800)
#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57800_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_VF)
#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57810_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_VF)
#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
+#define CHIP_IS_57811_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_VF)
#define CHIP_IS_57840(bp) \
((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
#define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
+#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF)
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
- CHIP_IS_57712_MF(bp))
+ CHIP_IS_57712_MF(bp) || \
+ CHIP_IS_57712_VF(bp))
#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \
CHIP_IS_57800_MF(bp) || \
+ CHIP_IS_57800_VF(bp) || \
CHIP_IS_57810(bp) || \
CHIP_IS_57810_MF(bp) || \
+ CHIP_IS_57810_VF(bp) || \
CHIP_IS_57811(bp) || \
CHIP_IS_57811_MF(bp) || \
+ CHIP_IS_57811_VF(bp) || \
CHIP_IS_57840(bp) || \
- CHIP_IS_57840_MF(bp))
+ CHIP_IS_57840_MF(bp) || \
+ CHIP_IS_57840_VF(bp))
#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
#define USES_WARPCORE(bp) (CHIP_IS_E3(bp))
#define IS_E1H_OFFSET (!CHIP_IS_E1(bp))
@@ -954,6 +982,11 @@ struct bnx2x_port {
extern struct workqueue_struct *bnx2x_wq;
#define BNX2X_MAX_NUM_OF_VFS 64
+#define BNX2X_VF_CID_WND 0
+#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
+#define BNX2X_CLIENTS_PER_VF 1
+#define BNX2X_FIRST_VF_CID 256
+#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
#define BNX2X_VF_ID_INVALID 0xFF
/*
@@ -1104,6 +1137,7 @@ struct hw_context {
/* forward */
struct bnx2x_ilt;
+struct bnx2x_vfdb;
enum bnx2x_recovery_state {
BNX2X_RECOVERY_DONE,
@@ -1165,19 +1199,22 @@ struct bnx2x_fw_stats_req {
};
struct bnx2x_fw_stats_data {
- struct stats_counter storm_counters;
- struct per_port_stats port;
- struct per_pf_stats pf;
+ struct stats_counter storm_counters;
+ struct per_port_stats port;
+ struct per_pf_stats pf;
struct fcoe_statistics_params fcoe;
- struct per_queue_stats queue_stats[1];
+ struct per_queue_stats queue_stats[1];
};
/* Public slow path states */
enum {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
- BNX2X_SP_RTNL_AFEX_F_UPDATE,
BNX2X_SP_RTNL_FAN_FAILURE,
+ BNX2X_SP_RTNL_AFEX_F_UPDATE,
+ BNX2X_SP_RTNL_ENABLE_SRIOV,
+ BNX2X_SP_RTNL_VFPF_MCAST,
+ BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
};
@@ -1231,6 +1268,21 @@ struct bnx2x {
(vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
+#ifdef CONFIG_BNX2X_SRIOV
+ /* vf pf channel mailbox contains request and response buffers */
+ struct bnx2x_vf_mbx_msg *vf2pf_mbox;
+ dma_addr_t vf2pf_mbox_mapping;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* bulletin board for messages from pf to vf */
+ union pf_vf_bulletin *pf2vf_bulletin;
+ dma_addr_t pf2vf_bulletin_mapping;
+
+ struct pf_vf_bulletin_content old_bulletin;
+#endif /* CONFIG_BNX2X_SRIOV */
+
struct net_device *dev;
struct pci_dev *pdev;
@@ -1295,8 +1347,6 @@ struct bnx2x {
__le16 *eq_cons_sb;
atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
-
-
/* Counter for marking that there is a STAT_QUERY ramrod pending */
u16 stats_pending;
/* Counter for completed statistics ramrods */
@@ -1318,8 +1368,6 @@ struct bnx2x {
#define DISABLE_MSI_FLAG (1 << 7)
#define TPA_ENABLE_FLAG (1 << 8)
#define NO_MCP_FLAG (1 << 9)
-
-#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
#define GRO_ENABLE_FLAG (1 << 10)
#define MF_FUNC_DIS (1 << 11)
#define OWN_CNIC_IRQ (1 << 12)
@@ -1330,6 +1378,17 @@ struct bnx2x {
#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
+#define IS_VF_FLAG (1 << 22)
+
+#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
+
+#ifdef CONFIG_BNX2X_SRIOV
+#define IS_VF(bp) ((bp)->flags & IS_VF_FLAG)
+#define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG))
+#else
+#define IS_VF(bp) false
+#define IS_PF(bp) true
+#endif
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1349,6 +1408,7 @@ struct bnx2x {
int mrrs;
struct delayed_work sp_task;
+ atomic_t interrupt_occurred;
struct delayed_work sp_rtnl_task;
struct delayed_work period_task;
@@ -1432,6 +1492,7 @@ struct bnx2x {
u8 igu_sb_cnt;
u8 min_msix_vec_cnt;
+ u32 igu_base_addr;
dma_addr_t def_status_blk_mapping;
struct bnx2x_slowpath *slowpath;
@@ -1580,6 +1641,9 @@ struct bnx2x {
char fw_ver[32];
const struct firmware *firmware;
+ struct bnx2x_vfdb *vfdb;
+#define IS_SRIOV(bp) ((bp)->vfdb)
+
/* DCB support on/off */
u16 dcb_state;
#define BNX2X_DCB_STATE_OFF 0
@@ -1599,6 +1663,10 @@ struct bnx2x {
int dcb_version;
/* CAM credit pools */
+
+ /* used only in sriov */
+ struct bnx2x_credit_pool_obj vlans_pool;
+
struct bnx2x_credit_pool_obj macs_pool;
/* RX_MODE object */
@@ -1636,6 +1704,9 @@ struct bnx2x {
/* priority to cos mapping */
u8 prio_to_cos[8];
+
+ int fp_array_size;
+ u32 dump_preset_idx;
};
/* Tx queues may be less or equal to Rx queues */
@@ -1813,12 +1884,16 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
/* Init Function API */
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+ u8 vf_valid, int fw_sb_id, int igu_sb_id);
+u32 bnx2x_get_pretend_reg(struct bnx2x *bp);
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
void bnx2x_read_mf_cfg(struct bnx2x *bp);
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
@@ -1830,6 +1905,18 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
bool with_comp, u8 comp_type);
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u8 src_type, u8 dst_type);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
+void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl);
+
+/* FLR related routines */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count);
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt);
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev);
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
@@ -1854,6 +1941,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
return val;
}
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
+ bool is_pf);
+
#define BNX2X_ILT_ZALLOC(x, y, size) \
do { \
x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
@@ -1990,10 +2080,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
BNX2X_PHY_LOOPBACK_FAILED)
-
#define STROM_ASSERT_ARRAY_SIZE 50
-
/* must be used on a CID before placing it on a HW ring */
#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
(BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
@@ -2024,7 +2112,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* Memory of fairness algorithm . 2 cycles */
#define FAIR_MEM 2
-
#define ATTN_NIG_FOR_FUNC (1L << 8)
#define ATTN_SW_TIMER_4_FUNC (1L << 9)
#define GPIO_2_FUNC (1L << 10)
@@ -2067,6 +2154,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
@@ -2128,7 +2216,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MULTI_MASK 0x7f
-
#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func)
#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func)
#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func)
@@ -2156,18 +2243,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
(&bp->def_status_blk->sp_sb.\
index_values[HC_SP_INDEX_ETH_DEF_CONS])
-#define SET_FLAG(value, mask, flag) \
- do {\
- (value) &= ~(mask);\
- (value) |= ((flag) << (mask##_SHIFT));\
- } while (0)
-
-#define GET_FLAG(value, mask) \
- (((value) & (mask)) >> (mask##_SHIFT))
-
-#define GET_FIELD(value, fname) \
- (((value) & (fname##_MASK)) >> (fname##_SHIFT))
-
#define CAM_IS_INVALID(x) \
(GET_FLAG(x.flags, \
MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
@@ -2178,7 +2253,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
-
#ifndef PXP2_REG_PXP2_INT_STS
#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
#endif
@@ -2190,9 +2264,16 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
+#define VF_ACQUIRE_THRESH 3
+#define VF_ACQUIRE_MAC_FILTERS 1
+#define VF_ACQUIRE_MC_FILTERS 10
+
+#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
+ (!((me_reg) & ME_REG_VF_ERR)))
+int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
/* Congestion management fairness mode */
-#define CMNG_FNS_NONE 0
-#define CMNG_FNS_MINMAX 1
+#define CMNG_FNS_NONE 0
+#define CMNG_FNS_MINMAX 1
#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
#define HC_SEG_ACCESS_ATTN 4
@@ -2208,7 +2289,6 @@ static const u32 dmae_reg_go_c[] = {
void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
-
#define BNX2X_MF_SD_PROTOCOL(bp) \
((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
@@ -2229,6 +2309,18 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
(BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+#define SET_FLAG(value, mask, flag) \
+ do {\
+ (value) &= ~(mask);\
+ (value) |= ((flag) << (mask##_SHIFT));\
+ } while (0)
+
+#define GET_FLAG(value, mask) \
+ (((value) & (mask)) >> (mask##_SHIFT))
+
+#define GET_FIELD(value, fname) \
+ (((value) & (fname##_MASK)) >> (fname##_SHIFT))
+
enum {
SWITCH_UPDATE,
AFEX_UPDATE,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 01588b66a38c..ecac04a3687c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,6 @@
/* bnx2x_cmn.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
+#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
@@ -28,8 +29,6 @@
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
-
-
/**
* bnx2x_move_fp - move content of the fastpath structure.
*
@@ -80,12 +79,65 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
}
- memcpy(&bp->bnx2x_txq[old_txdata_index],
- &bp->bnx2x_txq[new_txdata_index],
+ memcpy(&bp->bnx2x_txq[new_txdata_index],
+ &bp->bnx2x_txq[old_txdata_index],
sizeof(struct bnx2x_fp_txdata));
to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
+/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string.
+ *
+ * @bp: driver handle
+ * @buf: character buffer to fill with the fw name
+ * @buf_len: length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
+{
+ if (IS_PF(bp)) {
+ u8 phy_fw_ver[PHY_FW_VER_LEN];
+
+ phy_fw_ver[0] = '\0';
+ bnx2x_get_ext_phy_fw_version(&bp->link_params,
+ phy_fw_ver, PHY_FW_VER_LEN);
+ strlcpy(buf, bp->fw_ver, buf_len);
+ snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+ "bc %d.%d.%d%s%s",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff),
+ ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+ } else {
+ bnx2x_vf_fill_fw_str(bp, buf, buf_len);
+ }
+}
+
+/**
+ * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
+ *
+ * @bp: driver handle
+ * @delta: number of eth queues which were not allocated
+ */
+static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
+{
+ int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
+ * backward along the array could cause memory to be overridden
+ */
+ for (cos = 1; cos < bp->max_cos; cos++) {
+ for (i = 0; i < old_eth_num - delta; i++) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ int new_idx = cos * (old_eth_num - delta) + i;
+
+ memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
+ sizeof(struct bnx2x_fp_txdata));
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
+ }
+ }
+}
+
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
@@ -185,7 +237,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
txdata->txq_index, hw_cons, sw_cons, pkt_cons);
bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
- &pkts_compl, &bytes_compl);
+ &pkts_compl, &bytes_compl);
sw_cons++;
}
@@ -291,14 +343,14 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
fp->last_max_sge, fp->rx_sge_prod);
}
-/* Set Toeplitz hash value in the skb using the value from the
+/* Get Toeplitz hash value in the skb using the value from the
* CQE (calculated by HW).
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
const struct eth_fast_path_rx_cqe *cqe,
bool *l4_rxhash)
{
- /* Set Toeplitz hash from CQE */
+ /* Get Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
(cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
enum eth_rss_hash_type htype;
@@ -365,8 +417,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
- tpa_info->full_page =
- SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
+ tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
tpa_info->gro_size = gro_size;
}
@@ -387,31 +438,34 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
*/
#define TPA_TSTAMP_OPT_LEN 12
/**
- * bnx2x_set_lro_mss - calculate the approximate value of the MSS
+ * bnx2x_set_gro_params - compute GRO values
*
- * @bp: driver handle
+ * @skb: packet skb
* @parsing_flags: parsing flags from the START CQE
* @len_on_bd: total length of the first packet for the
* aggregation.
+ * @pkt_len: length of all segments
*
* Approximate value of the MSS for this aggregation calculated using
* the first packet of it.
+ * Compute number of aggregated segments, and gso_type.
*/
-static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
- u16 len_on_bd)
+static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
+ u16 len_on_bd, unsigned int pkt_len)
{
- /*
- * TPA arrgregation won't have either IP options or TCP options
+ /* TPA aggregation won't have either IP options or TCP options
* other than timestamp or IPv6 extension headers.
*/
u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
- PRS_FLAG_OVERETH_IPV6)
+ PRS_FLAG_OVERETH_IPV6) {
hdrs_len += sizeof(struct ipv6hdr);
- else /* IPv4 */
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ } else {
hdrs_len += sizeof(struct iphdr);
-
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
/* Check if there was a TCP timestamp; if there is one, it will
* always be 12 bytes long: nop nop kind length echo val.
@@ -421,7 +475,13 @@ static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
hdrs_len += TPA_TSTAMP_OPT_LEN;
- return len_on_bd - hdrs_len;
+ skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
+ skb_shinfo(skb)->gso_size);
}
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
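/* Editor's note: illustration only, not part of the patch. The arithmetic in
 * bnx2x_set_gro_params() reduces to: gso_size = len_on_bd - header bytes, and
 * segment count = ceil((total length - headers) / gso_size). The numbers
 * below mirror the IPv4, no-timestamp case and are made up.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int len_on_bd = 1514;		/* first packet of the aggregation */
	unsigned int pkt_len = 9000;		/* length of all aggregated segments */
	unsigned int hdrs_len = 14 + 20 + 20;	/* ETH + IPv4 + TCP, no options */
	unsigned int gso_size = len_on_bd - hdrs_len;
	unsigned int gso_segs = DIV_ROUND_UP(pkt_len - hdrs_len, gso_size);

	printf("gso_size=%u gso_segs=%u\n", gso_size, gso_segs);	/* 1460, 7 */
	return 0;
}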
@@ -438,7 +498,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
}
mapping = dma_map_page(&bp->pdev->dev, page, 0,
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
BNX2X_ERR("Can't map sge\n");
@@ -475,22 +535,12 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
/* This is needed in order to enable forwarding support */
- if (frag_size) {
- skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
- tpa_info->parsing_flags, len_on_bd);
-
- /* set for GRO */
- if (fp->mode == TPA_MODE_GRO)
- skb_shinfo(skb)->gso_type =
- (GET_FLAG(tpa_info->parsing_flags,
- PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
- PRS_FLAG_OVERETH_IPV6) ?
- SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
- }
-
+ if (frag_size)
+ bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
+ le16_to_cpu(cqe->pkt_len));
#ifdef BNX2X_STOP_ON_ERROR
- if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
+ if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
pages, cqe_idx);
BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
@@ -508,8 +558,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (fp->mode == TPA_MODE_GRO)
frag_len = min_t(u32, frag_size, (u32)full_page);
else /* LRO */
- frag_len = min_t(u32, frag_size,
- (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
+ frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
rx_pg = &fp->rx_page_ring[sge_idx];
old_rx_pg = *rx_pg;
@@ -525,7 +574,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Unmap the page as we are going to pass it to the stack */
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
if (fp->mode == TPA_MODE_LRO)
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -543,7 +592,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
skb->data_len += frag_len;
- skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
+ skb->truesize += SGE_PAGES;
skb->len += frag_len;
frag_size -= frag_len;
@@ -568,6 +617,54 @@ static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}
+#ifdef CONFIG_INET
+static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+ iph->saddr, iph->daddr, 0);
+}
+
+static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+}
+#endif
+
+static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ if (skb_shinfo(skb)->gso_size) {
+ skb_set_network_header(skb, 0);
+ switch (be16_to_cpu(skb->protocol)) {
+ case ETH_P_IP:
+ bnx2x_gro_ip_csum(bp, skb);
+ break;
+ case ETH_P_IPV6:
+ bnx2x_gro_ipv6_csum(bp, skb);
+ break;
+ default:
+ BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ be16_to_cpu(skb->protocol));
+ }
+ tcp_gro_complete(skb);
+ }
+#endif
+ napi_gro_receive(&fp->napi, skb);
+}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_agg_info *tpa_info,
@@ -622,7 +719,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
- napi_gro_receive(&fp->napi, skb);
+ bnx2x_gro_receive(bp, fp, skb);
} else {
DP(NETIF_MSG_RX_STATUS,
"Failed to allocate new pages - dropping packet!\n");
@@ -1064,7 +1161,7 @@ void __bnx2x_link_report(struct bnx2x *bp)
struct bnx2x_link_report_data cur_data;
/* reread mf_cfg */
- if (!CHIP_IS_E1(bp))
+ if (IS_PF(bp) && !CHIP_IS_E1(bp))
bnx2x_read_mf_cfg(bp);
/* Read the current link report info */
@@ -1406,10 +1503,14 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
if (nvecs == offset)
return;
- free_irq(bp->msix_table[offset].vector, bp->dev);
- DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
- bp->msix_table[offset].vector);
- offset++;
+
+ /* VFs don't have a default SB */
+ if (IS_PF(bp)) {
+ free_irq(bp->msix_table[offset].vector, bp->dev);
+ DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+ bp->msix_table[offset].vector);
+ offset++;
+ }
if (CNIC_SUPPORT(bp)) {
if (nvecs == offset)
@@ -1430,21 +1531,30 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
void bnx2x_free_irq(struct bnx2x *bp)
{
if (bp->flags & USING_MSIX_FLAG &&
- !(bp->flags & USING_SINGLE_MSIX_FLAG))
- bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
- CNIC_SUPPORT(bp) + 1);
- else
+ !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
+ int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
+
+ /* vfs don't have a default status block */
+ if (IS_PF(bp))
+ nvecs++;
+
+ bnx2x_free_msix_irqs(bp, nvecs);
+ } else {
free_irq(bp->dev->irq, bp->dev);
+ }
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
- int msix_vec = 0, i, rc, req_cnt;
+ int msix_vec = 0, i, rc;
- bp->msix_table[msix_vec].entry = msix_vec;
- BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
- bp->msix_table[0].entry);
- msix_vec++;
+ /* VFs don't have a default status block */
+ if (IS_PF(bp)) {
+ bp->msix_table[msix_vec].entry = msix_vec;
+ BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
+ bp->msix_table[0].entry);
+ msix_vec++;
+ }
/* Cnic requires an msix vector for itself */
if (CNIC_SUPPORT(bp)) {
@@ -1462,9 +1572,10 @@ int bnx2x_enable_msix(struct bnx2x *bp)
msix_vec++;
}
- req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
+ DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
+ msix_vec);
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
/*
* reconfigure number of tx/rx queues according to available
@@ -1472,7 +1583,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
*/
if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
/* how less vectors we will have? */
- int diff = req_cnt - rc;
+ int diff = msix_vec - rc;
BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
@@ -1526,12 +1637,15 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
int i, rc, offset = 0;
- rc = request_irq(bp->msix_table[offset++].vector,
- bnx2x_msix_sp_int, 0,
- bp->dev->name, bp->dev);
- if (rc) {
- BNX2X_ERR("request sp irq failed\n");
- return -EBUSY;
+ /* no default status block for vf */
+ if (IS_PF(bp)) {
+ rc = request_irq(bp->msix_table[offset++].vector,
+ bnx2x_msix_sp_int, 0,
+ bp->dev->name, bp->dev);
+ if (rc) {
+ BNX2X_ERR("request sp irq failed\n");
+ return -EBUSY;
+ }
}
if (CNIC_SUPPORT(bp))
@@ -1555,12 +1669,20 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_ETH_QUEUES(bp);
- offset = 1 + CNIC_SUPPORT(bp);
- netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
- bp->msix_table[0].vector,
- 0, bp->msix_table[offset].vector,
- i - 1, bp->msix_table[offset + i - 1].vector);
-
+ if (IS_PF(bp)) {
+ offset = 1 + CNIC_SUPPORT(bp);
+ netdev_info(bp->dev,
+ "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+ } else {
+ offset = CNIC_SUPPORT(bp);
+ netdev_info(bp->dev,
+ "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+ }
return 0;
}
@@ -1605,7 +1727,6 @@ static int bnx2x_setup_irqs(struct bnx2x *bp)
if (rc)
return rc;
} else {
- bnx2x_ack_int(bp);
rc = bnx2x_req_irq(bp);
if (rc) {
BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
@@ -1703,7 +1824,6 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
-
void bnx2x_set_num_queues(struct bnx2x *bp)
{
/* RSS queues */
@@ -1968,27 +2088,212 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
} while (0)
#endif /*BNX2X_STOP_ON_ERROR*/
-bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
+static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
{
- /* build FW version dword */
- u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
- (BCM_5710_FW_MINOR_VERSION << 8) +
- (BCM_5710_FW_REVISION_VERSION << 16) +
- (BCM_5710_FW_ENGINEERING_VERSION << 24);
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ return;
+}
+
+static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+ int num_groups, vf_headroom = 0;
+ int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
+
+ /* number of queues for statistics is number of eth queues + FCoE */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
+
+ /* Total number of FW statistics requests =
+ * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
+ * and fcoe l2 queue) stats + num of queues (which includes another 1
+ * for fcoe l2 queue if applicable)
+ */
+ bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
+
+ /* vf stats appear in the request list, but their data is allocated by
+ * the VFs themselves. We don't include them in the bp->fw_stats_num as
+ * it is used to determine where to place the vf stats queries in the
+ * request struct
+ */
+ if (IS_SRIOV(bp))
+ vf_headroom = bnx2x_vf_headroom(bp);
- /* read loaded FW from chip */
- u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+ /* Request is built from stats_query_header and an array of
+ * stats_query_cmd_group each of which contains
+ * STATS_QUERY_CMD_COUNT rules. The real number of requests is
+ * configured in the stats_query_header.
+ */
+ num_groups =
+ (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
+ (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
+ 1 : 0));
+
+ DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
+ bp->fw_stats_num, vf_headroom, num_groups);
+ bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+ num_groups * sizeof(struct stats_query_cmd_group);
+
+ /* Data for statistics requests + stats_counter
+ * stats_counter holds per-STORM counters that are incremented
+ * when STORM has finished with the current request.
+ * memory for FCoE offloaded statistics is counted anyway,
+ * even if they will not be sent.
+ * VF stats are not accounted for here as the data of VF stats is stored
+ * in memory allocated by the VF, not here.
+ */
+ bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+ sizeof(struct per_pf_stats) +
+ sizeof(struct fcoe_statistics_params) +
+ sizeof(struct per_queue_stats) * num_queue_stats +
+ sizeof(struct stats_counter);
+
+ BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ /* Set shortcuts */
+ bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+ bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+ bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+ ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+ bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+ bp->fw_stats_req_sz;
+
+ DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
+ U64_HI(bp->fw_stats_req_mapping),
+ U64_LO(bp->fw_stats_req_mapping));
+ DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
+ U64_HI(bp->fw_stats_data_mapping),
+ U64_LO(bp->fw_stats_data_mapping));
+ return 0;
- DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
+alloc_mem_err:
+ bnx2x_free_fw_stats_mem(bp);
+ BNX2X_ERR("Can't allocate FW stats memory\n");
+ return -ENOMEM;
+}
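/* Editor's note: illustration only, not part of the patch. The num_groups
 * expression above is a ceiling division: the request holds the header plus
 * ceil((fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) command groups.
 * The group size and counts below are assumed example values.
 */
#include <stdio.h>

#define STATS_QUERY_CMD_COUNT	16	/* assumed group size for the example */

int main(void)
{
	int fw_stats_num = 10, vf_headroom = 25;
	int total = fw_stats_num + vf_headroom;
	/* same result as DIV_ROUND_UP(total, STATS_QUERY_CMD_COUNT) */
	int num_groups = total / STATS_QUERY_CMD_COUNT +
			 (total % STATS_QUERY_CMD_COUNT ? 1 : 0);

	printf("total=%d num_groups=%d\n", total, num_groups);	/* 35, 3 */
	return 0;
}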
- if (loaded_fw != my_fw) {
- if (is_err)
- BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
+/* send load request to mcp and analyze response */
+static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
+{
+ /* init fw_seq */
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+ /* Get current FW pulse sequence */
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+
+ /* load request */
+ (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+ DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
+
+ /* if mcp fails to respond we must abort */
+ if (!(*load_code)) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ return -EBUSY;
+ }
+
+ /* If mcp refused (e.g. other port is in diagnostic mode) we
+ * must abort
+ */
+ if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+ BNX2X_ERR("MCP refused load request, aborting\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/* check whether another PF has already loaded FW to chip. In
+ * virtualized environments a pf from another VM may have already
+ * initialized the device including loading FW
+ */
+int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
+{
+ /* is another pf loaded on this engine? */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
+ load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
+ /* build my FW version dword */
+ u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
+ (BCM_5710_FW_MINOR_VERSION << 8) +
+ (BCM_5710_FW_REVISION_VERSION << 16) +
+ (BCM_5710_FW_ENGINEERING_VERSION << 24);
+
+ /* read loaded FW from chip */
+ u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+ DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
+ loaded_fw, my_fw);
+
+ /* abort nic load if version mismatch */
+ if (my_fw != loaded_fw) {
+ BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
loaded_fw, my_fw);
- return false;
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+/* returns the "mcp load_code" according to global load_count array */
+static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
+{
+ int path = BP_PATH(bp);
+
+ DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]++;
+ load_count[path][1 + port]++;
+ DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 1)
+ return FW_MSG_CODE_DRV_LOAD_COMMON;
+ else if (load_count[path][1 + port] == 1)
+ return FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ return FW_MSG_CODE_DRV_LOAD_FUNCTION;
+}
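/* Editor's note: illustration only, not part of the patch. Without an MCP the
 * driver keeps per-path load counters itself: the first function on the path
 * gets LOAD_COMMON, the first on a port gets LOAD_PORT, everyone else gets
 * LOAD_FUNCTION. A user-space rehearsal of that bookkeeping (one path only):
 */
#include <stdio.h>

enum { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static int load_count[3];	/* [0] common, [1] port0, [2] port1 */

static int fake_load(int port)
{
	load_count[0]++;
	load_count[1 + port]++;
	if (load_count[0] == 1)
		return LOAD_COMMON;
	if (load_count[1 + port] == 1)
		return LOAD_PORT;
	return LOAD_FUNCTION;
}

int main(void)
{
	/* prints "0 1 2": COMMON, then PORT for port 1, then FUNCTION */
	printf("%d %d %d\n", fake_load(0), fake_load(1), fake_load(0));
	return 0;
}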
+
+/* mark PMF if applicable */
+static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
+{
+ if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
+ bp->port.pmf = 1;
+ /* We need the barrier to ensure the ordering between the
+ * writing to bp->port.pmf here and reading it from the
+ * bnx2x_periodic_task().
+ */
+ smp_mb();
+ } else {
+ bp->port.pmf = 0;
}
- return true;
+ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+}
+
+static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
+{
+ if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
+ (bp->common.shmem2_base)) {
+ if (SHMEM2_HAS(bp, dcc_support))
+ SHMEM2_WR(bp, dcc_support,
+ (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+ SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+ if (SHMEM2_HAS(bp, afex_driver_support))
+ SHMEM2_WR(bp, afex_driver_support,
+ SHMEM_AFEX_SUPPORTED_VERSION_ONE);
+ }
+
+ /* Set AFEX default VLAN tag to an invalid value */
+ bp->afex_def_vlan_tag = -1;
}
/**
@@ -2003,49 +2308,15 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
struct bnx2x_fastpath *fp = &bp->fp[index];
- struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
int cos;
struct napi_struct orig_napi = fp->napi;
struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
/* bzero bnx2x_fastpath contents */
- if (bp->stats_init) {
- memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
- memset(fp, 0, sizeof(*fp));
- } else {
- /* Keep Queue statistics */
- struct bnx2x_eth_q_stats *tmp_eth_q_stats;
- struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
-
- tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
- GFP_KERNEL);
- if (tmp_eth_q_stats)
- memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
-
- tmp_eth_q_stats_old =
- kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
- GFP_KERNEL);
- if (tmp_eth_q_stats_old)
- memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
- sizeof(struct bnx2x_eth_q_stats_old));
-
- memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
- memset(fp, 0, sizeof(*fp));
-
- if (tmp_eth_q_stats) {
- memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
- kfree(tmp_eth_q_stats);
- }
-
- if (tmp_eth_q_stats_old) {
- memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
- sizeof(struct bnx2x_eth_q_stats_old));
- kfree(tmp_eth_q_stats_old);
- }
-
- }
+ if (fp->tpa_info)
+ memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
+ sizeof(struct bnx2x_agg_info));
+ memset(fp, 0, sizeof(*fp));
/* Restore the NAPI object as it has been already initialized */
fp->napi = orig_napi;
@@ -2091,10 +2362,12 @@ int bnx2x_load_cnic(struct bnx2x *bp)
mutex_init(&bp->cnic_mutex);
- rc = bnx2x_alloc_mem_cnic(bp);
- if (rc) {
- BNX2X_ERR("Unable to allocate bp memory for cnic\n");
- LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ if (IS_PF(bp)) {
+ rc = bnx2x_alloc_mem_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory for cnic\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
}
rc = bnx2x_alloc_fp_mem_cnic(bp);
@@ -2121,14 +2394,17 @@ int bnx2x_load_cnic(struct bnx2x *bp)
bnx2x_nic_init_cnic(bp);
- /* Enable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+ if (IS_PF(bp)) {
+ /* Enable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
- for_each_cnic_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error_cnic2);
+ /* setup cnic queues */
+ for_each_cnic_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error_cnic2);
+ }
}
}
@@ -2169,13 +2445,11 @@ load_error_cnic0:
#endif /* ! BNX2X_STOP_ON_ERROR */
}
-
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
int port = BP_PORT(bp);
- u32 load_code;
- int i, rc;
+ int i, rc = 0, load_code = 0;
DP(NETIF_MSG_IFUP, "Starting NIC load\n");
DP(NETIF_MSG_IFUP,
@@ -2190,15 +2464,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
- /* Set the initial link reported state to link down */
- bnx2x_acquire_phy_lock(bp);
memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
&bp->last_reported_link.link_report_flags);
- bnx2x_release_phy_lock(bp);
- /* must be called before memory allocation and HW init */
- bnx2x_ilt_set_info(bp);
+ if (IS_PF(bp))
+ /* must be called before memory allocation and HW init */
+ bnx2x_ilt_set_info(bp);
/*
* Zero fastpath structures preserving invariants like napi, which are
@@ -2217,8 +2489,33 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Set the receive queues buffer size */
bnx2x_set_rx_buf_size(bp);
- if (bnx2x_alloc_mem(bp))
- return -ENOMEM;
+ if (IS_PF(bp)) {
+ rc = bnx2x_alloc_mem(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory\n");
+ return rc;
+ }
+ }
+
+ /* Allocate memory for FW statistics */
+ if (bnx2x_alloc_fw_stats_mem(bp))
+ LOAD_ERROR_EXIT(bp, load_error0);
+
+ /* need to be done after alloc mem, since it's self adjusting to amount
+ * of memory available for RSS queues
+ */
+ rc = bnx2x_alloc_fp_mem(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate memory for fps\n");
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
+
+ /* request pf to initialize status blocks */
+ if (IS_VF(bp)) {
+ rc = bnx2x_vfpf_init(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
/* As long as bnx2x_alloc_mem() may possibly update
* bp->num_queues, bnx2x_set_real_num_queues() should always
@@ -2241,98 +2538,48 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
DP(NETIF_MSG_IFUP, "napi added\n");
bnx2x_napi_enable(bp);
- /* set pf load just before approaching the MCP */
- bnx2x_set_pf_load(bp);
-
- /* Send LOAD_REQUEST command to MCP
- * Returns the type of LOAD command:
- * if it is the first port to be initialized
- * common blocks should be initialized, otherwise - not
- */
- if (!BP_NOMCP(bp)) {
- /* init fw_seq */
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-
- /* Get current FW pulse sequence */
- bp->fw_drv_pulse_wr_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
- DRV_PULSE_SEQ_MASK);
- BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
-
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
- DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
- if (!load_code) {
- BNX2X_ERR("MCP response failure, aborting\n");
- rc = -EBUSY;
- LOAD_ERROR_EXIT(bp, load_error1);
- }
- if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
- BNX2X_ERR("Driver load refused\n");
- rc = -EBUSY; /* other port in diagnostic mode */
- LOAD_ERROR_EXIT(bp, load_error1);
- }
- if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
- load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
- /* abort nic load if version mismatch */
- if (!bnx2x_test_firmware_version(bp, true)) {
- rc = -EBUSY;
+ if (IS_PF(bp)) {
+ /* set pf load just before approaching the MCP */
+ bnx2x_set_pf_load(bp);
+
+ /* if mcp exists send load request and analyze response */
+ if (!BP_NOMCP(bp)) {
+ /* attempt to load pf */
+ rc = bnx2x_nic_load_request(bp, &load_code);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error1);
+
+ /* what did mcp say? */
+ rc = bnx2x_nic_load_analyze_req(bp, load_code);
+ if (rc) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
}
+ } else {
+ load_code = bnx2x_nic_load_no_mcp(bp, port);
}
- } else {
- int path = BP_PATH(bp);
-
- DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]++;
- load_count[path][1 + port]++;
- DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 1)
- load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
- else if (load_count[path][1 + port] == 1)
- load_code = FW_MSG_CODE_DRV_LOAD_PORT;
- else
- load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
- }
+ /* mark pmf if applicable */
+ bnx2x_nic_load_pmf(bp, load_code);
- if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
- bp->port.pmf = 1;
- /*
- * We need the barrier to ensure the ordering between the
- * writing to bp->port.pmf here and reading it from the
- * bnx2x_periodic_task().
- */
- smp_mb();
- } else
- bp->port.pmf = 0;
-
- DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
+ /* Init Function state controlling object */
+ bnx2x__init_func_obj(bp);
- /* Init Function state controlling object */
- bnx2x__init_func_obj(bp);
-
- /* Initialize HW */
- rc = bnx2x_init_hw(bp, load_code);
- if (rc) {
- BNX2X_ERR("HW init failed, aborting\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- LOAD_ERROR_EXIT(bp, load_error2);
+ /* Initialize HW */
+ rc = bnx2x_init_hw(bp, load_code);
+ if (rc) {
+ BNX2X_ERR("HW init failed, aborting\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
}
/* Connect to IRQs */
rc = bnx2x_setup_irqs(bp);
if (rc) {
- BNX2X_ERR("IRQs setup failed\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ BNX2X_ERR("setup irqs failed\n");
+ if (IS_PF(bp))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
}
@@ -2340,78 +2587,89 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_nic_init(bp, load_code);
/* Init per-function objects */
- bnx2x_init_bp_objs(bp);
-
- if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
- (bp->common.shmem2_base)) {
- if (SHMEM2_HAS(bp, dcc_support))
- SHMEM2_WR(bp, dcc_support,
- (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
- SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
- if (SHMEM2_HAS(bp, afex_driver_support))
- SHMEM2_WR(bp, afex_driver_support,
- SHMEM_AFEX_SUPPORTED_VERSION_ONE);
- }
+ if (IS_PF(bp)) {
+ bnx2x_init_bp_objs(bp);
+ bnx2x_iov_nic_init(bp);
+
+ /* Set AFEX default VLAN tag to an invalid value */
+ bp->afex_def_vlan_tag = -1;
+ bnx2x_nic_load_afex_dcc(bp, load_code);
+ bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+ rc = bnx2x_func_start(bp);
+ if (rc) {
+ BNX2X_ERR("Function start failed!\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- /* Set AFEX default VLAN tag to an invalid value */
- bp->afex_def_vlan_tag = -1;
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
- bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
- rc = bnx2x_func_start(bp);
- if (rc) {
- BNX2X_ERR("Function start failed!\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- LOAD_ERROR_EXIT(bp, load_error3);
- }
+ /* Send LOAD_DONE command to MCP */
+ if (!BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp,
+ DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
- /* Send LOAD_DONE command to MCP */
- if (!BP_NOMCP(bp)) {
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- if (!load_code) {
- BNX2X_ERR("MCP response failure, aborting\n");
- rc = -EBUSY;
+ /* setup the leading queue */
+ rc = bnx2x_setup_leading(bp);
+ if (rc) {
+ BNX2X_ERR("Setup leading failed!\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
- }
- rc = bnx2x_setup_leading(bp);
- if (rc) {
- BNX2X_ERR("Setup leading failed!\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
+ /* set up the rest of the queues */
+ for_each_nondefault_eth_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
- for_each_nondefault_eth_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ /* setup rss */
+ rc = bnx2x_init_rss_pf(bp);
if (rc) {
- BNX2X_ERR("Queue setup failed\n");
+ BNX2X_ERR("PF RSS init failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
- }
- rc = bnx2x_init_rss_pf(bp);
- if (rc) {
- BNX2X_ERR("PF RSS init failed\n");
- LOAD_ERROR_EXIT(bp, load_error3);
+ } else { /* vf */
+ for_each_eth_queue(bp, i) {
+ rc = bnx2x_vfpf_setup_q(bp, i);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
}
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
/* Configure a ucast MAC */
- rc = bnx2x_set_eth_mac(bp, true);
+ if (IS_PF(bp))
+ rc = bnx2x_set_eth_mac(bp, true);
+ else /* vf */
+ rc = bnx2x_vfpf_set_mac(bp);
if (rc) {
BNX2X_ERR("Setting Ethernet MAC failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
- if (bp->pending_max) {
+ if (IS_PF(bp) && bp->pending_max) {
bnx2x_update_max_mf_config(bp, bp->pending_max);
bp->pending_max = 0;
}
- if (bp->port.pmf)
- bnx2x_initial_phy_init(bp, load_mode);
+ if (bp->port.pmf) {
+ rc = bnx2x_initial_phy_init(bp, load_mode);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
/* Start fast path */
@@ -2453,8 +2711,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (CNIC_ENABLED(bp))
bnx2x_load_cnic(bp);
- /* mark driver is loaded in shmem2 */
- if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ /* mark driver is loaded in shmem2 */
u32 val;
val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
@@ -2463,7 +2721,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
/* Wait for all pending SP commands to complete */
- if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+ if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
BNX2X_ERR("Timeout waiting for SP elements to complete\n");
bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
return -EBUSY;
@@ -2479,10 +2737,12 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
#ifndef BNX2X_STOP_ON_ERROR
load_error3:
- bnx2x_int_disable_sync(bp, 1);
+ if (IS_PF(bp)) {
+ bnx2x_int_disable_sync(bp, 1);
- /* Clean queueable objects */
- bnx2x_squeeze_objects(bp);
+ /* Clean queueable objects */
+ bnx2x_squeeze_objects(bp);
+ }
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
@@ -2492,7 +2752,7 @@ load_error3:
/* Release IRQs */
bnx2x_free_irq(bp);
load_error2:
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
}
@@ -2500,15 +2760,35 @@ load_error2:
bp->port.pmf = 0;
load_error1:
bnx2x_napi_disable(bp);
+
/* clear pf_load status, as it was already set */
- bnx2x_clear_pf_load(bp);
+ if (IS_PF(bp))
+ bnx2x_clear_pf_load(bp);
load_error0:
+ bnx2x_free_fp_mem(bp);
+ bnx2x_free_fw_stats_mem(bp);
bnx2x_free_mem(bp);
return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
+static int bnx2x_drain_tx_queues(struct bnx2x *bp)
+{
+ u8 rc = 0, cos, i;
+
+ /* Wait until tx fastpath tasks complete */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
@@ -2518,15 +2798,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
/* mark driver is unloaded in shmem2 */
- if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 val;
val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
}
- if ((bp->state == BNX2X_STATE_CLOSED) ||
- (bp->state == BNX2X_STATE_ERROR)) {
+ if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
+ (bp->state == BNX2X_STATE_CLOSED ||
+ bp->state == BNX2X_STATE_ERROR)) {
/* We can get here if the driver has been unloaded
* during parity error recovery and is either waiting for a
* leader to complete or for other functions to unload and
@@ -2544,8 +2825,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
return -EINVAL;
}
- /*
- * It's important to set the bp->state to the value different from
+ /* Nothing to do during unload if previous bnx2x_nic_load()
+ * has not completed successfully - all resources are released.
+ *
+ * We can get here only after an unsuccessful ndo_* callback, during which
+ * dev->IFF_UP flag is still on.
+ */
+ if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
+ return 0;
+
+ /* It's important to set the bp->state to a value different from
* BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
* may restart the Tx from the NAPI context (see bnx2x_tx_int()).
*/
@@ -2563,16 +2852,24 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
del_timer_sync(&bp->timer);
- /* Set ALWAYS_ALIVE bit in shmem */
- bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
-
- bnx2x_drv_pulse(bp);
+ if (IS_PF(bp)) {
+ /* Set ALWAYS_ALIVE bit in shmem */
+ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+ bnx2x_drv_pulse(bp);
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_save_statistics(bp);
+ }
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- bnx2x_save_statistics(bp);
+ /* wait till consumers catch up with producers in all queues */
+ bnx2x_drain_tx_queues(bp);
- /* Cleanup the chip if needed */
- if (unload_mode != UNLOAD_RECOVERY)
+ /* if VF, indicate to PF that this function is going down (PF will delete
+ * sp elements and clear initializations)
+ */
+ if (IS_VF(bp))
+ bnx2x_vfpf_close_vf(bp);
+ else if (unload_mode != UNLOAD_RECOVERY)
+ /* if this is a normal/close unload, we need to clean up the chip */
bnx2x_chip_cleanup(bp, unload_mode, keep_link);
else {
/* Send the UNLOAD_REQUEST to the MCP */
@@ -2605,7 +2902,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
* At this stage no more interrupts will arrive so we may safely clean
* the queueable objects here in case they failed to get cleaned so far.
*/
- bnx2x_squeeze_objects(bp);
+ if (IS_PF(bp))
+ bnx2x_squeeze_objects(bp);
/* There should be no more pending SP commands at this stage */
bp->sp_state = 0;
@@ -2619,19 +2917,22 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
- if (CNIC_LOADED(bp)) {
+ bnx2x_free_fp_mem(bp);
+ if (CNIC_LOADED(bp))
bnx2x_free_fp_mem_cnic(bp);
- bnx2x_free_mem_cnic(bp);
- }
- bnx2x_free_mem(bp);
+ if (IS_PF(bp)) {
+ bnx2x_free_mem(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_free_mem_cnic(bp);
+ }
bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false;
/* Check if there are pending parity attentions. If there are - set
* RECOVERY_IN_PROGRESS.
*/
- if (bnx2x_chk_parity_attn(bp, &global, false)) {
+ if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
bnx2x_set_reset_in_progress(bp);
/* Set RESET_IS_GLOBAL if needed */
@@ -2643,7 +2944,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
/* The last driver must disable a "close the gate" if there is no
* parity attention or "process kill" pending.
*/
- if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
+ if (IS_PF(bp) &&
+ !bnx2x_clear_pf_load(bp) &&
+ bnx2x_reset_is_done(bp, BP_PATH(bp)))
bnx2x_disable_close_the_gate(bp);
DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
@@ -2727,7 +3030,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
-
if (bnx2x_has_rx_work(fp)) {
work_done += bnx2x_rx_int(fp, budget - work_done);
@@ -2826,17 +3128,21 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
return bd_prod;
}
-static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
+#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
+static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
+ __sum16 tsum = (__force __sum16) csum;
+
if (fix > 0)
- csum = (u16) ~csum_fold(csum_sub(csum,
- csum_partial(t_header - fix, fix, 0)));
+ tsum = ~csum_fold(csum_sub((__force __wsum) csum,
+ csum_partial(t_header - fix, fix, 0)));
else if (fix < 0)
- csum = (u16) ~csum_fold(csum_add(csum,
- csum_partial(t_header, -fix, 0)));
+ tsum = ~csum_fold(csum_add((__force __wsum) csum,
+ csum_partial(t_header, -fix, 0)));
- return swab16(csum);
+ return bswab16(csum);
}
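/* Editor's note: illustration only, not part of the patch. bswab16()/bswab32()
 * are plain byte swaps; the __force casts only carry the sparse endianness
 * annotations on the __le16/__le32 result. Equivalent user-space arithmetic:
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));	/* exchange the two bytes */
}

int main(void)
{
	uint16_t csum = 0x1234;

	printf("0x%04x -> 0x%04x\n", csum, swab16(csum));	/* 0x1234 -> 0x3412 */
	return 0;
}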
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
@@ -2970,23 +3276,24 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
+ pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
pbd->tcp_flags = pbd_tcp_flags(skb);
if (xmit_type & XMIT_GSO_V4) {
- pbd->ip_id = swab16(ip_hdr(skb)->id);
+ pbd->ip_id = bswab16(ip_hdr(skb)->id);
pbd->tcp_pseudo_csum =
- swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0));
+ bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
} else
pbd->tcp_pseudo_csum =
- swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0));
+ bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
- pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
+ pbd->global_data |=
+ cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
}
/**
@@ -3000,12 +3307,12 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* 57712 related
*/
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- u32 *parsing_data, u32 xmit_type)
+ u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
if (xmit_type & XMIT_CSUM_TCP) {
*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@ -3013,12 +3320,11 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
- } else
- /* We support checksum offload for TCP and UDP only.
- * No need to pass the UDP header length - it's a constant.
- */
- return skb_transport_header(skb) +
- sizeof(struct udphdr) - skb->data;
+ }
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
@@ -3053,8 +3359,9 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
/* for now NS flag is not used in Linux */
pbd->global_data =
- (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
- ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
+ cpu_to_le16(hlen |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
pbd->ip_hlen_w = (skb_transport_header(skb) -
skb_network_header(skb)) >> 1;
@@ -3071,7 +3378,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
hlen = hlen*2;
if (xmit_type & XMIT_CSUM_TCP) {
- pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+ pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
} else {
s8 fix = SKB_CS_OFF(skb); /* signed! */
@@ -3151,17 +3458,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
- netif_tx_stop_queue(txq);
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ netif_tx_stop_queue(txq);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
DP(NETIF_MSG_TX_QUEUED,
- "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
+ "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
- ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+ ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
+ skb->len);
eth = (struct ethhdr *)skb->data;
@@ -3242,8 +3550,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le16(vlan_tx_tag_get(skb));
tx_start_bd->bd_flags.as_bitfield |=
(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
- } else
- tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ } else {
+ /* when transmitting in a vf, start bd must hold the ethertype
+ * for fw to enforce it
+ */
+#ifndef BNX2X_STOP_ON_ERROR
+ if (IS_VF(bp)) {
+#endif
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(ntohs(eth->h_proto));
+#ifndef BNX2X_STOP_ON_ERROR
+ } else {
+ /* used by FW for packet accounting */
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ }
+#endif
+ }
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
@@ -3259,9 +3581,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
hlen = bnx2x_set_pbd_csum_e2(bp, skb,
&pbd_e2_parsing_data,
xmit_type);
- if (IS_MF_SI(bp)) {
- /*
- * fill in the MAC addresses in the PBD - for local
+
+ if (IS_MF_SI(bp) || IS_VF(bp)) {
+ /* fill in the MAC addresses in the PBD - for local
* switching
*/
bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
@@ -3542,7 +3864,6 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev))
@@ -3738,6 +4059,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
} else /* if rx_ring_size specified - use it */
rx_ring_size = bp->rx_ring_size;
+ DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
+
/* Common */
sb = &bnx2x_fp(bp, index, status_blk);
@@ -3863,6 +4186,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
WARN_ON(delta < 0);
+ bnx2x_shrink_eth_fp(bp, delta);
if (CNIC_SUPPORT(bp))
/* move non eth FPs next to last eth FP
* must be done in that order
@@ -3883,7 +4207,10 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
- kfree(bp->fp->tpa_info);
+ int i;
+
+ for (i = 0; i < bp->fp_array_size; i++)
+ kfree(bp->fp[i].tpa_info);
kfree(bp->fp);
kfree(bp->sp_objs);
kfree(bp->fp_stats);
@@ -3903,18 +4230,22 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
/*
* The biggest MSI-X table we might need is as a maximum number of fast
- * path IGU SBs plus default SB (for PF).
+ * path IGU SBs plus default SB (for PF only).
*/
- msix_table_size = bp->igu_sb_cnt + 1;
+ msix_table_size = bp->igu_sb_cnt;
+ if (IS_PF(bp))
+ msix_table_size++;
+ BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
/* fp array: RSS plus CNIC related L2 queues */
fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
- BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
+ bp->fp_array_size = fp_array_size;
+ BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
- fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
+ fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
- for (i = 0; i < fp_array_size; i++) {
+ for (i = 0; i < bp->fp_array_size; i++) {
fp[i].tpa_info =
kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
sizeof(struct bnx2x_agg_info), GFP_KERNEL);
@@ -3925,13 +4256,13 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
bp->fp = fp;
/* allocate sp objs */
- bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
+ bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
GFP_KERNEL);
if (!bp->sp_objs)
goto alloc_err;
/* allocate fp_stats */
- bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
+ bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
GFP_KERNEL);
if (!bp->fp_stats)
goto alloc_err;
@@ -4010,7 +4341,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
/*
- * The selected actived PHY is always after swapping (in case PHY
+ * The selected activated PHY is always after swapping (in case PHY
* swapping is enabled). So when swapping is enabled, we need to reverse
* the configuration
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0991534f61da..aee7671ff4c1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,6 @@
/* bnx2x_cmn.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@
#include "bnx2x.h"
+#include "bnx2x_sriov.h"
/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -196,6 +197,7 @@ void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
/**
* bnx2x__link_status_update - handles link status change.
@@ -401,7 +403,7 @@ void bnx2x_set_rx_mode(struct net_device *dev);
* If bp->state is OPEN, should be called with
* netif_addr_lock_bh().
*/
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/**
* bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
@@ -413,11 +415,11 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
* @tx_accept_flags: tx accept configuration (tx switch)
* @ramrod_flags: ramrod configuration
*/
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags);
+int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags);
/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
@@ -477,8 +479,6 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
*/
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
/* Error handling */
-void bnx2x_panic_dump(struct bnx2x *bp);
-
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
/* validate correct fw is loaded */
@@ -496,9 +496,44 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ u16 bd_prod, u16 rx_comp_prod,
+ u16 rx_sge_prod)
+{
+ struct ustorm_eth_rx_producers rx_prods = {0};
+ u32 i;
+
+ /* Update producers */
+ rx_prods.bd_prod = bd_prod;
+ rx_prods.cqe_prod = rx_comp_prod;
+ rx_prods.sge_prod = rx_sge_prod;
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ * This is only applicable for weak-ordered memory model archs such
+ * as IA-64. The following barrier is also mandatory since FW will
+ * assume BDs must have buffers.
+ */
+ wmb();
+
+ for (i = 0; i < sizeof(rx_prods)/4; i++)
+ REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
+ ((u32 *)&rx_prods)[i]);
+
+ mmiowb(); /* keep prod updates ordered */
+
+ DP(NETIF_MSG_RX_STATUS,
+ "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
+ fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
+}
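/* Editor's note: illustration only, not part of the patch. The loop in
 * bnx2x_update_rx_prod() pushes the producer struct to the chip one 32-bit
 * word at a time; the same "view a struct as u32 words" idiom in user space
 * (field widths below are only an example layout):
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct rx_producers {
	uint16_t bd_prod;
	uint16_t cqe_prod;
	uint16_t sge_prod;
	uint16_t reserved;
};

int main(void)
{
	struct rx_producers prods = { .bd_prod = 10, .cqe_prod = 20, .sge_prod = 30 };
	uint32_t words[sizeof(prods) / 4];
	size_t i;

	memcpy(words, &prods, sizeof(prods));	/* copy avoids aliasing issues */
	for (i = 0; i < sizeof(prods) / 4; i++)
		printf("word[%zu] = 0x%08x\n", i, words[i]);
	return 0;
}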
+
/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);
@@ -507,9 +542,6 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p);
/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
-void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
-
/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
@@ -612,38 +644,6 @@ static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}
-static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 bd_prod,
- u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
-{
- struct ustorm_eth_rx_producers rx_prods = {0};
- u32 i;
-
- /* Update producers */
- rx_prods.bd_prod = bd_prod;
- rx_prods.cqe_prod = rx_comp_prod;
- rx_prods.sge_prod = rx_sge_prod;
-
- /*
- * Make sure that the BD and SGE data is updated before updating the
- * producers since FW might read the BD/SGE right after the producer
- * is updated.
- * This is only applicable for weak-ordered memory model archs such
- * as IA-64. The following barrier is also mandatory since FW will
- * assumes BDs must have buffers.
- */
- wmb();
-
- for (i = 0; i < sizeof(rx_prods)/4; i++)
- REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
-
- mmiowb(); /* keep prod updates ordered */
-
- DP(NETIF_MSG_RX_STATUS,
- "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
- fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
-}
-
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
u8 segment, u16 index, u8 op,
u8 update, u32 igu_addr)
@@ -819,7 +819,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
return;
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
__free_pages(page, PAGES_PER_SGE_SHIFT);
sw_buf->page = NULL;
@@ -863,7 +863,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
-void bnx2x_set_int_mode(struct bnx2x *bp);
+int bnx2x_set_int_mode(struct bnx2x *bp);
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
@@ -973,7 +973,6 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
return bnx2x_func_state_change(bp, &func_params);
}
-
/**
* bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
*
@@ -982,8 +981,8 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
* @fw_lo: pointer to lower part
* @mac: pointer to MAC address
*/
-static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
- u8 *mac)
+static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+ __le16 *fw_lo, u8 *mac)
{
((u8 *)fw_hi)[0] = mac[1];
((u8 *)fw_hi)[1] = mac[0];
@@ -1108,6 +1107,9 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp));
+ bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+ bnx2x_get_path_func_num(bp));
+
/* RSS configuration object */
bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
@@ -1125,15 +1127,7 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
return fp->cl_id;
}
-static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
-{
- struct bnx2x *bp = fp->bp;
-
- if (!CHIP_IS_E1x(bp))
- return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
- else
- return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
-}
+u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static inline void bnx2x_init_txdata(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata, u32 cid,
@@ -1228,7 +1222,7 @@ static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
#endif
}
cnt--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
return 0;
@@ -1263,7 +1257,7 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
}
netif_addr_unlock_bh(bp->dev);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
smp_mb();
@@ -1393,4 +1387,13 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
return false;
}
+/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string
+ *
+ * @bp: driver handle
+ * @buf: character buffer to fill with the fw name
+ * @buf_len: length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 10bc093d2ca4..568205436a15 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,6 +1,6 @@
/* bnx2x_dcb.c: Broadcom Everest network driver.
*
- * Copyright 2009-2012 Broadcom Corporation
+ * Copyright 2009-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -416,6 +416,7 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
GET_FLAGS(SHMEM2_RD(bp, drv_flags),
1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
if (bp->dcbx_port_params.pfc.enabled &&
(!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured))
/*
@@ -558,6 +559,7 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
GET_FLAGS(SHMEM2_RD(bp, drv_flags),
1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
if (!bp->dcbx_port_params.ets.enabled ||
@@ -1904,11 +1906,13 @@ static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
struct bnx2x *bp = netdev_priv(netdev);
DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
+ /* Fail to set state to "enabled" if dcbx is disabled in nvram */
if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ||
(bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) {
DP(BNX2X_MSG_DCB, "Can not set dcbx to enabled while it is disabled in nvm\n");
return 1;
}
+
bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
return 0;
}
@@ -2052,7 +2056,6 @@ static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
return;
-
if (setting) {
bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio);
bp->dcbx_config_params.admin_pfc_tx_enable = 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 06c7a0435948..d153f44cf8f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,6 +1,6 @@
/* bnx2x_dcb.h: Broadcom Everest network driver.
*
- * Copyright 2009-2012 Broadcom Corporation
+ * Copyright 2009-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index b926f58e983b..bff5e33eaa14 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,6 +1,6 @@
/* bnx2x_dump.h: Broadcom Everest network driver.
*
- * Copyright (c) 2012 Broadcom Corporation
+ * Copyright (c) 2012-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -22,120 +22,37 @@
#ifndef BNX2X_DUMP_H
#define BNX2X_DUMP_H
+/* WaitP Definitions */
+#define DRV_DUMP_XSTORM_WAITP_ADDRESS 0x2b8a80
+#define DRV_DUMP_TSTORM_WAITP_ADDRESS 0x1b8a80
+#define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80
+#define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80
-/*definitions */
-#define XSTORM_WAITP_ADDR 0x2b8a80
-#define TSTORM_WAITP_ADDR 0x1b8a80
-#define USTORM_WAITP_ADDR 0x338a80
-#define CSTORM_WAITP_ADDR 0x238a80
-#define TSTORM_CAM_MODE 0x1B1440
+/* Possible Chips */
+#define DUMP_CHIP_E1 1
+#define DUMP_CHIP_E1H 2
+#define DUMP_CHIP_E2 4
+#define DUMP_CHIP_E3A0 8
+#define DUMP_CHIP_E3B0 16
+#define DUMP_PATH_0 512
+#define DUMP_PATH_1 1024
+#define NUM_PRESETS 13
+#define NUM_CHIPS 5
-#define MAX_TIMER_PENDING 200
-#define TIMER_SCAN_DONT_CARE 0xFF
-#define RI_E1 0x1
-#define RI_E1H 0x2
-#define RI_E2 0x4
-#define RI_E3 0x8
-#define RI_E3B0 0x10
-#define RI_ONLINE 0x100
-#define RI_OFFLINE 0x0
-#define RI_PATH0_DUMP 0x200
-#define RI_PATH1_DUMP 0x400
-
-#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
-#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
-#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
-#define RI_E1E2_ONLINE (RI_E1 | RI_E2 | RI_ONLINE)
-#define RI_E1HE2_ONLINE (RI_E1H | RI_E2 | RI_ONLINE)
-#define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
-#define RI_E3_ONLINE (RI_E3 | RI_ONLINE)
-#define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE)
-#define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE)
-#define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE)
-#define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1E2E3_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1HE2E3_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1E1HE2E3_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E3B0_ONLINE (RI_E3B0 | RI_ONLINE)
-#define RI_E1E3B0_ONLINE (RI_E1 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE3B0_ONLINE (RI_E1H | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE3B0_ONLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE)
-#define RI_E2E3B0_ONLINE (RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E2E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE2E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE2E3B0_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E3E3B0_ONLINE (RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E3E3B0_ONLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE3E3B0_ONLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE3E3B0_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E2E3E3B0_ONLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E2E3E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE2E3E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE2E3E3B0_ONLINE \
- (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1_OFFLINE (RI_E1 | RI_OFFLINE)
-#define RI_E1H_OFFLINE (RI_E1H | RI_OFFLINE)
-#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H | RI_OFFLINE)
-#define RI_E2_OFFLINE (RI_E2 | RI_OFFLINE)
-#define RI_E1E2_OFFLINE (RI_E1 | RI_E2 | RI_OFFLINE)
-#define RI_E1HE2_OFFLINE (RI_E1H | RI_E2 | RI_OFFLINE)
-#define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE)
-#define RI_E3_OFFLINE (RI_E3 | RI_OFFLINE)
-#define RI_E1E3_OFFLINE (RI_E1 | RI_E3 | RI_OFFLINE)
-#define RI_E1HE3_OFFLINE (RI_E1H | RI_E3 | RI_OFFLINE)
-#define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE)
-#define RI_E2E3_OFFLINE (RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1E2E3_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1HE2E3_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1E1HE2E3_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E3B0_OFFLINE (RI_E3B0 | RI_OFFLINE)
-#define RI_E1E3B0_OFFLINE (RI_E1 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE)
-#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE2E3E3B0_OFFLINE \
- (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE
-#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE
-
-#define DBG_DMP_TRACE_BUFFER_SIZE 0x800
-#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \
- ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE)
-
-struct dump_sign {
- u32 time_stamp;
- u32 diag_ver;
- u32 grc_dump_ver;
-};
-
-struct dump_hdr {
- u32 hdr_size; /* in dwords, excluding this field */
- struct dump_sign dump_sign;
- u32 xstorm_waitp;
- u32 tstorm_waitp;
- u32 ustorm_waitp;
- u32 cstorm_waitp;
- u16 info;
- u8 idle_chk;
- u8 reserved;
+struct dump_header {
+ u32 header_size; /* Size in DWORDs excluding this field */
+ u32 version;
+ u32 preset;
+ u32 dump_meta_data; /* OR of CHIP and PATH. */
};
+#define BNX2X_DUMP_VERSION 0x50acff01
struct reg_addr {
u32 addr;
u32 size;
- u16 info;
+ u32 chips;
+ u32 presets;
};
struct wreg_addr {
@@ -143,1005 +60,2168 @@ struct wreg_addr {
u32 size;
u32 read_regs_count;
const u32 *read_regs;
- u16 info;
+ u32 chips;
+ u32 presets;
+};
+
+#define PAGE_MODE_VALUES_E2 2
+#define PAGE_READ_REGS_E2 1
+#define PAGE_WRITE_REGS_E2 1
+static const u32 page_vals_e2[] = {0, 128};
+static const u32 page_write_regs_e2[] = {328476};
+static const struct reg_addr page_read_regs_e2[] = {
+ {0x58000, 4608, DUMP_CHIP_E2, 0x30}
+};
+
+#define PAGE_MODE_VALUES_E3 2
+#define PAGE_READ_REGS_E3 1
+#define PAGE_WRITE_REGS_E3 1
+static const u32 page_vals_e3[] = {0, 128};
+static const u32 page_write_regs_e3[] = {328476};
+static const struct reg_addr page_read_regs_e3[] = {
+ {0x58000, 4608, DUMP_CHIP_E3A0 | DUMP_CHIP_E3B0, 0x30}
};
static const struct reg_addr reg_addrs[] = {
- { 0x2000, 341, RI_ALL_ONLINE },
- { 0x2800, 103, RI_ALL_ONLINE },
- { 0x3000, 287, RI_ALL_ONLINE },
- { 0x3800, 331, RI_ALL_ONLINE },
- { 0x8800, 6, RI_ALL_ONLINE },
- { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x9000, 147, RI_E2E3E3B0_ONLINE },
- { 0x924c, 1, RI_E2_ONLINE },
- { 0x9250, 16, RI_E2E3E3B0_ONLINE },
- { 0x9400, 33, RI_E2E3E3B0_ONLINE },
- { 0x9484, 5, RI_E3E3B0_ONLINE },
- { 0xa000, 27, RI_ALL_ONLINE },
- { 0xa06c, 1, RI_E1E1H_ONLINE },
- { 0xa070, 71, RI_ALL_ONLINE },
- { 0xa18c, 4, RI_E1E1H_ONLINE },
- { 0xa19c, 62, RI_ALL_ONLINE },
- { 0xa294, 2, RI_E1E1H_ONLINE },
- { 0xa29c, 2, RI_ALL_ONLINE },
- { 0xa2a4, 2, RI_E1E1HE2_ONLINE },
- { 0xa2ac, 52, RI_ALL_ONLINE },
- { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3b8, 2, RI_E3E3B0_ONLINE },
- { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa400, 40, RI_ALL_ONLINE },
- { 0xa4a0, 1, RI_E1E1HE2_ONLINE },
- { 0xa4a4, 2, RI_ALL_ONLINE },
- { 0xa4ac, 2, RI_E1E1H_ONLINE },
- { 0xa4b4, 1, RI_E1E1HE2_ONLINE },
- { 0xa4b8, 2, RI_E1E1H_ONLINE },
- { 0xa4c0, 3, RI_ALL_ONLINE },
- { 0xa4cc, 5, RI_E1E1H_ONLINE },
- { 0xa4e0, 3, RI_ALL_ONLINE },
- { 0xa4fc, 2, RI_ALL_ONLINE },
- { 0xa504, 1, RI_E1E1H_ONLINE },
- { 0xa508, 3, RI_ALL_ONLINE },
- { 0xa518, 1, RI_ALL_ONLINE },
- { 0xa520, 1, RI_ALL_ONLINE },
- { 0xa528, 1, RI_ALL_ONLINE },
- { 0xa530, 1, RI_ALL_ONLINE },
- { 0xa538, 1, RI_ALL_ONLINE },
- { 0xa540, 1, RI_ALL_ONLINE },
- { 0xa548, 1, RI_E1E1H_ONLINE },
- { 0xa550, 1, RI_E1E1H_ONLINE },
- { 0xa558, 1, RI_E1E1H_ONLINE },
- { 0xa560, 1, RI_E1E1H_ONLINE },
- { 0xa568, 1, RI_E1E1H_ONLINE },
- { 0xa570, 1, RI_ALL_ONLINE },
- { 0xa580, 1, RI_ALL_ONLINE },
- { 0xa590, 1, RI_ALL_ONLINE },
- { 0xa5a0, 1, RI_E1E1HE2_ONLINE },
- { 0xa5c0, 1, RI_ALL_ONLINE },
- { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5f8, 1, RI_E1HE2_ONLINE },
- { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE },
- { 0xa620, 6, RI_E2E3E3B0_ONLINE },
- { 0xa638, 20, RI_E2_ONLINE },
- { 0xa688, 42, RI_E2E3E3B0_ONLINE },
- { 0xa730, 1, RI_E2_ONLINE },
- { 0xa734, 2, RI_E2E3E3B0_ONLINE },
- { 0xa73c, 4, RI_E2_ONLINE },
- { 0xa74c, 5, RI_E2E3E3B0_ONLINE },
- { 0xa760, 5, RI_E2_ONLINE },
- { 0xa774, 7, RI_E2E3E3B0_ONLINE },
- { 0xa790, 15, RI_E2_ONLINE },
- { 0xa7cc, 4, RI_E2E3E3B0_ONLINE },
- { 0xa7e0, 6, RI_E3E3B0_ONLINE },
- { 0xa800, 18, RI_E2_ONLINE },
- { 0xa848, 33, RI_E2E3E3B0_ONLINE },
- { 0xa8cc, 2, RI_E3E3B0_ONLINE },
- { 0xa8d4, 4, RI_E2E3E3B0_ONLINE },
- { 0xa8e4, 1, RI_E3E3B0_ONLINE },
- { 0xa8e8, 1, RI_E2E3E3B0_ONLINE },
- { 0xa8f0, 1, RI_E2E3E3B0_ONLINE },
- { 0xa8f8, 30, RI_E3E3B0_ONLINE },
- { 0xa974, 73, RI_E3E3B0_ONLINE },
- { 0xac30, 1, RI_E3E3B0_ONLINE },
- { 0xac40, 1, RI_E3E3B0_ONLINE },
- { 0xac50, 1, RI_E3E3B0_ONLINE },
- { 0xac60, 1, RI_E3B0_ONLINE },
- { 0x10000, 9, RI_ALL_ONLINE },
- { 0x10024, 1, RI_E1E1HE2_ONLINE },
- { 0x10028, 5, RI_ALL_ONLINE },
- { 0x1003c, 6, RI_E1E1HE2_ONLINE },
- { 0x10054, 20, RI_ALL_ONLINE },
- { 0x100a4, 4, RI_E1E1HE2_ONLINE },
- { 0x100b4, 11, RI_ALL_ONLINE },
- { 0x100e0, 4, RI_E1E1HE2_ONLINE },
- { 0x100f0, 8, RI_ALL_ONLINE },
- { 0x10110, 6, RI_E1E1HE2_ONLINE },
- { 0x10128, 110, RI_ALL_ONLINE },
- { 0x102e0, 4, RI_E1E1HE2_ONLINE },
- { 0x102f0, 18, RI_ALL_ONLINE },
- { 0x10338, 20, RI_E1E1HE2_ONLINE },
- { 0x10388, 10, RI_ALL_ONLINE },
- { 0x10400, 6, RI_E1E1HE2_ONLINE },
- { 0x10418, 6, RI_ALL_ONLINE },
- { 0x10430, 10, RI_E1E1HE2_ONLINE },
- { 0x10458, 22, RI_ALL_ONLINE },
- { 0x104b0, 12, RI_E1E1HE2_ONLINE },
- { 0x104e0, 1, RI_ALL_ONLINE },
- { 0x104e8, 2, RI_ALL_ONLINE },
- { 0x104f4, 2, RI_ALL_ONLINE },
- { 0x10500, 146, RI_ALL_ONLINE },
- { 0x10750, 2, RI_E1E1HE2_ONLINE },
- { 0x10760, 2, RI_E1E1HE2_ONLINE },
- { 0x10770, 2, RI_E1E1HE2_ONLINE },
- { 0x10780, 2, RI_E1E1HE2_ONLINE },
- { 0x10790, 2, RI_ALL_ONLINE },
- { 0x107a0, 2, RI_E1E1HE2_ONLINE },
- { 0x107b0, 2, RI_E1E1HE2_ONLINE },
- { 0x107c0, 2, RI_E1E1HE2_ONLINE },
- { 0x107d0, 2, RI_E1E1HE2_ONLINE },
- { 0x107e0, 2, RI_ALL_ONLINE },
- { 0x10880, 2, RI_ALL_ONLINE },
- { 0x10900, 2, RI_ALL_ONLINE },
- { 0x16000, 1, RI_E1HE2_ONLINE },
- { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE },
- { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE },
- { 0x16090, 4, RI_E1HE2E3_ONLINE },
- { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0x160dc, 2, RI_E1HE2_ONLINE },
- { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE },
- { 0x1610c, 2, RI_E1HE2_ONLINE },
- { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE },
- { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x18010, 35, RI_E2E3E3B0_ONLINE },
- { 0x180a4, 2, RI_E2E3E3B0_ONLINE },
- { 0x180c0, 9, RI_E2E3E3B0_ONLINE },
- { 0x180e4, 1, RI_E2E3_ONLINE },
- { 0x180e8, 2, RI_E2E3E3B0_ONLINE },
- { 0x180f0, 1, RI_E2E3_ONLINE },
- { 0x180f4, 79, RI_E2E3E3B0_ONLINE },
- { 0x18230, 1, RI_E2E3_ONLINE },
- { 0x18234, 2, RI_E2E3E3B0_ONLINE },
- { 0x1823c, 1, RI_E2E3_ONLINE },
- { 0x18240, 13, RI_E2E3E3B0_ONLINE },
- { 0x18274, 1, RI_E2_ONLINE },
- { 0x18278, 81, RI_E2E3E3B0_ONLINE },
- { 0x18440, 63, RI_E2E3E3B0_ONLINE },
- { 0x18570, 42, RI_E3E3B0_ONLINE },
- { 0x18618, 25, RI_E3B0_ONLINE },
- { 0x18680, 44, RI_E3B0_ONLINE },
- { 0x18748, 12, RI_E3B0_ONLINE },
- { 0x18788, 1, RI_E3B0_ONLINE },
- { 0x1879c, 6, RI_E3B0_ONLINE },
- { 0x187c4, 51, RI_E3B0_ONLINE },
- { 0x18a00, 48, RI_E3B0_ONLINE },
- { 0x20000, 24, RI_ALL_ONLINE },
- { 0x20060, 8, RI_ALL_ONLINE },
- { 0x20080, 94, RI_ALL_ONLINE },
- { 0x201f8, 1, RI_E1E1H_ONLINE },
- { 0x201fc, 1, RI_ALL_ONLINE },
- { 0x20200, 1, RI_E1E1H_ONLINE },
- { 0x20204, 1, RI_ALL_ONLINE },
- { 0x20208, 1, RI_E1E1H_ONLINE },
- { 0x2020c, 39, RI_ALL_ONLINE },
- { 0x202c8, 1, RI_E2E3E3B0_ONLINE },
- { 0x202d8, 4, RI_E2E3E3B0_ONLINE },
- { 0x202f0, 1, RI_E3B0_ONLINE },
- { 0x20400, 2, RI_ALL_ONLINE },
- { 0x2040c, 8, RI_ALL_ONLINE },
- { 0x2042c, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0x20480, 1, RI_ALL_ONLINE },
- { 0x20500, 1, RI_ALL_ONLINE },
- { 0x20600, 1, RI_ALL_ONLINE },
- { 0x28000, 1, RI_ALL_ONLINE },
- { 0x28004, 8191, RI_ALL_OFFLINE },
- { 0x30000, 1, RI_ALL_ONLINE },
- { 0x30004, 16383, RI_ALL_OFFLINE },
- { 0x40000, 98, RI_ALL_ONLINE },
- { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE },
- { 0x401c8, 1, RI_E1H_ONLINE },
- { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x401d4, 2, RI_E2E3E3B0_ONLINE },
- { 0x40200, 4, RI_ALL_ONLINE },
- { 0x40220, 6, RI_E2E3E3B0_ONLINE },
- { 0x40238, 8, RI_E2E3_ONLINE },
- { 0x40258, 4, RI_E2E3E3B0_ONLINE },
- { 0x40268, 2, RI_E3E3B0_ONLINE },
- { 0x40270, 17, RI_E3B0_ONLINE },
- { 0x40400, 43, RI_ALL_ONLINE },
- { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE },
- { 0x404e0, 1, RI_E2E3E3B0_ONLINE },
- { 0x40500, 2, RI_ALL_ONLINE },
- { 0x40510, 2, RI_ALL_ONLINE },
- { 0x40520, 2, RI_ALL_ONLINE },
- { 0x40530, 2, RI_ALL_ONLINE },
- { 0x40540, 2, RI_ALL_ONLINE },
- { 0x40550, 10, RI_E2E3E3B0_ONLINE },
- { 0x40610, 2, RI_E2E3E3B0_ONLINE },
- { 0x42000, 164, RI_ALL_ONLINE },
- { 0x422c0, 4, RI_E2E3E3B0_ONLINE },
- { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x422e8, 1, RI_E2E3E3B0_ONLINE },
- { 0x42400, 49, RI_ALL_ONLINE },
- { 0x424c8, 38, RI_ALL_ONLINE },
- { 0x42568, 2, RI_ALL_ONLINE },
- { 0x42640, 5, RI_E2E3E3B0_ONLINE },
- { 0x42800, 1, RI_ALL_ONLINE },
- { 0x50000, 1, RI_ALL_ONLINE },
- { 0x50004, 19, RI_ALL_ONLINE },
- { 0x50050, 8, RI_ALL_ONLINE },
- { 0x50070, 88, RI_ALL_ONLINE },
- { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE },
- { 0x50200, 2, RI_ALL_ONLINE },
- { 0x5020c, 7, RI_ALL_ONLINE },
- { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x50240, 1, RI_ALL_ONLINE },
- { 0x50280, 1, RI_ALL_ONLINE },
- { 0x50300, 1, RI_E2E3E3B0_ONLINE },
- { 0x5030c, 1, RI_E2E3E3B0_ONLINE },
- { 0x50318, 1, RI_E2E3E3B0_ONLINE },
- { 0x5031c, 1, RI_E2E3E3B0_ONLINE },
- { 0x50320, 2, RI_E2E3E3B0_ONLINE },
- { 0x50330, 1, RI_E3B0_ONLINE },
- { 0x52000, 1, RI_ALL_ONLINE },
- { 0x54000, 1, RI_ALL_ONLINE },
- { 0x54004, 3327, RI_ALL_OFFLINE },
- { 0x58000, 1, RI_ALL_ONLINE },
- { 0x58004, 8191, RI_E1E1H_OFFLINE },
- { 0x60000, 26, RI_ALL_ONLINE },
- { 0x60068, 8, RI_E1E1H_ONLINE },
- { 0x60088, 12, RI_ALL_ONLINE },
- { 0x600b8, 9, RI_E1E1H_ONLINE },
- { 0x600dc, 1, RI_ALL_ONLINE },
- { 0x600e0, 5, RI_E1E1H_ONLINE },
- { 0x600f4, 1, RI_E1E1HE2_ONLINE },
- { 0x600f8, 1, RI_E1E1H_ONLINE },
- { 0x600fc, 8, RI_ALL_ONLINE },
- { 0x6013c, 24, RI_E1H_ONLINE },
- { 0x6019c, 2, RI_E2E3E3B0_ONLINE },
- { 0x601ac, 18, RI_E2E3E3B0_ONLINE },
- { 0x60200, 1, RI_ALL_ONLINE },
- { 0x60204, 2, RI_ALL_OFFLINE },
- { 0x60210, 13, RI_E2E3E3B0_ONLINE },
- { 0x60244, 16, RI_E3B0_ONLINE },
- { 0x61000, 1, RI_ALL_ONLINE },
- { 0x61004, 511, RI_ALL_OFFLINE },
- { 0x61800, 512, RI_E3E3B0_OFFLINE },
- { 0x70000, 8, RI_ALL_ONLINE },
- { 0x70020, 8184, RI_ALL_OFFLINE },
- { 0x78000, 8192, RI_E3E3B0_OFFLINE },
- { 0x85000, 3, RI_ALL_OFFLINE },
- { 0x8501c, 7, RI_ALL_OFFLINE },
- { 0x85048, 1, RI_ALL_OFFLINE },
- { 0x85200, 32, RI_ALL_OFFLINE },
- { 0xb0000, 16384, RI_E1H_OFFLINE },
- { 0xc1000, 7, RI_ALL_ONLINE },
- { 0xc103c, 2, RI_E2E3E3B0_ONLINE },
- { 0xc1800, 2, RI_ALL_ONLINE },
- { 0xc2000, 164, RI_ALL_ONLINE },
- { 0xc22c0, 5, RI_E2E3E3B0_ONLINE },
- { 0xc22d8, 4, RI_E2E3E3B0_ONLINE },
- { 0xc2400, 49, RI_ALL_ONLINE },
- { 0xc24c8, 38, RI_ALL_ONLINE },
- { 0xc2568, 2, RI_ALL_ONLINE },
- { 0xc2600, 1, RI_ALL_ONLINE },
- { 0xc4000, 165, RI_ALL_ONLINE },
- { 0xc42d8, 2, RI_E2E3E3B0_ONLINE },
- { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0xc42fc, 1, RI_E2E3E3B0_ONLINE },
- { 0xc4400, 51, RI_ALL_ONLINE },
- { 0xc44d0, 38, RI_ALL_ONLINE },
- { 0xc4570, 2, RI_ALL_ONLINE },
- { 0xc4578, 5, RI_E2E3E3B0_ONLINE },
- { 0xc4600, 1, RI_ALL_ONLINE },
- { 0xd0000, 19, RI_ALL_ONLINE },
- { 0xd004c, 8, RI_ALL_ONLINE },
- { 0xd006c, 91, RI_ALL_ONLINE },
- { 0xd01fc, 1, RI_E2E3E3B0_ONLINE },
- { 0xd0200, 2, RI_ALL_ONLINE },
- { 0xd020c, 7, RI_ALL_ONLINE },
- { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0xd0280, 1, RI_ALL_ONLINE },
- { 0xd0300, 1, RI_ALL_ONLINE },
- { 0xd0400, 1, RI_ALL_ONLINE },
- { 0xd0818, 1, RI_E3B0_ONLINE },
- { 0xd4000, 1, RI_ALL_ONLINE },
- { 0xd4004, 2559, RI_ALL_OFFLINE },
- { 0xd8000, 1, RI_ALL_ONLINE },
- { 0xd8004, 8191, RI_ALL_OFFLINE },
- { 0xe0000, 21, RI_ALL_ONLINE },
- { 0xe0054, 8, RI_ALL_ONLINE },
- { 0xe0074, 49, RI_ALL_ONLINE },
- { 0xe0138, 1, RI_E1E1H_ONLINE },
- { 0xe013c, 35, RI_ALL_ONLINE },
- { 0xe01f4, 1, RI_E2_ONLINE },
- { 0xe01f8, 1, RI_E2E3E3B0_ONLINE },
- { 0xe0200, 2, RI_ALL_ONLINE },
- { 0xe020c, 8, RI_ALL_ONLINE },
- { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0xe0280, 1, RI_ALL_ONLINE },
- { 0xe0300, 1, RI_ALL_ONLINE },
- { 0xe0400, 1, RI_E3B0_ONLINE },
- { 0xe1000, 1, RI_ALL_ONLINE },
- { 0xe2000, 1, RI_ALL_ONLINE },
- { 0xe2004, 2047, RI_ALL_OFFLINE },
- { 0xf0000, 1, RI_ALL_ONLINE },
- { 0xf0004, 16383, RI_ALL_OFFLINE },
- { 0x101000, 12, RI_ALL_ONLINE },
- { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x101054, 3, RI_E2E3E3B0_ONLINE },
- { 0x101100, 1, RI_ALL_ONLINE },
- { 0x101800, 8, RI_ALL_ONLINE },
- { 0x102000, 18, RI_ALL_ONLINE },
- { 0x102068, 6, RI_E2E3E3B0_ONLINE },
- { 0x102080, 17, RI_ALL_ONLINE },
- { 0x1020c8, 8, RI_E1H_ONLINE },
- { 0x1020e8, 9, RI_E2E3E3B0_ONLINE },
- { 0x102400, 1, RI_ALL_ONLINE },
- { 0x103000, 26, RI_ALL_ONLINE },
- { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x1030ac, 2, RI_E2E3E3B0_ONLINE },
- { 0x1030b4, 1, RI_E2_ONLINE },
- { 0x1030b8, 7, RI_E2E3E3B0_ONLINE },
- { 0x1030d8, 8, RI_E2E3E3B0_ONLINE },
- { 0x103400, 1, RI_E2E3E3B0_ONLINE },
- { 0x103404, 135, RI_E2E3E3B0_OFFLINE },
- { 0x103800, 8, RI_ALL_ONLINE },
- { 0x104000, 63, RI_ALL_ONLINE },
- { 0x10411c, 16, RI_E2E3E3B0_ONLINE },
- { 0x104200, 17, RI_ALL_ONLINE },
- { 0x104400, 64, RI_ALL_ONLINE },
- { 0x104500, 192, RI_ALL_OFFLINE },
- { 0x104800, 64, RI_ALL_ONLINE },
- { 0x104900, 192, RI_ALL_OFFLINE },
- { 0x105000, 256, RI_ALL_ONLINE },
- { 0x105400, 768, RI_ALL_OFFLINE },
- { 0x107000, 7, RI_E2E3E3B0_ONLINE },
- { 0x10701c, 1, RI_E3E3B0_ONLINE },
- { 0x108000, 33, RI_E1E1H_ONLINE },
- { 0x1080ac, 5, RI_E1H_ONLINE },
- { 0x108100, 5, RI_E1E1H_ONLINE },
- { 0x108120, 5, RI_E1E1H_ONLINE },
- { 0x108200, 74, RI_E1E1H_ONLINE },
- { 0x108400, 74, RI_E1E1H_ONLINE },
- { 0x108800, 152, RI_E1E1H_ONLINE },
- { 0x110000, 111, RI_E2E3E3B0_ONLINE },
- { 0x1101dc, 1, RI_E3E3B0_ONLINE },
- { 0x110200, 4, RI_E2E3E3B0_ONLINE },
- { 0x120000, 2, RI_ALL_ONLINE },
- { 0x120008, 4, RI_ALL_ONLINE },
- { 0x120018, 3, RI_ALL_ONLINE },
- { 0x120024, 4, RI_ALL_ONLINE },
- { 0x120034, 3, RI_ALL_ONLINE },
- { 0x120040, 4, RI_ALL_ONLINE },
- { 0x120050, 3, RI_ALL_ONLINE },
- { 0x12005c, 4, RI_ALL_ONLINE },
- { 0x12006c, 3, RI_ALL_ONLINE },
- { 0x120078, 4, RI_ALL_ONLINE },
- { 0x120088, 3, RI_ALL_ONLINE },
- { 0x120094, 4, RI_ALL_ONLINE },
- { 0x1200a4, 3, RI_ALL_ONLINE },
- { 0x1200b0, 4, RI_ALL_ONLINE },
- { 0x1200c0, 3, RI_ALL_ONLINE },
- { 0x1200cc, 4, RI_ALL_ONLINE },
- { 0x1200dc, 3, RI_ALL_ONLINE },
- { 0x1200e8, 4, RI_ALL_ONLINE },
- { 0x1200f8, 3, RI_ALL_ONLINE },
- { 0x120104, 4, RI_ALL_ONLINE },
- { 0x120114, 1, RI_ALL_ONLINE },
- { 0x120118, 22, RI_ALL_ONLINE },
- { 0x120170, 2, RI_E1E1H_ONLINE },
- { 0x120178, 243, RI_ALL_ONLINE },
- { 0x120544, 4, RI_E1E1H_ONLINE },
- { 0x120554, 6, RI_ALL_ONLINE },
- { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205f4, 1, RI_E1HE2_ONLINE },
- { 0x1205f8, 4, RI_E2E3E3B0_ONLINE },
- { 0x120618, 1, RI_E2E3E3B0_ONLINE },
- { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE },
- { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE },
- { 0x120698, 3, RI_E2E3E3B0_ONLINE },
- { 0x1206a4, 1, RI_E2_ONLINE },
- { 0x1206a8, 1, RI_E2E3E3B0_ONLINE },
- { 0x1206b0, 75, RI_E2E3E3B0_ONLINE },
- { 0x1207dc, 1, RI_E2_ONLINE },
- { 0x1207fc, 1, RI_E2E3E3B0_ONLINE },
- { 0x12080c, 65, RI_ALL_ONLINE },
- { 0x120910, 7, RI_E2E3E3B0_ONLINE },
- { 0x120930, 9, RI_E2E3E3B0_ONLINE },
- { 0x12095c, 37, RI_E3E3B0_ONLINE },
- { 0x120a00, 2, RI_E1E1HE2_ONLINE },
- { 0x120b00, 1, RI_E3E3B0_ONLINE },
- { 0x122000, 2, RI_ALL_ONLINE },
- { 0x122008, 2046, RI_E1_OFFLINE },
- { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE },
- { 0x130000, 35, RI_E2E3E3B0_ONLINE },
- { 0x130100, 29, RI_E2E3E3B0_ONLINE },
- { 0x130180, 1, RI_E2E3E3B0_ONLINE },
- { 0x130200, 1, RI_E2E3E3B0_ONLINE },
- { 0x130280, 1, RI_E2E3E3B0_ONLINE },
- { 0x130300, 5, RI_E2E3E3B0_ONLINE },
- { 0x130380, 1, RI_E2E3E3B0_ONLINE },
- { 0x130400, 1, RI_E2E3E3B0_ONLINE },
- { 0x130480, 5, RI_E2E3E3B0_ONLINE },
- { 0x130800, 72, RI_E2E3E3B0_ONLINE },
- { 0x131000, 136, RI_E2E3E3B0_ONLINE },
- { 0x132000, 148, RI_E2E3E3B0_ONLINE },
- { 0x134000, 544, RI_E2E3E3B0_ONLINE },
- { 0x140000, 1, RI_ALL_ONLINE },
- { 0x140004, 9, RI_E1E1HE2E3_ONLINE },
- { 0x140028, 8, RI_ALL_ONLINE },
- { 0x140048, 10, RI_E1E1HE2E3_ONLINE },
- { 0x140070, 1, RI_ALL_ONLINE },
- { 0x140074, 10, RI_E1E1HE2E3_ONLINE },
- { 0x14009c, 1, RI_ALL_ONLINE },
- { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE },
- { 0x1400b4, 7, RI_ALL_ONLINE },
- { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE },
- { 0x1400f8, 2, RI_ALL_ONLINE },
- { 0x140100, 5, RI_E1E1H_ONLINE },
- { 0x140114, 5, RI_E1E1HE2E3_ONLINE },
- { 0x140128, 7, RI_ALL_ONLINE },
- { 0x140144, 9, RI_E1E1HE2E3_ONLINE },
- { 0x140168, 8, RI_ALL_ONLINE },
- { 0x140188, 3, RI_E1E1HE2E3_ONLINE },
- { 0x140194, 13, RI_ALL_ONLINE },
- { 0x140200, 6, RI_E1E1HE2E3_ONLINE },
- { 0x140260, 4, RI_E2E3_ONLINE },
- { 0x140280, 4, RI_E2E3_ONLINE },
- { 0x1402e0, 2, RI_E2E3_ONLINE },
- { 0x1402e8, 2, RI_E2E3E3B0_ONLINE },
- { 0x1402f0, 9, RI_E2E3_ONLINE },
- { 0x140314, 44, RI_E3B0_ONLINE },
- { 0x144000, 4, RI_E1E1H_ONLINE },
- { 0x148000, 4, RI_E1E1H_ONLINE },
- { 0x14c000, 4, RI_E1E1H_ONLINE },
- { 0x150000, 4, RI_E1E1H_ONLINE },
- { 0x154000, 4, RI_E1E1H_ONLINE },
- { 0x158000, 4, RI_E1E1H_ONLINE },
- { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x15c008, 5, RI_E1H_ONLINE },
- { 0x15c020, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c040, 1, RI_E2E3_ONLINE },
- { 0x15c044, 2, RI_E2E3E3B0_ONLINE },
- { 0x15c04c, 8, RI_E2E3_ONLINE },
- { 0x15c06c, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c090, 13, RI_E2E3E3B0_ONLINE },
- { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE },
- { 0x15c128, 2, RI_E2E3_ONLINE },
- { 0x15c130, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c150, 2, RI_E3E3B0_ONLINE },
- { 0x15c158, 2, RI_E3_ONLINE },
- { 0x15c160, 149, RI_E3B0_ONLINE },
- { 0x161000, 7, RI_ALL_ONLINE },
- { 0x16103c, 2, RI_E2E3E3B0_ONLINE },
- { 0x161800, 2, RI_ALL_ONLINE },
- { 0x162000, 54, RI_E3E3B0_ONLINE },
- { 0x162200, 60, RI_E3E3B0_ONLINE },
- { 0x162400, 54, RI_E3E3B0_ONLINE },
- { 0x162600, 60, RI_E3E3B0_ONLINE },
- { 0x162800, 54, RI_E3E3B0_ONLINE },
- { 0x162a00, 60, RI_E3E3B0_ONLINE },
- { 0x162c00, 54, RI_E3E3B0_ONLINE },
- { 0x162e00, 60, RI_E3E3B0_ONLINE },
- { 0x164000, 60, RI_ALL_ONLINE },
- { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x164118, 15, RI_E2E3E3B0_ONLINE },
- { 0x164200, 1, RI_ALL_ONLINE },
- { 0x164208, 1, RI_ALL_ONLINE },
- { 0x164210, 1, RI_ALL_ONLINE },
- { 0x164218, 1, RI_ALL_ONLINE },
- { 0x164220, 1, RI_ALL_ONLINE },
- { 0x164228, 1, RI_ALL_ONLINE },
- { 0x164230, 1, RI_ALL_ONLINE },
- { 0x164238, 1, RI_ALL_ONLINE },
- { 0x164240, 1, RI_ALL_ONLINE },
- { 0x164248, 1, RI_ALL_ONLINE },
- { 0x164250, 1, RI_ALL_ONLINE },
- { 0x164258, 1, RI_ALL_ONLINE },
- { 0x164260, 1, RI_ALL_ONLINE },
- { 0x164270, 2, RI_ALL_ONLINE },
- { 0x164280, 2, RI_ALL_ONLINE },
- { 0x164800, 2, RI_ALL_ONLINE },
- { 0x165000, 2, RI_ALL_ONLINE },
- { 0x166000, 164, RI_ALL_ONLINE },
- { 0x1662cc, 7, RI_E2E3E3B0_ONLINE },
- { 0x166400, 49, RI_ALL_ONLINE },
- { 0x1664c8, 38, RI_ALL_ONLINE },
- { 0x166568, 2, RI_ALL_ONLINE },
- { 0x166570, 5, RI_E2E3E3B0_ONLINE },
- { 0x166800, 1, RI_ALL_ONLINE },
- { 0x168000, 137, RI_ALL_ONLINE },
- { 0x168224, 2, RI_E1E1H_ONLINE },
- { 0x16822c, 29, RI_ALL_ONLINE },
- { 0x1682a0, 12, RI_E1E1H_ONLINE },
- { 0x1682d0, 12, RI_ALL_ONLINE },
- { 0x168300, 2, RI_E1E1H_ONLINE },
- { 0x168308, 68, RI_ALL_ONLINE },
- { 0x168418, 2, RI_E1E1H_ONLINE },
- { 0x168420, 6, RI_ALL_ONLINE },
- { 0x168800, 19, RI_ALL_ONLINE },
- { 0x168900, 1, RI_ALL_ONLINE },
- { 0x168a00, 128, RI_ALL_ONLINE },
- { 0x16a000, 1, RI_ALL_ONLINE },
- { 0x16a004, 1535, RI_ALL_OFFLINE },
- { 0x16c000, 1, RI_ALL_ONLINE },
- { 0x16c004, 1535, RI_ALL_OFFLINE },
- { 0x16e000, 16, RI_E1H_ONLINE },
- { 0x16e040, 8, RI_E2E3E3B0_ONLINE },
- { 0x16e100, 1, RI_E1H_ONLINE },
- { 0x16e200, 2, RI_E1H_ONLINE },
- { 0x16e400, 161, RI_E1H_ONLINE },
- { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x16e68c, 12, RI_E1H_ONLINE },
- { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE },
- { 0x16e6cc, 4, RI_E1H_ONLINE },
- { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE },
- { 0x16e6e8, 5, RI_E2E3_ONLINE },
- { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE },
- { 0x16e768, 17, RI_E2E3E3B0_ONLINE },
- { 0x16e7ac, 12, RI_E3B0_ONLINE },
- { 0x170000, 24, RI_ALL_ONLINE },
- { 0x170060, 4, RI_E1E1H_ONLINE },
- { 0x170070, 65, RI_ALL_ONLINE },
- { 0x170194, 11, RI_E2E3E3B0_ONLINE },
- { 0x1701c4, 1, RI_E2E3E3B0_ONLINE },
- { 0x1701cc, 7, RI_E2E3E3B0_ONLINE },
- { 0x1701e8, 1, RI_E3E3B0_ONLINE },
- { 0x1701ec, 1, RI_E2E3E3B0_ONLINE },
- { 0x1701f4, 1, RI_E2E3E3B0_ONLINE },
- { 0x170200, 4, RI_ALL_ONLINE },
- { 0x170214, 1, RI_ALL_ONLINE },
- { 0x170218, 77, RI_E2E3E3B0_ONLINE },
- { 0x170400, 64, RI_E2E3E3B0_ONLINE },
- { 0x178000, 1, RI_ALL_ONLINE },
- { 0x180000, 61, RI_ALL_ONLINE },
- { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x180200, 58, RI_ALL_ONLINE },
- { 0x180340, 4, RI_ALL_ONLINE },
- { 0x180380, 1, RI_E2E3E3B0_ONLINE },
- { 0x180388, 1, RI_E2E3E3B0_ONLINE },
- { 0x180390, 1, RI_E2E3E3B0_ONLINE },
- { 0x180398, 1, RI_E2E3E3B0_ONLINE },
- { 0x1803a0, 5, RI_E2E3E3B0_ONLINE },
- { 0x1803b4, 2, RI_E3E3B0_ONLINE },
- { 0x180404, 255, RI_E1E1H_OFFLINE },
- { 0x181000, 4, RI_ALL_ONLINE },
- { 0x181010, 1020, RI_ALL_OFFLINE },
- { 0x182000, 4, RI_E3E3B0_ONLINE },
- { 0x1a0000, 1, RI_ALL_ONLINE },
- { 0x1a0004, 5631, RI_ALL_OFFLINE },
- { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x1a8000, 1, RI_ALL_ONLINE },
- { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x1b0000, 1, RI_ALL_ONLINE },
- { 0x1b0004, 15, RI_E1H_OFFLINE },
- { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b0044, 239, RI_E1H_OFFLINE },
- { 0x1b0400, 1, RI_ALL_ONLINE },
- { 0x1b0404, 255, RI_E1H_OFFLINE },
- { 0x1b0800, 1, RI_ALL_ONLINE },
- { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b0c00, 1, RI_ALL_ONLINE },
- { 0x1b1000, 1, RI_ALL_ONLINE },
- { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1400, 1, RI_ALL_ONLINE },
- { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1800, 128, RI_ALL_OFFLINE },
- { 0x1b1c00, 128, RI_ALL_OFFLINE },
- { 0x1b2000, 1, RI_ALL_ONLINE },
- { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x1b8000, 1, RI_ALL_ONLINE },
- { 0x1b8040, 1, RI_ALL_ONLINE },
- { 0x1b8080, 1, RI_ALL_ONLINE },
- { 0x1b80c0, 1, RI_ALL_ONLINE },
- { 0x1b8100, 1, RI_ALL_ONLINE },
- { 0x1b8140, 1, RI_ALL_ONLINE },
- { 0x1b8180, 1, RI_ALL_ONLINE },
- { 0x1b81c0, 1, RI_ALL_ONLINE },
- { 0x1b8200, 1, RI_ALL_ONLINE },
- { 0x1b8240, 1, RI_ALL_ONLINE },
- { 0x1b8280, 1, RI_ALL_ONLINE },
- { 0x1b82c0, 1, RI_ALL_ONLINE },
- { 0x1b8300, 1, RI_ALL_ONLINE },
- { 0x1b8340, 1, RI_ALL_ONLINE },
- { 0x1b8380, 1, RI_ALL_ONLINE },
- { 0x1b83c0, 1, RI_ALL_ONLINE },
- { 0x1b8400, 1, RI_ALL_ONLINE },
- { 0x1b8440, 1, RI_ALL_ONLINE },
- { 0x1b8480, 1, RI_ALL_ONLINE },
- { 0x1b84c0, 1, RI_ALL_ONLINE },
- { 0x1b8500, 1, RI_ALL_ONLINE },
- { 0x1b8540, 1, RI_ALL_ONLINE },
- { 0x1b8580, 1, RI_ALL_ONLINE },
- { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x1b8800, 1, RI_ALL_ONLINE },
- { 0x1b8840, 1, RI_ALL_ONLINE },
- { 0x1b8880, 1, RI_ALL_ONLINE },
- { 0x1b88c0, 1, RI_ALL_ONLINE },
- { 0x1b8900, 1, RI_ALL_ONLINE },
- { 0x1b8940, 1, RI_ALL_ONLINE },
- { 0x1b8980, 1, RI_ALL_ONLINE },
- { 0x1b89c0, 1, RI_ALL_ONLINE },
- { 0x1b8a00, 1, RI_ALL_ONLINE },
- { 0x1b8a40, 1, RI_ALL_ONLINE },
- { 0x1b8a80, 1, RI_ALL_ONLINE },
- { 0x1b8ac0, 1, RI_ALL_ONLINE },
- { 0x1b8b00, 1, RI_ALL_ONLINE },
- { 0x1b8b40, 1, RI_ALL_ONLINE },
- { 0x1b8b80, 1, RI_ALL_ONLINE },
- { 0x1b8bc0, 1, RI_ALL_ONLINE },
- { 0x1b8c00, 1, RI_ALL_ONLINE },
- { 0x1b8c40, 1, RI_ALL_ONLINE },
- { 0x1b8c80, 1, RI_ALL_ONLINE },
- { 0x1b8cc0, 1, RI_ALL_ONLINE },
- { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b8d00, 1, RI_ALL_ONLINE },
- { 0x1b8d40, 1, RI_ALL_ONLINE },
- { 0x1b8d80, 1, RI_ALL_ONLINE },
- { 0x1b8dc0, 1, RI_ALL_ONLINE },
- { 0x1b8e00, 1, RI_ALL_ONLINE },
- { 0x1b8e40, 1, RI_ALL_ONLINE },
- { 0x1b8e80, 1, RI_ALL_ONLINE },
- { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x1b8fe8, 2, RI_E3E3B0_ONLINE },
- { 0x1b9000, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b9040, 3, RI_E2E3E3B0_ONLINE },
- { 0x1b905c, 1, RI_E3E3B0_ONLINE },
- { 0x1b9064, 1, RI_E3B0_ONLINE },
- { 0x1b9080, 10, RI_E3B0_ONLINE },
- { 0x1b9400, 14, RI_E2E3E3B0_OFFLINE },
- { 0x1b943c, 19, RI_E2E3E3B0_OFFLINE },
- { 0x1b9490, 10, RI_E2E3E3B0_OFFLINE },
- { 0x1c0000, 2, RI_ALL_ONLINE },
- { 0x200000, 65, RI_ALL_ONLINE },
- { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x200200, 58, RI_ALL_ONLINE },
- { 0x200340, 4, RI_ALL_ONLINE },
- { 0x200380, 1, RI_E2E3E3B0_ONLINE },
- { 0x200388, 1, RI_E2E3E3B0_ONLINE },
- { 0x200390, 1, RI_E2E3E3B0_ONLINE },
- { 0x200398, 1, RI_E2E3E3B0_ONLINE },
- { 0x2003a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x2003a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x200404, 255, RI_E1E1H_OFFLINE },
- { 0x202000, 4, RI_ALL_ONLINE },
- { 0x202010, 2044, RI_ALL_OFFLINE },
- { 0x204000, 4, RI_E3E3B0_ONLINE },
- { 0x220000, 1, RI_ALL_ONLINE },
- { 0x220004, 5631, RI_ALL_OFFLINE },
- { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x228000, 1, RI_ALL_ONLINE },
- { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x230000, 1, RI_ALL_ONLINE },
- { 0x230004, 15, RI_E1H_OFFLINE },
- { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x230044, 239, RI_E1H_OFFLINE },
- { 0x230400, 1, RI_ALL_ONLINE },
- { 0x230404, 255, RI_E1H_OFFLINE },
- { 0x230800, 1, RI_ALL_ONLINE },
- { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x230c00, 1, RI_ALL_ONLINE },
- { 0x231000, 1, RI_ALL_ONLINE },
- { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231400, 1, RI_ALL_ONLINE },
- { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231800, 128, RI_ALL_OFFLINE },
- { 0x231c00, 128, RI_ALL_OFFLINE },
- { 0x232000, 1, RI_ALL_ONLINE },
- { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x232404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x238000, 1, RI_ALL_ONLINE },
- { 0x238040, 1, RI_ALL_ONLINE },
- { 0x238080, 1, RI_ALL_ONLINE },
- { 0x2380c0, 1, RI_ALL_ONLINE },
- { 0x238100, 1, RI_ALL_ONLINE },
- { 0x238140, 1, RI_ALL_ONLINE },
- { 0x238180, 1, RI_ALL_ONLINE },
- { 0x2381c0, 1, RI_ALL_ONLINE },
- { 0x238200, 1, RI_ALL_ONLINE },
- { 0x238240, 1, RI_ALL_ONLINE },
- { 0x238280, 1, RI_ALL_ONLINE },
- { 0x2382c0, 1, RI_ALL_ONLINE },
- { 0x238300, 1, RI_ALL_ONLINE },
- { 0x238340, 1, RI_ALL_ONLINE },
- { 0x238380, 1, RI_ALL_ONLINE },
- { 0x2383c0, 1, RI_ALL_ONLINE },
- { 0x238400, 1, RI_ALL_ONLINE },
- { 0x238440, 1, RI_ALL_ONLINE },
- { 0x238480, 1, RI_ALL_ONLINE },
- { 0x2384c0, 1, RI_ALL_ONLINE },
- { 0x238500, 1, RI_ALL_ONLINE },
- { 0x238540, 1, RI_ALL_ONLINE },
- { 0x238580, 1, RI_ALL_ONLINE },
- { 0x2385c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x238800, 1, RI_ALL_ONLINE },
- { 0x238840, 1, RI_ALL_ONLINE },
- { 0x238880, 1, RI_ALL_ONLINE },
- { 0x2388c0, 1, RI_ALL_ONLINE },
- { 0x238900, 1, RI_ALL_ONLINE },
- { 0x238940, 1, RI_ALL_ONLINE },
- { 0x238980, 1, RI_ALL_ONLINE },
- { 0x2389c0, 1, RI_ALL_ONLINE },
- { 0x238a00, 1, RI_ALL_ONLINE },
- { 0x238a40, 1, RI_ALL_ONLINE },
- { 0x238a80, 1, RI_ALL_ONLINE },
- { 0x238ac0, 1, RI_ALL_ONLINE },
- { 0x238b00, 1, RI_ALL_ONLINE },
- { 0x238b40, 1, RI_ALL_ONLINE },
- { 0x238b80, 1, RI_ALL_ONLINE },
- { 0x238bc0, 1, RI_ALL_ONLINE },
- { 0x238c00, 1, RI_ALL_ONLINE },
- { 0x238c40, 1, RI_ALL_ONLINE },
- { 0x238c80, 1, RI_ALL_ONLINE },
- { 0x238cc0, 1, RI_ALL_ONLINE },
- { 0x238cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x238d00, 1, RI_ALL_ONLINE },
- { 0x238d40, 1, RI_ALL_ONLINE },
- { 0x238d80, 1, RI_ALL_ONLINE },
- { 0x238dc0, 1, RI_ALL_ONLINE },
- { 0x238e00, 1, RI_ALL_ONLINE },
- { 0x238e40, 1, RI_ALL_ONLINE },
- { 0x238e80, 1, RI_ALL_ONLINE },
- { 0x238e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x238fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x238fe8, 2, RI_E3E3B0_ONLINE },
- { 0x239000, 1, RI_E2E3E3B0_ONLINE },
- { 0x239040, 3, RI_E2E3E3B0_ONLINE },
- { 0x23905c, 1, RI_E3E3B0_ONLINE },
- { 0x239064, 1, RI_E3B0_ONLINE },
- { 0x239080, 10, RI_E3B0_ONLINE },
- { 0x240000, 2, RI_ALL_ONLINE },
- { 0x280000, 65, RI_ALL_ONLINE },
- { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x280200, 58, RI_ALL_ONLINE },
- { 0x280340, 4, RI_ALL_ONLINE },
- { 0x280380, 1, RI_E2E3E3B0_ONLINE },
- { 0x280388, 1, RI_E2E3E3B0_ONLINE },
- { 0x280390, 1, RI_E2E3E3B0_ONLINE },
- { 0x280398, 1, RI_E2E3E3B0_ONLINE },
- { 0x2803a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x2803a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x280404, 255, RI_E1E1H_OFFLINE },
- { 0x282000, 4, RI_ALL_ONLINE },
- { 0x282010, 2044, RI_ALL_OFFLINE },
- { 0x284000, 4, RI_E3E3B0_ONLINE },
- { 0x2a0000, 1, RI_ALL_ONLINE },
- { 0x2a0004, 5631, RI_ALL_OFFLINE },
- { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x2a8000, 1, RI_ALL_ONLINE },
- { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x2b0000, 1, RI_ALL_ONLINE },
- { 0x2b0004, 15, RI_E1H_OFFLINE },
- { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b0044, 239, RI_E1H_OFFLINE },
- { 0x2b0400, 1, RI_ALL_ONLINE },
- { 0x2b0404, 255, RI_E1H_OFFLINE },
- { 0x2b0800, 1, RI_ALL_ONLINE },
- { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b0c00, 1, RI_ALL_ONLINE },
- { 0x2b1000, 1, RI_ALL_ONLINE },
- { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1400, 1, RI_ALL_ONLINE },
- { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1800, 128, RI_ALL_OFFLINE },
- { 0x2b1c00, 128, RI_ALL_OFFLINE },
- { 0x2b2000, 1, RI_ALL_ONLINE },
- { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x2b8000, 1, RI_ALL_ONLINE },
- { 0x2b8040, 1, RI_ALL_ONLINE },
- { 0x2b8080, 1, RI_ALL_ONLINE },
- { 0x2b80c0, 1, RI_ALL_ONLINE },
- { 0x2b8100, 1, RI_ALL_ONLINE },
- { 0x2b8140, 1, RI_ALL_ONLINE },
- { 0x2b8180, 1, RI_ALL_ONLINE },
- { 0x2b81c0, 1, RI_ALL_ONLINE },
- { 0x2b8200, 1, RI_ALL_ONLINE },
- { 0x2b8240, 1, RI_ALL_ONLINE },
- { 0x2b8280, 1, RI_ALL_ONLINE },
- { 0x2b82c0, 1, RI_ALL_ONLINE },
- { 0x2b8300, 1, RI_ALL_ONLINE },
- { 0x2b8340, 1, RI_ALL_ONLINE },
- { 0x2b8380, 1, RI_ALL_ONLINE },
- { 0x2b83c0, 1, RI_ALL_ONLINE },
- { 0x2b8400, 1, RI_ALL_ONLINE },
- { 0x2b8440, 1, RI_ALL_ONLINE },
- { 0x2b8480, 1, RI_ALL_ONLINE },
- { 0x2b84c0, 1, RI_ALL_ONLINE },
- { 0x2b8500, 1, RI_ALL_ONLINE },
- { 0x2b8540, 1, RI_ALL_ONLINE },
- { 0x2b8580, 1, RI_ALL_ONLINE },
- { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x2b8800, 1, RI_ALL_ONLINE },
- { 0x2b8840, 1, RI_ALL_ONLINE },
- { 0x2b8880, 1, RI_ALL_ONLINE },
- { 0x2b88c0, 1, RI_ALL_ONLINE },
- { 0x2b8900, 1, RI_ALL_ONLINE },
- { 0x2b8940, 1, RI_ALL_ONLINE },
- { 0x2b8980, 1, RI_ALL_ONLINE },
- { 0x2b89c0, 1, RI_ALL_ONLINE },
- { 0x2b8a00, 1, RI_ALL_ONLINE },
- { 0x2b8a40, 1, RI_ALL_ONLINE },
- { 0x2b8a80, 1, RI_ALL_ONLINE },
- { 0x2b8ac0, 1, RI_ALL_ONLINE },
- { 0x2b8b00, 1, RI_ALL_ONLINE },
- { 0x2b8b40, 1, RI_ALL_ONLINE },
- { 0x2b8b80, 1, RI_ALL_ONLINE },
- { 0x2b8bc0, 1, RI_ALL_ONLINE },
- { 0x2b8c00, 1, RI_ALL_ONLINE },
- { 0x2b8c40, 1, RI_ALL_ONLINE },
- { 0x2b8c80, 1, RI_ALL_ONLINE },
- { 0x2b8cc0, 1, RI_ALL_ONLINE },
- { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b8d00, 1, RI_ALL_ONLINE },
- { 0x2b8d40, 1, RI_ALL_ONLINE },
- { 0x2b8d80, 1, RI_ALL_ONLINE },
- { 0x2b8dc0, 1, RI_ALL_ONLINE },
- { 0x2b8e00, 1, RI_ALL_ONLINE },
- { 0x2b8e40, 1, RI_ALL_ONLINE },
- { 0x2b8e80, 1, RI_ALL_ONLINE },
- { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x2b8fe8, 2, RI_E3E3B0_ONLINE },
- { 0x2b9000, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b9040, 3, RI_E2E3E3B0_ONLINE },
- { 0x2b905c, 1, RI_E3E3B0_ONLINE },
- { 0x2b9064, 1, RI_E3B0_ONLINE },
- { 0x2b9080, 10, RI_E3B0_ONLINE },
- { 0x2b9400, 14, RI_E2E3E3B0_ONLINE },
- { 0x2b943c, 19, RI_E2E3E3B0_ONLINE },
- { 0x2b9490, 10, RI_E2E3E3B0_ONLINE },
- { 0x2c0000, 2, RI_ALL_ONLINE },
- { 0x300000, 65, RI_ALL_ONLINE },
- { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x300200, 58, RI_ALL_ONLINE },
- { 0x300340, 4, RI_ALL_ONLINE },
- { 0x300380, 1, RI_E2E3E3B0_ONLINE },
- { 0x300388, 1, RI_E2E3E3B0_ONLINE },
- { 0x300390, 1, RI_E2E3E3B0_ONLINE },
- { 0x300398, 1, RI_E2E3E3B0_ONLINE },
- { 0x3003a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x3003a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x300404, 255, RI_E1E1H_OFFLINE },
- { 0x302000, 4, RI_ALL_ONLINE },
- { 0x302010, 2044, RI_ALL_OFFLINE },
- { 0x304000, 4, RI_E3E3B0_ONLINE },
- { 0x320000, 1, RI_ALL_ONLINE },
- { 0x320004, 5631, RI_ALL_OFFLINE },
- { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x328000, 1, RI_ALL_ONLINE },
- { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x330000, 1, RI_ALL_ONLINE },
- { 0x330004, 15, RI_E1H_OFFLINE },
- { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x330044, 239, RI_E1H_OFFLINE },
- { 0x330400, 1, RI_ALL_ONLINE },
- { 0x330404, 255, RI_E1H_OFFLINE },
- { 0x330800, 1, RI_ALL_ONLINE },
- { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x330c00, 1, RI_ALL_ONLINE },
- { 0x331000, 1, RI_ALL_ONLINE },
- { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331400, 1, RI_ALL_ONLINE },
- { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331800, 128, RI_ALL_OFFLINE },
- { 0x331c00, 128, RI_ALL_OFFLINE },
- { 0x332000, 1, RI_ALL_ONLINE },
- { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x332404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x338000, 1, RI_ALL_ONLINE },
- { 0x338040, 1, RI_ALL_ONLINE },
- { 0x338080, 1, RI_ALL_ONLINE },
- { 0x3380c0, 1, RI_ALL_ONLINE },
- { 0x338100, 1, RI_ALL_ONLINE },
- { 0x338140, 1, RI_ALL_ONLINE },
- { 0x338180, 1, RI_ALL_ONLINE },
- { 0x3381c0, 1, RI_ALL_ONLINE },
- { 0x338200, 1, RI_ALL_ONLINE },
- { 0x338240, 1, RI_ALL_ONLINE },
- { 0x338280, 1, RI_ALL_ONLINE },
- { 0x3382c0, 1, RI_ALL_ONLINE },
- { 0x338300, 1, RI_ALL_ONLINE },
- { 0x338340, 1, RI_ALL_ONLINE },
- { 0x338380, 1, RI_ALL_ONLINE },
- { 0x3383c0, 1, RI_ALL_ONLINE },
- { 0x338400, 1, RI_ALL_ONLINE },
- { 0x338440, 1, RI_ALL_ONLINE },
- { 0x338480, 1, RI_ALL_ONLINE },
- { 0x3384c0, 1, RI_ALL_ONLINE },
- { 0x338500, 1, RI_ALL_ONLINE },
- { 0x338540, 1, RI_ALL_ONLINE },
- { 0x338580, 1, RI_ALL_ONLINE },
- { 0x3385c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x338800, 1, RI_ALL_ONLINE },
- { 0x338840, 1, RI_ALL_ONLINE },
- { 0x338880, 1, RI_ALL_ONLINE },
- { 0x3388c0, 1, RI_ALL_ONLINE },
- { 0x338900, 1, RI_ALL_ONLINE },
- { 0x338940, 1, RI_ALL_ONLINE },
- { 0x338980, 1, RI_ALL_ONLINE },
- { 0x3389c0, 1, RI_ALL_ONLINE },
- { 0x338a00, 1, RI_ALL_ONLINE },
- { 0x338a40, 1, RI_ALL_ONLINE },
- { 0x338a80, 1, RI_ALL_ONLINE },
- { 0x338ac0, 1, RI_ALL_ONLINE },
- { 0x338b00, 1, RI_ALL_ONLINE },
- { 0x338b40, 1, RI_ALL_ONLINE },
- { 0x338b80, 1, RI_ALL_ONLINE },
- { 0x338bc0, 1, RI_ALL_ONLINE },
- { 0x338c00, 1, RI_ALL_ONLINE },
- { 0x338c40, 1, RI_ALL_ONLINE },
- { 0x338c80, 1, RI_ALL_ONLINE },
- { 0x338cc0, 1, RI_ALL_ONLINE },
- { 0x338cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x338d00, 1, RI_ALL_ONLINE },
- { 0x338d40, 1, RI_ALL_ONLINE },
- { 0x338d80, 1, RI_ALL_ONLINE },
- { 0x338dc0, 1, RI_ALL_ONLINE },
- { 0x338e00, 1, RI_ALL_ONLINE },
- { 0x338e40, 1, RI_ALL_ONLINE },
- { 0x338e80, 1, RI_ALL_ONLINE },
- { 0x338e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x338fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x338fe8, 2, RI_E3E3B0_ONLINE },
- { 0x339000, 1, RI_E2E3E3B0_ONLINE },
- { 0x339040, 3, RI_E2E3E3B0_ONLINE },
- { 0x33905c, 1, RI_E3E3B0_ONLINE },
- { 0x339064, 1, RI_E3B0_ONLINE },
- { 0x339080, 10, RI_E3B0_ONLINE },
- { 0x340000, 2, RI_ALL_ONLINE },
+ { 0x2000, 1, 0x1f, 0xfff},
+ { 0x2004, 1, 0x1f, 0x1fff},
+ { 0x2008, 25, 0x1f, 0xfff},
+ { 0x206c, 1, 0x1f, 0x1fff},
+ { 0x2070, 313, 0x1f, 0xfff},
+ { 0x2800, 103, 0x1f, 0xfff},
+ { 0x3000, 287, 0x1f, 0xfff},
+ { 0x3800, 331, 0x1f, 0xfff},
+ { 0x8800, 6, 0x1f, 0x924},
+ { 0x8818, 1, 0x1e, 0x924},
+ { 0x9000, 4, 0x1c, 0x924},
+ { 0x9010, 7, 0x1c, 0xfff},
+ { 0x902c, 1, 0x1c, 0x924},
+ { 0x9030, 1, 0x1c, 0xfff},
+ { 0x9034, 13, 0x1c, 0x924},
+ { 0x9068, 16, 0x1c, 0xfff},
+ { 0x90a8, 98, 0x1c, 0x924},
+ { 0x9230, 2, 0x1c, 0xfff},
+ { 0x9238, 3, 0x1c, 0x924},
+ { 0x9244, 1, 0x1c, 0xfff},
+ { 0x9248, 1, 0x1c, 0x924},
+ { 0x924c, 1, 0x4, 0x924},
+ { 0x9250, 16, 0x1c, 0x924},
+ { 0x92a8, 2, 0x1c, 0x1fff},
+ { 0x92b4, 1, 0x1c, 0x1fff},
+ { 0x9400, 33, 0x1c, 0x924},
+ { 0x9484, 5, 0x18, 0x924},
+ { 0xa000, 27, 0x1f, 0x924},
+ { 0xa06c, 1, 0x3, 0x924},
+ { 0xa070, 2, 0x1f, 0x924},
+ { 0xa078, 1, 0x1f, 0x1fff},
+ { 0xa07c, 31, 0x1f, 0x924},
+ { 0xa0f8, 1, 0x1f, 0x1fff},
+ { 0xa0fc, 3, 0x1f, 0x924},
+ { 0xa108, 1, 0x1f, 0x1fff},
+ { 0xa10c, 3, 0x1f, 0x924},
+ { 0xa118, 1, 0x1f, 0x1fff},
+ { 0xa11c, 28, 0x1f, 0x924},
+ { 0xa18c, 4, 0x3, 0x924},
+ { 0xa19c, 3, 0x1f, 0x924},
+ { 0xa1a8, 1, 0x1f, 0x1fff},
+ { 0xa1ac, 3, 0x1f, 0x924},
+ { 0xa1b8, 1, 0x1f, 0x1fff},
+ { 0xa1bc, 54, 0x1f, 0x924},
+ { 0xa294, 2, 0x3, 0x924},
+ { 0xa29c, 2, 0x1f, 0x924},
+ { 0xa2a4, 2, 0x7, 0x924},
+ { 0xa2ac, 2, 0x1f, 0x924},
+ { 0xa2b4, 1, 0x1f, 0x1fff},
+ { 0xa2b8, 49, 0x1f, 0x924},
+ { 0xa38c, 2, 0x1f, 0x1fff},
+ { 0xa398, 1, 0x1f, 0x1fff},
+ { 0xa39c, 7, 0x1e, 0x924},
+ { 0xa3b8, 2, 0x18, 0x924},
+ { 0xa3c0, 1, 0x1e, 0x924},
+ { 0xa3c4, 1, 0x1e, 0xfff},
+ { 0xa3c8, 1, 0x1e, 0x924},
+ { 0xa3d0, 1, 0x1e, 0x924},
+ { 0xa3d8, 1, 0x1e, 0x924},
+ { 0xa3e0, 1, 0x1e, 0x924},
+ { 0xa3e8, 1, 0x1e, 0x924},
+ { 0xa3f0, 1, 0x1e, 0x924},
+ { 0xa3f8, 1, 0x1e, 0x924},
+ { 0xa400, 1, 0x1f, 0x924},
+ { 0xa404, 1, 0x1f, 0xfff},
+ { 0xa408, 2, 0x1f, 0x1fff},
+ { 0xa410, 7, 0x1f, 0x924},
+ { 0xa42c, 12, 0x1f, 0xfff},
+ { 0xa45c, 1, 0x1f, 0x924},
+ { 0xa460, 1, 0x1f, 0x1924},
+ { 0xa464, 15, 0x1f, 0x924},
+ { 0xa4a0, 1, 0x7, 0x924},
+ { 0xa4a4, 2, 0x1f, 0x924},
+ { 0xa4ac, 2, 0x3, 0x924},
+ { 0xa4b4, 1, 0x7, 0x924},
+ { 0xa4b8, 2, 0x3, 0x924},
+ { 0xa4c0, 3, 0x1f, 0x924},
+ { 0xa4cc, 5, 0x3, 0x924},
+ { 0xa4e0, 3, 0x1f, 0x924},
+ { 0xa4fc, 2, 0x1f, 0x924},
+ { 0xa504, 1, 0x3, 0x924},
+ { 0xa508, 3, 0x1f, 0x924},
+ { 0xa518, 1, 0x1f, 0x924},
+ { 0xa520, 1, 0x1f, 0x924},
+ { 0xa528, 1, 0x1f, 0x924},
+ { 0xa530, 1, 0x1f, 0x924},
+ { 0xa538, 1, 0x1f, 0x924},
+ { 0xa540, 1, 0x1f, 0x924},
+ { 0xa548, 1, 0x3, 0x924},
+ { 0xa550, 1, 0x3, 0x924},
+ { 0xa558, 1, 0x3, 0x924},
+ { 0xa560, 1, 0x3, 0x924},
+ { 0xa568, 1, 0x3, 0x924},
+ { 0xa570, 1, 0x1f, 0x924},
+ { 0xa580, 1, 0x1f, 0x1fff},
+ { 0xa590, 1, 0x1f, 0x1fff},
+ { 0xa5a0, 1, 0x7, 0x924},
+ { 0xa5c0, 1, 0x1f, 0x924},
+ { 0xa5e0, 1, 0x1e, 0x924},
+ { 0xa5e8, 1, 0x1e, 0x924},
+ { 0xa5f0, 1, 0x1e, 0x924},
+ { 0xa5f8, 1, 0x6, 0x924},
+ { 0xa5fc, 1, 0x1e, 0x924},
+ { 0xa600, 5, 0x1e, 0xfff},
+ { 0xa614, 1, 0x1e, 0x924},
+ { 0xa618, 1, 0x1e, 0xfff},
+ { 0xa61c, 1, 0x1e, 0x924},
+ { 0xa620, 6, 0x1c, 0x924},
+ { 0xa638, 20, 0x4, 0x924},
+ { 0xa688, 35, 0x1c, 0x924},
+ { 0xa714, 1, 0x1c, 0xfff},
+ { 0xa718, 2, 0x1c, 0x924},
+ { 0xa720, 1, 0x1c, 0xfff},
+ { 0xa724, 3, 0x1c, 0x924},
+ { 0xa730, 1, 0x4, 0x924},
+ { 0xa734, 2, 0x1c, 0x924},
+ { 0xa73c, 4, 0x4, 0x924},
+ { 0xa74c, 1, 0x1c, 0x924},
+ { 0xa750, 1, 0x1c, 0xfff},
+ { 0xa754, 3, 0x1c, 0x924},
+ { 0xa760, 5, 0x4, 0x924},
+ { 0xa774, 7, 0x1c, 0x924},
+ { 0xa790, 15, 0x4, 0x924},
+ { 0xa7cc, 4, 0x1c, 0x924},
+ { 0xa7e0, 6, 0x18, 0x924},
+ { 0xa800, 18, 0x4, 0x924},
+ { 0xa848, 33, 0x1c, 0x924},
+ { 0xa8cc, 2, 0x18, 0x924},
+ { 0xa8d4, 4, 0x1c, 0x924},
+ { 0xa8e4, 1, 0x18, 0x924},
+ { 0xa8e8, 1, 0x1c, 0x924},
+ { 0xa8f0, 1, 0x1c, 0x924},
+ { 0xa8f8, 30, 0x18, 0x924},
+ { 0xa974, 73, 0x18, 0x924},
+ { 0xac30, 1, 0x18, 0x924},
+ { 0xac40, 1, 0x18, 0x924},
+ { 0xac50, 1, 0x18, 0x924},
+ { 0xac60, 1, 0x10, 0x924},
+ { 0x10000, 9, 0x1f, 0x924},
+ { 0x10024, 1, 0x7, 0x924},
+ { 0x10028, 5, 0x1f, 0x924},
+ { 0x1003c, 6, 0x7, 0x924},
+ { 0x10054, 20, 0x1f, 0x924},
+ { 0x100a4, 4, 0x7, 0x924},
+ { 0x100b4, 11, 0x1f, 0x924},
+ { 0x100e0, 4, 0x7, 0x924},
+ { 0x100f0, 8, 0x1f, 0x924},
+ { 0x10110, 6, 0x7, 0x924},
+ { 0x10128, 110, 0x1f, 0x924},
+ { 0x102e0, 4, 0x7, 0x924},
+ { 0x102f0, 18, 0x1f, 0x924},
+ { 0x10338, 20, 0x7, 0x924},
+ { 0x10388, 10, 0x1f, 0x924},
+ { 0x103d0, 2, 0x3, 0x1fff},
+ { 0x103dc, 1, 0x3, 0x1fff},
+ { 0x10400, 6, 0x7, 0x924},
+ { 0x10418, 1, 0x1f, 0xfff},
+ { 0x1041c, 1, 0x1f, 0x924},
+ { 0x10420, 1, 0x1f, 0xfff},
+ { 0x10424, 1, 0x1f, 0x924},
+ { 0x10428, 1, 0x1f, 0xfff},
+ { 0x1042c, 1, 0x1f, 0x924},
+ { 0x10430, 10, 0x7, 0x924},
+ { 0x10458, 2, 0x1f, 0x924},
+ { 0x10460, 1, 0x1f, 0xfff},
+ { 0x10464, 4, 0x1f, 0x924},
+ { 0x10474, 1, 0x1f, 0xfff},
+ { 0x10478, 14, 0x1f, 0x924},
+ { 0x104b0, 12, 0x7, 0x924},
+ { 0x104e0, 1, 0x1f, 0xfff},
+ { 0x104e8, 1, 0x1f, 0x924},
+ { 0x104ec, 1, 0x1f, 0xfff},
+ { 0x104f4, 1, 0x1f, 0x924},
+ { 0x104f8, 1, 0x1f, 0xfff},
+ { 0x10500, 2, 0x1f, 0x924},
+ { 0x10508, 1, 0x1f, 0xfff},
+ { 0x1050c, 9, 0x1f, 0x924},
+ { 0x10530, 1, 0x1f, 0xfff},
+ { 0x10534, 1, 0x1f, 0x924},
+ { 0x10538, 1, 0x1f, 0xfff},
+ { 0x1053c, 3, 0x1f, 0x924},
+ { 0x10548, 1, 0x1f, 0xfff},
+ { 0x1054c, 3, 0x1f, 0x924},
+ { 0x10558, 1, 0x1f, 0xfff},
+ { 0x1055c, 123, 0x1f, 0x924},
+ { 0x10750, 2, 0x7, 0x924},
+ { 0x10760, 2, 0x7, 0x924},
+ { 0x10770, 2, 0x7, 0x924},
+ { 0x10780, 2, 0x7, 0x924},
+ { 0x10790, 2, 0x1f, 0x924},
+ { 0x107a0, 2, 0x7, 0x924},
+ { 0x107b0, 2, 0x7, 0x924},
+ { 0x107c0, 2, 0x7, 0x924},
+ { 0x107d0, 2, 0x7, 0x924},
+ { 0x107e0, 2, 0x1f, 0x924},
+ { 0x10880, 2, 0x1f, 0x924},
+ { 0x10900, 2, 0x1f, 0x924},
+ { 0x16000, 1, 0x6, 0x924},
+ { 0x16004, 25, 0x1e, 0x924},
+ { 0x16070, 8, 0x1e, 0x924},
+ { 0x16090, 4, 0xe, 0x924},
+ { 0x160a0, 6, 0x1e, 0x924},
+ { 0x160c0, 7, 0x1e, 0x924},
+ { 0x160dc, 2, 0x6, 0x924},
+ { 0x160e4, 6, 0x1e, 0x924},
+ { 0x160fc, 4, 0x1e, 0x1fff},
+ { 0x1610c, 2, 0x6, 0x924},
+ { 0x16114, 6, 0x1e, 0x924},
+ { 0x16140, 48, 0x1e, 0x1fff},
+ { 0x16204, 5, 0x1e, 0x924},
+ { 0x18000, 1, 0x1e, 0x924},
+ { 0x18008, 1, 0x1e, 0x924},
+ { 0x18010, 35, 0x1c, 0x924},
+ { 0x180a4, 2, 0x1c, 0x924},
+ { 0x180c0, 9, 0x1c, 0x924},
+ { 0x180e4, 1, 0xc, 0x924},
+ { 0x180e8, 2, 0x1c, 0x924},
+ { 0x180f0, 1, 0xc, 0x924},
+ { 0x180f4, 79, 0x1c, 0x924},
+ { 0x18230, 1, 0xc, 0x924},
+ { 0x18234, 2, 0x1c, 0x924},
+ { 0x1823c, 1, 0xc, 0x924},
+ { 0x18240, 13, 0x1c, 0x924},
+ { 0x18274, 1, 0x4, 0x924},
+ { 0x18278, 12, 0x1c, 0x924},
+ { 0x182a8, 1, 0x1c, 0xfff},
+ { 0x182ac, 3, 0x1c, 0x924},
+ { 0x182b8, 1, 0x1c, 0xfff},
+ { 0x182bc, 19, 0x1c, 0x924},
+ { 0x18308, 1, 0x1c, 0xfff},
+ { 0x1830c, 3, 0x1c, 0x924},
+ { 0x18318, 1, 0x1c, 0xfff},
+ { 0x1831c, 7, 0x1c, 0x924},
+ { 0x18338, 1, 0x1c, 0xfff},
+ { 0x1833c, 3, 0x1c, 0x924},
+ { 0x18348, 1, 0x1c, 0xfff},
+ { 0x1834c, 28, 0x1c, 0x924},
+ { 0x183bc, 2, 0x1c, 0x1fff},
+ { 0x183c8, 3, 0x1c, 0x1fff},
+ { 0x183d8, 1, 0x1c, 0x1fff},
+ { 0x18440, 48, 0x1c, 0x1fff},
+ { 0x18500, 15, 0x1c, 0x924},
+ { 0x18570, 1, 0x18, 0xfff},
+ { 0x18574, 1, 0x18, 0x924},
+ { 0x18578, 1, 0x18, 0xfff},
+ { 0x1857c, 4, 0x18, 0x924},
+ { 0x1858c, 1, 0x18, 0xfff},
+ { 0x18590, 1, 0x18, 0x924},
+ { 0x18594, 1, 0x18, 0xfff},
+ { 0x18598, 32, 0x18, 0x924},
+ { 0x18618, 5, 0x10, 0x924},
+ { 0x1862c, 4, 0x10, 0xfff},
+ { 0x1863c, 16, 0x10, 0x924},
+ { 0x18680, 44, 0x10, 0x924},
+ { 0x18748, 12, 0x10, 0x924},
+ { 0x18788, 1, 0x10, 0x924},
+ { 0x1879c, 6, 0x10, 0x924},
+ { 0x187c4, 51, 0x10, 0x924},
+ { 0x18a00, 48, 0x10, 0x924},
+ { 0x20000, 24, 0x1f, 0x924},
+ { 0x20060, 8, 0x1f, 0x9e4},
+ { 0x20080, 94, 0x1f, 0x924},
+ { 0x201f8, 1, 0x3, 0x924},
+ { 0x201fc, 1, 0x1f, 0x924},
+ { 0x20200, 1, 0x3, 0x924},
+ { 0x20204, 1, 0x1f, 0x924},
+ { 0x20208, 1, 0x3, 0x924},
+ { 0x2020c, 4, 0x1f, 0x924},
+ { 0x2021c, 11, 0x1f, 0xfff},
+ { 0x20248, 24, 0x1f, 0x924},
+ { 0x202b8, 2, 0x1f, 0x1fff},
+ { 0x202c4, 1, 0x1f, 0x1fff},
+ { 0x202c8, 1, 0x1c, 0x924},
+ { 0x202d8, 4, 0x1c, 0x924},
+ { 0x202f0, 1, 0x10, 0x924},
+ { 0x20400, 1, 0x1f, 0x924},
+ { 0x20404, 1, 0x1f, 0xfff},
+ { 0x2040c, 2, 0x1f, 0xfff},
+ { 0x20414, 2, 0x1f, 0x924},
+ { 0x2041c, 2, 0x1f, 0xfff},
+ { 0x20424, 2, 0x1f, 0x924},
+ { 0x2042c, 18, 0x1e, 0x924},
+ { 0x20480, 1, 0x1f, 0x924},
+ { 0x20500, 1, 0x1f, 0x924},
+ { 0x20600, 1, 0x1f, 0x924},
+ { 0x28000, 1, 0x1f, 0x9e4},
+ { 0x28004, 255, 0x1f, 0x180},
+ { 0x28400, 1, 0x1f, 0x1c0},
+ { 0x28404, 255, 0x1f, 0x180},
+ { 0x28800, 1, 0x1f, 0x1c0},
+ { 0x28804, 255, 0x1f, 0x180},
+ { 0x28c00, 1, 0x1f, 0x1c0},
+ { 0x28c04, 255, 0x1f, 0x180},
+ { 0x29000, 1, 0x1f, 0x1c0},
+ { 0x29004, 255, 0x1f, 0x180},
+ { 0x29400, 1, 0x1f, 0x1c0},
+ { 0x29404, 255, 0x1f, 0x180},
+ { 0x29800, 1, 0x1f, 0x1c0},
+ { 0x29804, 255, 0x1f, 0x180},
+ { 0x29c00, 1, 0x1f, 0x1c0},
+ { 0x29c04, 255, 0x1f, 0x180},
+ { 0x2a000, 1, 0x1f, 0x1c0},
+ { 0x2a004, 255, 0x1f, 0x180},
+ { 0x2a400, 1, 0x1f, 0x1c0},
+ { 0x2a404, 255, 0x1f, 0x180},
+ { 0x2a800, 1, 0x1f, 0x1c0},
+ { 0x2a804, 255, 0x1f, 0x180},
+ { 0x2ac00, 1, 0x1f, 0x1c0},
+ { 0x2ac04, 255, 0x1f, 0x180},
+ { 0x2b000, 1, 0x1f, 0x1c0},
+ { 0x2b004, 255, 0x1f, 0x180},
+ { 0x2b400, 1, 0x1f, 0x1c0},
+ { 0x2b404, 255, 0x1f, 0x180},
+ { 0x2b800, 1, 0x1f, 0x1c0},
+ { 0x2b804, 255, 0x1f, 0x180},
+ { 0x2bc00, 1, 0x1f, 0x1c0},
+ { 0x2bc04, 255, 0x1f, 0x180},
+ { 0x2c000, 1, 0x1f, 0x1c0},
+ { 0x2c004, 255, 0x1f, 0x180},
+ { 0x2c400, 1, 0x1f, 0x1c0},
+ { 0x2c404, 255, 0x1f, 0x180},
+ { 0x2c800, 1, 0x1f, 0x1c0},
+ { 0x2c804, 255, 0x1f, 0x180},
+ { 0x2cc00, 1, 0x1f, 0x1c0},
+ { 0x2cc04, 255, 0x1f, 0x180},
+ { 0x2d000, 1, 0x1f, 0x1c0},
+ { 0x2d004, 255, 0x1f, 0x180},
+ { 0x2d400, 1, 0x1f, 0x1c0},
+ { 0x2d404, 255, 0x1f, 0x180},
+ { 0x2d800, 1, 0x1f, 0x1c0},
+ { 0x2d804, 255, 0x1f, 0x180},
+ { 0x2dc00, 1, 0x1f, 0x1c0},
+ { 0x2dc04, 255, 0x1f, 0x180},
+ { 0x2e000, 1, 0x1f, 0x1c0},
+ { 0x2e004, 255, 0x1f, 0x180},
+ { 0x2e400, 1, 0x1f, 0x1c0},
+ { 0x2e404, 255, 0x1f, 0x180},
+ { 0x2e800, 1, 0x1f, 0x1c0},
+ { 0x2e804, 255, 0x1f, 0x180},
+ { 0x2ec00, 1, 0x1f, 0x1c0},
+ { 0x2ec04, 255, 0x1f, 0x180},
+ { 0x2f000, 1, 0x1f, 0x1c0},
+ { 0x2f004, 255, 0x1f, 0x180},
+ { 0x2f400, 1, 0x1f, 0x1c0},
+ { 0x2f404, 255, 0x1f, 0x180},
+ { 0x2f800, 1, 0x1f, 0x1c0},
+ { 0x2f804, 255, 0x1f, 0x180},
+ { 0x2fc00, 1, 0x1f, 0x1c0},
+ { 0x2fc04, 255, 0x1f, 0x180},
+ { 0x30000, 1, 0x1f, 0x9e4},
+ { 0x30004, 255, 0x1f, 0x180},
+ { 0x30400, 1, 0x1f, 0x1c0},
+ { 0x30404, 255, 0x1f, 0x180},
+ { 0x30800, 1, 0x1f, 0x1c0},
+ { 0x30804, 255, 0x1f, 0x180},
+ { 0x30c00, 1, 0x1f, 0x1c0},
+ { 0x30c04, 255, 0x1f, 0x180},
+ { 0x31000, 1, 0x1f, 0x1c0},
+ { 0x31004, 255, 0x1f, 0x180},
+ { 0x31400, 1, 0x1f, 0x1c0},
+ { 0x31404, 255, 0x1f, 0x180},
+ { 0x31800, 1, 0x1f, 0x1c0},
+ { 0x31804, 255, 0x1f, 0x180},
+ { 0x31c00, 1, 0x1f, 0x1c0},
+ { 0x31c04, 255, 0x1f, 0x180},
+ { 0x32000, 1, 0x1f, 0x1c0},
+ { 0x32004, 255, 0x1f, 0x180},
+ { 0x32400, 1, 0x1f, 0x1c0},
+ { 0x32404, 255, 0x1f, 0x180},
+ { 0x32800, 1, 0x1f, 0x1c0},
+ { 0x32804, 255, 0x1f, 0x180},
+ { 0x32c00, 1, 0x1f, 0x1c0},
+ { 0x32c04, 255, 0x1f, 0x180},
+ { 0x33000, 1, 0x1f, 0x1c0},
+ { 0x33004, 255, 0x1f, 0x180},
+ { 0x33400, 1, 0x1f, 0x1c0},
+ { 0x33404, 255, 0x1f, 0x180},
+ { 0x33800, 1, 0x1f, 0x1c0},
+ { 0x33804, 255, 0x1f, 0x180},
+ { 0x33c00, 1, 0x1f, 0x1c0},
+ { 0x33c04, 255, 0x1f, 0x180},
+ { 0x34000, 1, 0x1f, 0x1c0},
+ { 0x34004, 255, 0x1f, 0x180},
+ { 0x34400, 1, 0x1f, 0x1c0},
+ { 0x34404, 255, 0x1f, 0x180},
+ { 0x34800, 1, 0x1f, 0x1c0},
+ { 0x34804, 255, 0x1f, 0x180},
+ { 0x34c00, 1, 0x1f, 0x1c0},
+ { 0x34c04, 255, 0x1f, 0x180},
+ { 0x35000, 1, 0x1f, 0x1c0},
+ { 0x35004, 255, 0x1f, 0x180},
+ { 0x35400, 1, 0x1f, 0x1c0},
+ { 0x35404, 255, 0x1f, 0x180},
+ { 0x35800, 1, 0x1f, 0x1c0},
+ { 0x35804, 255, 0x1f, 0x180},
+ { 0x35c00, 1, 0x1f, 0x1c0},
+ { 0x35c04, 255, 0x1f, 0x180},
+ { 0x36000, 1, 0x1f, 0x1c0},
+ { 0x36004, 255, 0x1f, 0x180},
+ { 0x36400, 1, 0x1f, 0x1c0},
+ { 0x36404, 255, 0x1f, 0x180},
+ { 0x36800, 1, 0x1f, 0x1c0},
+ { 0x36804, 255, 0x1f, 0x180},
+ { 0x36c00, 1, 0x1f, 0x1c0},
+ { 0x36c04, 255, 0x1f, 0x180},
+ { 0x37000, 1, 0x1f, 0x1c0},
+ { 0x37004, 255, 0x1f, 0x180},
+ { 0x37400, 1, 0x1f, 0x1c0},
+ { 0x37404, 255, 0x1f, 0x180},
+ { 0x37800, 1, 0x1f, 0x1c0},
+ { 0x37804, 255, 0x1f, 0x180},
+ { 0x37c00, 1, 0x1f, 0x1c0},
+ { 0x37c04, 255, 0x1f, 0x180},
+ { 0x38000, 1, 0x1f, 0x1c0},
+ { 0x38004, 255, 0x1f, 0x180},
+ { 0x38400, 1, 0x1f, 0x1c0},
+ { 0x38404, 255, 0x1f, 0x180},
+ { 0x38800, 1, 0x1f, 0x1c0},
+ { 0x38804, 255, 0x1f, 0x180},
+ { 0x38c00, 1, 0x1f, 0x1c0},
+ { 0x38c04, 255, 0x1f, 0x180},
+ { 0x39000, 1, 0x1f, 0x1c0},
+ { 0x39004, 255, 0x1f, 0x180},
+ { 0x39400, 1, 0x1f, 0x1c0},
+ { 0x39404, 255, 0x1f, 0x180},
+ { 0x39800, 1, 0x1f, 0x1c0},
+ { 0x39804, 255, 0x1f, 0x180},
+ { 0x39c00, 1, 0x1f, 0x1c0},
+ { 0x39c04, 255, 0x1f, 0x180},
+ { 0x3a000, 1, 0x1f, 0x1c0},
+ { 0x3a004, 255, 0x1f, 0x180},
+ { 0x3a400, 1, 0x1f, 0x1c0},
+ { 0x3a404, 255, 0x1f, 0x180},
+ { 0x3a800, 1, 0x1f, 0x1c0},
+ { 0x3a804, 255, 0x1f, 0x180},
+ { 0x3ac00, 1, 0x1f, 0x1c0},
+ { 0x3ac04, 255, 0x1f, 0x180},
+ { 0x3b000, 1, 0x1f, 0x1c0},
+ { 0x3b004, 255, 0x1f, 0x180},
+ { 0x3b400, 1, 0x1f, 0x1c0},
+ { 0x3b404, 255, 0x1f, 0x180},
+ { 0x3b800, 1, 0x1f, 0x1c0},
+ { 0x3b804, 255, 0x1f, 0x180},
+ { 0x3bc00, 1, 0x1f, 0x1c0},
+ { 0x3bc04, 255, 0x1f, 0x180},
+ { 0x3c000, 1, 0x1f, 0x1c0},
+ { 0x3c004, 255, 0x1f, 0x180},
+ { 0x3c400, 1, 0x1f, 0x1c0},
+ { 0x3c404, 255, 0x1f, 0x180},
+ { 0x3c800, 1, 0x1f, 0x1c0},
+ { 0x3c804, 255, 0x1f, 0x180},
+ { 0x3cc00, 1, 0x1f, 0x1c0},
+ { 0x3cc04, 255, 0x1f, 0x180},
+ { 0x3d000, 1, 0x1f, 0x1c0},
+ { 0x3d004, 255, 0x1f, 0x180},
+ { 0x3d400, 1, 0x1f, 0x1c0},
+ { 0x3d404, 255, 0x1f, 0x180},
+ { 0x3d800, 1, 0x1f, 0x1c0},
+ { 0x3d804, 255, 0x1f, 0x180},
+ { 0x3dc00, 1, 0x1f, 0x1c0},
+ { 0x3dc04, 255, 0x1f, 0x180},
+ { 0x3e000, 1, 0x1f, 0x1c0},
+ { 0x3e004, 255, 0x1f, 0x180},
+ { 0x3e400, 1, 0x1f, 0x1c0},
+ { 0x3e404, 255, 0x1f, 0x180},
+ { 0x3e800, 1, 0x1f, 0x1c0},
+ { 0x3e804, 255, 0x1f, 0x180},
+ { 0x3ec00, 1, 0x1f, 0x1c0},
+ { 0x3ec04, 255, 0x1f, 0x180},
+ { 0x3f000, 1, 0x1f, 0x1c0},
+ { 0x3f004, 255, 0x1f, 0x180},
+ { 0x3f400, 1, 0x1f, 0x1c0},
+ { 0x3f404, 255, 0x1f, 0x180},
+ { 0x3f800, 1, 0x1f, 0x1c0},
+ { 0x3f804, 255, 0x1f, 0x180},
+ { 0x3fc00, 1, 0x1f, 0x1c0},
+ { 0x3fc04, 255, 0x1f, 0x180},
+ { 0x40000, 85, 0x1f, 0x924},
+ { 0x40154, 13, 0x1f, 0xfff},
+ { 0x40198, 2, 0x1f, 0x1fff},
+ { 0x401a4, 1, 0x1f, 0x1fff},
+ { 0x401a8, 8, 0x1e, 0x924},
+ { 0x401c8, 1, 0x2, 0x924},
+ { 0x401cc, 2, 0x1e, 0x924},
+ { 0x401d4, 2, 0x1c, 0x924},
+ { 0x40200, 4, 0x1f, 0x924},
+ { 0x40220, 6, 0x1c, 0x924},
+ { 0x40238, 8, 0xc, 0x924},
+ { 0x40258, 4, 0x1c, 0x924},
+ { 0x40268, 2, 0x18, 0x924},
+ { 0x40270, 17, 0x10, 0x924},
+ { 0x40400, 43, 0x1f, 0x924},
+ { 0x404bc, 2, 0x1f, 0x1fff},
+ { 0x404c8, 1, 0x1f, 0x1fff},
+ { 0x404cc, 3, 0x1e, 0x924},
+ { 0x404e0, 1, 0x1c, 0x924},
+ { 0x40500, 2, 0x1f, 0x924},
+ { 0x40510, 2, 0x1f, 0x924},
+ { 0x40520, 2, 0x1f, 0x924},
+ { 0x40530, 2, 0x1f, 0x924},
+ { 0x40540, 2, 0x1f, 0x924},
+ { 0x40550, 10, 0x1c, 0x924},
+ { 0x40610, 2, 0x1c, 0x924},
+ { 0x42000, 164, 0x1f, 0x924},
+ { 0x422b0, 2, 0x1f, 0x1fff},
+ { 0x422bc, 1, 0x1f, 0x1fff},
+ { 0x422c0, 4, 0x1c, 0x924},
+ { 0x422d4, 5, 0x1e, 0x924},
+ { 0x422e8, 1, 0x1c, 0x924},
+ { 0x42400, 49, 0x1f, 0x924},
+ { 0x424c8, 32, 0x1f, 0x924},
+ { 0x42548, 1, 0x1f, 0xfff},
+ { 0x4254c, 1, 0x1f, 0x924},
+ { 0x42550, 1, 0x1f, 0xfff},
+ { 0x42554, 1, 0x1f, 0x924},
+ { 0x42558, 1, 0x1f, 0xfff},
+ { 0x4255c, 1, 0x1f, 0x924},
+ { 0x42568, 2, 0x1f, 0x924},
+ { 0x42640, 5, 0x1c, 0x924},
+ { 0x42800, 1, 0x1f, 0x924},
+ { 0x50000, 1, 0x1f, 0x1fff},
+ { 0x50004, 19, 0x1f, 0x924},
+ { 0x50050, 8, 0x1f, 0x93c},
+ { 0x50070, 60, 0x1f, 0x924},
+ { 0x50160, 8, 0x1f, 0xfff},
+ { 0x50180, 20, 0x1f, 0x924},
+ { 0x501e0, 2, 0x1f, 0x1fff},
+ { 0x501ec, 1, 0x1f, 0x1fff},
+ { 0x501f0, 4, 0x1e, 0x924},
+ { 0x50200, 1, 0x1f, 0x924},
+ { 0x50204, 1, 0x1f, 0xfff},
+ { 0x5020c, 2, 0x1f, 0xfff},
+ { 0x50214, 2, 0x1f, 0x924},
+ { 0x5021c, 1, 0x1f, 0xfff},
+ { 0x50220, 2, 0x1f, 0x924},
+ { 0x50228, 6, 0x1e, 0x924},
+ { 0x50240, 1, 0x1f, 0x924},
+ { 0x50280, 1, 0x1f, 0x924},
+ { 0x50300, 1, 0x1c, 0x924},
+ { 0x5030c, 1, 0x1c, 0x924},
+ { 0x50318, 1, 0x1c, 0x934},
+ { 0x5031c, 1, 0x1c, 0x924},
+ { 0x50320, 2, 0x1c, 0x934},
+ { 0x50330, 1, 0x10, 0x924},
+ { 0x52000, 1, 0x1f, 0x924},
+ { 0x54000, 1, 0x1f, 0x93c},
+ { 0x54004, 255, 0x1f, 0x30},
+ { 0x54400, 1, 0x1f, 0x38},
+ { 0x54404, 255, 0x1f, 0x30},
+ { 0x54800, 1, 0x1f, 0x38},
+ { 0x54804, 255, 0x1f, 0x30},
+ { 0x54c00, 1, 0x1f, 0x38},
+ { 0x54c04, 255, 0x1f, 0x30},
+ { 0x55000, 1, 0x1f, 0x38},
+ { 0x55004, 255, 0x1f, 0x30},
+ { 0x55400, 1, 0x1f, 0x38},
+ { 0x55404, 255, 0x1f, 0x30},
+ { 0x55800, 1, 0x1f, 0x38},
+ { 0x55804, 255, 0x1f, 0x30},
+ { 0x55c00, 1, 0x1f, 0x38},
+ { 0x55c04, 255, 0x1f, 0x30},
+ { 0x56000, 1, 0x1f, 0x38},
+ { 0x56004, 255, 0x1f, 0x30},
+ { 0x56400, 1, 0x1f, 0x38},
+ { 0x56404, 255, 0x1f, 0x30},
+ { 0x56800, 1, 0x1f, 0x38},
+ { 0x56804, 255, 0x1f, 0x30},
+ { 0x56c00, 1, 0x1f, 0x38},
+ { 0x56c04, 255, 0x1f, 0x30},
+ { 0x57000, 1, 0x1f, 0x38},
+ { 0x57004, 255, 0x1f, 0x30},
+ { 0x58000, 1, 0x1f, 0x934},
+ { 0x58004, 8191, 0x3, 0x30},
+ { 0x60000, 26, 0x1f, 0x924},
+ { 0x60068, 8, 0x3, 0x924},
+ { 0x60088, 2, 0x1f, 0x924},
+ { 0x60090, 1, 0x1f, 0xfff},
+ { 0x60094, 9, 0x1f, 0x924},
+ { 0x600b8, 9, 0x3, 0x924},
+ { 0x600dc, 1, 0x1f, 0x924},
+ { 0x600e0, 5, 0x3, 0x924},
+ { 0x600f4, 1, 0x7, 0x924},
+ { 0x600f8, 1, 0x3, 0x924},
+ { 0x600fc, 8, 0x1f, 0x924},
+ { 0x6012c, 2, 0x1f, 0x1fff},
+ { 0x60138, 1, 0x1f, 0x1fff},
+ { 0x6013c, 24, 0x2, 0x924},
+ { 0x6019c, 2, 0x1c, 0x924},
+ { 0x601ac, 18, 0x1c, 0x924},
+ { 0x60200, 1, 0x1f, 0xb6d},
+ { 0x60204, 2, 0x1f, 0x249},
+ { 0x60210, 13, 0x1c, 0x924},
+ { 0x60244, 16, 0x10, 0x924},
+ { 0x61000, 1, 0x1f, 0xb6d},
+ { 0x61004, 511, 0x1f, 0x249},
+ { 0x61800, 512, 0x18, 0x249},
+ { 0x70000, 8, 0x1f, 0xb6d},
+ { 0x70020, 8184, 0x1f, 0x249},
+ { 0x78000, 8192, 0x18, 0x249},
+ { 0x85000, 3, 0x1f, 0x1000},
+ { 0x8501c, 7, 0x1f, 0x1000},
+ { 0x85048, 1, 0x1f, 0x1000},
+ { 0x85200, 32, 0x1f, 0x1000},
+ { 0xa0000, 16384, 0x3, 0x1000},
+ { 0xb0000, 16384, 0x2, 0x1000},
+ { 0xc1000, 7, 0x1f, 0x924},
+ { 0xc102c, 2, 0x1f, 0x1fff},
+ { 0xc1038, 1, 0x1f, 0x1fff},
+ { 0xc103c, 2, 0x1c, 0x924},
+ { 0xc1800, 2, 0x1f, 0x924},
+ { 0xc2000, 164, 0x1f, 0x924},
+ { 0xc22b0, 2, 0x1f, 0x1fff},
+ { 0xc22bc, 1, 0x1f, 0x1fff},
+ { 0xc22c0, 5, 0x1c, 0x924},
+ { 0xc22d8, 4, 0x1c, 0x924},
+ { 0xc2400, 49, 0x1f, 0x924},
+ { 0xc24c8, 32, 0x1f, 0x924},
+ { 0xc2548, 1, 0x1f, 0xfff},
+ { 0xc254c, 1, 0x1f, 0x924},
+ { 0xc2550, 1, 0x1f, 0xfff},
+ { 0xc2554, 1, 0x1f, 0x924},
+ { 0xc2558, 1, 0x1f, 0xfff},
+ { 0xc255c, 1, 0x1f, 0x924},
+ { 0xc2568, 2, 0x1f, 0x924},
+ { 0xc2600, 1, 0x1f, 0x924},
+ { 0xc4000, 165, 0x1f, 0x924},
+ { 0xc42b4, 2, 0x1f, 0x1fff},
+ { 0xc42c0, 1, 0x1f, 0x1fff},
+ { 0xc42d8, 2, 0x1c, 0x924},
+ { 0xc42e0, 7, 0x1e, 0x924},
+ { 0xc42fc, 1, 0x1c, 0x924},
+ { 0xc4400, 51, 0x1f, 0x924},
+ { 0xc44d0, 32, 0x1f, 0x924},
+ { 0xc4550, 1, 0x1f, 0xfff},
+ { 0xc4554, 1, 0x1f, 0x924},
+ { 0xc4558, 1, 0x1f, 0xfff},
+ { 0xc455c, 1, 0x1f, 0x924},
+ { 0xc4560, 1, 0x1f, 0xfff},
+ { 0xc4564, 1, 0x1f, 0x924},
+ { 0xc4570, 2, 0x1f, 0x924},
+ { 0xc4578, 5, 0x1c, 0x924},
+ { 0xc4600, 1, 0x1f, 0x924},
+ { 0xd0000, 19, 0x1f, 0x924},
+ { 0xd004c, 8, 0x1f, 0x1927},
+ { 0xd006c, 64, 0x1f, 0x924},
+ { 0xd016c, 8, 0x1f, 0xfff},
+ { 0xd018c, 19, 0x1f, 0x924},
+ { 0xd01e8, 2, 0x1f, 0x1fff},
+ { 0xd01f4, 1, 0x1f, 0x1fff},
+ { 0xd01fc, 1, 0x1c, 0x924},
+ { 0xd0200, 1, 0x1f, 0x924},
+ { 0xd0204, 1, 0x1f, 0xfff},
+ { 0xd020c, 3, 0x1f, 0xfff},
+ { 0xd0218, 4, 0x1f, 0x924},
+ { 0xd0228, 18, 0x1e, 0x924},
+ { 0xd0280, 1, 0x1f, 0x924},
+ { 0xd0300, 1, 0x1f, 0x924},
+ { 0xd0400, 1, 0x1f, 0x924},
+ { 0xd0818, 1, 0x10, 0x924},
+ { 0xd4000, 1, 0x1f, 0x1927},
+ { 0xd4004, 255, 0x1f, 0x6},
+ { 0xd4400, 1, 0x1f, 0x1007},
+ { 0xd4404, 255, 0x1f, 0x6},
+ { 0xd4800, 1, 0x1f, 0x1007},
+ { 0xd4804, 255, 0x1f, 0x6},
+ { 0xd4c00, 1, 0x1f, 0x1007},
+ { 0xd4c04, 255, 0x1f, 0x6},
+ { 0xd5000, 1, 0x1f, 0x1007},
+ { 0xd5004, 255, 0x1f, 0x6},
+ { 0xd5400, 1, 0x1f, 0x1007},
+ { 0xd5404, 255, 0x1f, 0x6},
+ { 0xd5800, 1, 0x1f, 0x1007},
+ { 0xd5804, 255, 0x1f, 0x6},
+ { 0xd5c00, 1, 0x1f, 0x1007},
+ { 0xd5c04, 255, 0x1f, 0x6},
+ { 0xd6000, 1, 0x1f, 0x1007},
+ { 0xd6004, 255, 0x1f, 0x6},
+ { 0xd6400, 1, 0x1f, 0x1007},
+ { 0xd6404, 255, 0x1f, 0x6},
+ { 0xd8000, 1, 0x1f, 0x1927},
+ { 0xd8004, 255, 0x1f, 0x6},
+ { 0xd8400, 1, 0x1f, 0x1007},
+ { 0xd8404, 255, 0x1f, 0x6},
+ { 0xd8800, 1, 0x1f, 0x1007},
+ { 0xd8804, 255, 0x1f, 0x6},
+ { 0xd8c00, 1, 0x1f, 0x1007},
+ { 0xd8c04, 255, 0x1f, 0x6},
+ { 0xd9000, 1, 0x1f, 0x1007},
+ { 0xd9004, 255, 0x1f, 0x6},
+ { 0xd9400, 1, 0x1f, 0x1007},
+ { 0xd9404, 255, 0x1f, 0x6},
+ { 0xd9800, 1, 0x1f, 0x1007},
+ { 0xd9804, 255, 0x1f, 0x6},
+ { 0xd9c00, 1, 0x1f, 0x1007},
+ { 0xd9c04, 255, 0x1f, 0x6},
+ { 0xda000, 1, 0x1f, 0x1007},
+ { 0xda004, 255, 0x1f, 0x6},
+ { 0xda400, 1, 0x1f, 0x1007},
+ { 0xda404, 255, 0x1f, 0x6},
+ { 0xda800, 1, 0x1f, 0x1007},
+ { 0xda804, 255, 0x1f, 0x6},
+ { 0xdac00, 1, 0x1f, 0x1007},
+ { 0xdac04, 255, 0x1f, 0x6},
+ { 0xdb000, 1, 0x1f, 0x1007},
+ { 0xdb004, 255, 0x1f, 0x6},
+ { 0xdb400, 1, 0x1f, 0x1007},
+ { 0xdb404, 255, 0x1f, 0x6},
+ { 0xdb800, 1, 0x1f, 0x1007},
+ { 0xdb804, 255, 0x1f, 0x6},
+ { 0xdbc00, 1, 0x1f, 0x1007},
+ { 0xdbc04, 255, 0x1f, 0x6},
+ { 0xdc000, 1, 0x1f, 0x1007},
+ { 0xdc004, 255, 0x1f, 0x6},
+ { 0xdc400, 1, 0x1f, 0x1007},
+ { 0xdc404, 255, 0x1f, 0x6},
+ { 0xdc800, 1, 0x1f, 0x1007},
+ { 0xdc804, 255, 0x1f, 0x6},
+ { 0xdcc00, 1, 0x1f, 0x1007},
+ { 0xdcc04, 255, 0x1f, 0x6},
+ { 0xdd000, 1, 0x1f, 0x1007},
+ { 0xdd004, 255, 0x1f, 0x6},
+ { 0xdd400, 1, 0x1f, 0x1007},
+ { 0xdd404, 255, 0x1f, 0x6},
+ { 0xdd800, 1, 0x1f, 0x1007},
+ { 0xdd804, 255, 0x1f, 0x6},
+ { 0xddc00, 1, 0x1f, 0x1007},
+ { 0xddc04, 255, 0x1f, 0x6},
+ { 0xde000, 1, 0x1f, 0x1007},
+ { 0xde004, 255, 0x1f, 0x6},
+ { 0xde400, 1, 0x1f, 0x1007},
+ { 0xde404, 255, 0x1f, 0x6},
+ { 0xde800, 1, 0x1f, 0x1007},
+ { 0xde804, 255, 0x1f, 0x6},
+ { 0xdec00, 1, 0x1f, 0x1007},
+ { 0xdec04, 255, 0x1f, 0x6},
+ { 0xdf000, 1, 0x1f, 0x1007},
+ { 0xdf004, 255, 0x1f, 0x6},
+ { 0xdf400, 1, 0x1f, 0x1007},
+ { 0xdf404, 255, 0x1f, 0x6},
+ { 0xdf800, 1, 0x1f, 0x1007},
+ { 0xdf804, 255, 0x1f, 0x6},
+ { 0xdfc00, 1, 0x1f, 0x1007},
+ { 0xdfc04, 255, 0x1f, 0x6},
+ { 0xe0000, 21, 0x1f, 0x924},
+ { 0xe0054, 8, 0x1f, 0xf24},
+ { 0xe0074, 49, 0x1f, 0x924},
+ { 0xe0138, 1, 0x3, 0x924},
+ { 0xe013c, 6, 0x1f, 0x924},
+ { 0xe0154, 8, 0x1f, 0xfff},
+ { 0xe0174, 21, 0x1f, 0x924},
+ { 0xe01d8, 2, 0x1f, 0x1fff},
+ { 0xe01e4, 1, 0x1f, 0x1fff},
+ { 0xe01f4, 1, 0x4, 0x924},
+ { 0xe01f8, 1, 0x1c, 0x924},
+ { 0xe0200, 1, 0x1f, 0x924},
+ { 0xe0204, 1, 0x1f, 0xfff},
+ { 0xe020c, 2, 0x1f, 0xfff},
+ { 0xe0214, 2, 0x1f, 0x924},
+ { 0xe021c, 2, 0x1f, 0xfff},
+ { 0xe0224, 2, 0x1f, 0x924},
+ { 0xe022c, 18, 0x1e, 0x924},
+ { 0xe0280, 1, 0x1f, 0x924},
+ { 0xe0300, 1, 0x1f, 0x924},
+ { 0xe0400, 1, 0x10, 0x924},
+ { 0xe1000, 1, 0x1f, 0x924},
+ { 0xe2000, 1, 0x1f, 0xf24},
+ { 0xe2004, 255, 0x1f, 0xc00},
+ { 0xe2400, 1, 0x1f, 0xe00},
+ { 0xe2404, 255, 0x1f, 0xc00},
+ { 0xe2800, 1, 0x1f, 0xe00},
+ { 0xe2804, 255, 0x1f, 0xc00},
+ { 0xe2c00, 1, 0x1f, 0xe00},
+ { 0xe2c04, 255, 0x1f, 0xc00},
+ { 0xe3000, 1, 0x1f, 0xe00},
+ { 0xe3004, 255, 0x1f, 0xc00},
+ { 0xe3400, 1, 0x1f, 0xe00},
+ { 0xe3404, 255, 0x1f, 0xc00},
+ { 0xe3800, 1, 0x1f, 0xe00},
+ { 0xe3804, 255, 0x1f, 0xc00},
+ { 0xe3c00, 1, 0x1f, 0xe00},
+ { 0xe3c04, 255, 0x1f, 0xc00},
+ { 0xf0000, 1, 0x1f, 0xf24},
+ { 0xf0004, 255, 0x1f, 0xc00},
+ { 0xf0400, 1, 0x1f, 0xe00},
+ { 0xf0404, 255, 0x1f, 0xc00},
+ { 0xf0800, 1, 0x1f, 0xe00},
+ { 0xf0804, 255, 0x1f, 0xc00},
+ { 0xf0c00, 1, 0x1f, 0xe00},
+ { 0xf0c04, 255, 0x1f, 0xc00},
+ { 0xf1000, 1, 0x1f, 0xe00},
+ { 0xf1004, 255, 0x1f, 0xc00},
+ { 0xf1400, 1, 0x1f, 0xe00},
+ { 0xf1404, 255, 0x1f, 0xc00},
+ { 0xf1800, 1, 0x1f, 0xe00},
+ { 0xf1804, 255, 0x1f, 0xc00},
+ { 0xf1c00, 1, 0x1f, 0xe00},
+ { 0xf1c04, 255, 0x1f, 0xc00},
+ { 0xf2000, 1, 0x1f, 0xe00},
+ { 0xf2004, 255, 0x1f, 0xc00},
+ { 0xf2400, 1, 0x1f, 0xe00},
+ { 0xf2404, 255, 0x1f, 0xc00},
+ { 0xf2800, 1, 0x1f, 0xe00},
+ { 0xf2804, 255, 0x1f, 0xc00},
+ { 0xf2c00, 1, 0x1f, 0xe00},
+ { 0xf2c04, 255, 0x1f, 0xc00},
+ { 0xf3000, 1, 0x1f, 0xe00},
+ { 0xf3004, 255, 0x1f, 0xc00},
+ { 0xf3400, 1, 0x1f, 0xe00},
+ { 0xf3404, 255, 0x1f, 0xc00},
+ { 0xf3800, 1, 0x1f, 0xe00},
+ { 0xf3804, 255, 0x1f, 0xc00},
+ { 0xf3c00, 1, 0x1f, 0xe00},
+ { 0xf3c04, 255, 0x1f, 0xc00},
+ { 0xf4000, 1, 0x1f, 0xe00},
+ { 0xf4004, 255, 0x1f, 0xc00},
+ { 0xf4400, 1, 0x1f, 0xe00},
+ { 0xf4404, 255, 0x1f, 0xc00},
+ { 0xf4800, 1, 0x1f, 0xe00},
+ { 0xf4804, 255, 0x1f, 0xc00},
+ { 0xf4c00, 1, 0x1f, 0xe00},
+ { 0xf4c04, 255, 0x1f, 0xc00},
+ { 0xf5000, 1, 0x1f, 0xe00},
+ { 0xf5004, 255, 0x1f, 0xc00},
+ { 0xf5400, 1, 0x1f, 0xe00},
+ { 0xf5404, 255, 0x1f, 0xc00},
+ { 0xf5800, 1, 0x1f, 0xe00},
+ { 0xf5804, 255, 0x1f, 0xc00},
+ { 0xf5c00, 1, 0x1f, 0xe00},
+ { 0xf5c04, 255, 0x1f, 0xc00},
+ { 0xf6000, 1, 0x1f, 0xe00},
+ { 0xf6004, 255, 0x1f, 0xc00},
+ { 0xf6400, 1, 0x1f, 0xe00},
+ { 0xf6404, 255, 0x1f, 0xc00},
+ { 0xf6800, 1, 0x1f, 0xe00},
+ { 0xf6804, 255, 0x1f, 0xc00},
+ { 0xf6c00, 1, 0x1f, 0xe00},
+ { 0xf6c04, 255, 0x1f, 0xc00},
+ { 0xf7000, 1, 0x1f, 0xe00},
+ { 0xf7004, 255, 0x1f, 0xc00},
+ { 0xf7400, 1, 0x1f, 0xe00},
+ { 0xf7404, 255, 0x1f, 0xc00},
+ { 0xf7800, 1, 0x1f, 0xe00},
+ { 0xf7804, 255, 0x1f, 0xc00},
+ { 0xf7c00, 1, 0x1f, 0xe00},
+ { 0xf7c04, 255, 0x1f, 0xc00},
+ { 0xf8000, 1, 0x1f, 0xe00},
+ { 0xf8004, 255, 0x1f, 0xc00},
+ { 0xf8400, 1, 0x1f, 0xe00},
+ { 0xf8404, 255, 0x1f, 0xc00},
+ { 0xf8800, 1, 0x1f, 0xe00},
+ { 0xf8804, 255, 0x1f, 0xc00},
+ { 0xf8c00, 1, 0x1f, 0xe00},
+ { 0xf8c04, 255, 0x1f, 0xc00},
+ { 0xf9000, 1, 0x1f, 0xe00},
+ { 0xf9004, 255, 0x1f, 0xc00},
+ { 0xf9400, 1, 0x1f, 0xe00},
+ { 0xf9404, 255, 0x1f, 0xc00},
+ { 0xf9800, 1, 0x1f, 0xe00},
+ { 0xf9804, 255, 0x1f, 0xc00},
+ { 0xf9c00, 1, 0x1f, 0xe00},
+ { 0xf9c04, 255, 0x1f, 0xc00},
+ { 0xfa000, 1, 0x1f, 0xe00},
+ { 0xfa004, 255, 0x1f, 0xc00},
+ { 0xfa400, 1, 0x1f, 0xe00},
+ { 0xfa404, 255, 0x1f, 0xc00},
+ { 0xfa800, 1, 0x1f, 0xe00},
+ { 0xfa804, 255, 0x1f, 0xc00},
+ { 0xfac00, 1, 0x1f, 0xe00},
+ { 0xfac04, 255, 0x1f, 0xc00},
+ { 0xfb000, 1, 0x1f, 0xe00},
+ { 0xfb004, 255, 0x1f, 0xc00},
+ { 0xfb400, 1, 0x1f, 0xe00},
+ { 0xfb404, 255, 0x1f, 0xc00},
+ { 0xfb800, 1, 0x1f, 0xe00},
+ { 0xfb804, 255, 0x1f, 0xc00},
+ { 0xfbc00, 1, 0x1f, 0xe00},
+ { 0xfbc04, 255, 0x1f, 0xc00},
+ { 0xfc000, 1, 0x1f, 0xe00},
+ { 0xfc004, 255, 0x1f, 0xc00},
+ { 0xfc400, 1, 0x1f, 0xe00},
+ { 0xfc404, 255, 0x1f, 0xc00},
+ { 0xfc800, 1, 0x1f, 0xe00},
+ { 0xfc804, 255, 0x1f, 0xc00},
+ { 0xfcc00, 1, 0x1f, 0xe00},
+ { 0xfcc04, 255, 0x1f, 0xc00},
+ { 0xfd000, 1, 0x1f, 0xe00},
+ { 0xfd004, 255, 0x1f, 0xc00},
+ { 0xfd400, 1, 0x1f, 0xe00},
+ { 0xfd404, 255, 0x1f, 0xc00},
+ { 0xfd800, 1, 0x1f, 0xe00},
+ { 0xfd804, 255, 0x1f, 0xc00},
+ { 0xfdc00, 1, 0x1f, 0xe00},
+ { 0xfdc04, 255, 0x1f, 0xc00},
+ { 0xfe000, 1, 0x1f, 0xe00},
+ { 0xfe004, 255, 0x1f, 0xc00},
+ { 0xfe400, 1, 0x1f, 0xe00},
+ { 0xfe404, 255, 0x1f, 0xc00},
+ { 0xfe800, 1, 0x1f, 0xe00},
+ { 0xfe804, 255, 0x1f, 0xc00},
+ { 0xfec00, 1, 0x1f, 0xe00},
+ { 0xfec04, 255, 0x1f, 0xc00},
+ { 0xff000, 1, 0x1f, 0xe00},
+ { 0xff004, 255, 0x1f, 0xc00},
+ { 0xff400, 1, 0x1f, 0xe00},
+ { 0xff404, 255, 0x1f, 0xc00},
+ { 0xff800, 1, 0x1f, 0xe00},
+ { 0xff804, 255, 0x1f, 0xc00},
+ { 0xffc00, 1, 0x1f, 0xe00},
+ { 0xffc04, 255, 0x1f, 0xc00},
+ { 0x101000, 5, 0x1f, 0x924},
+ { 0x101014, 1, 0x1f, 0xfff},
+ { 0x101018, 6, 0x1f, 0x924},
+ { 0x101040, 2, 0x1f, 0x1fff},
+ { 0x10104c, 1, 0x1f, 0x1fff},
+ { 0x101050, 1, 0x1e, 0x924},
+ { 0x101054, 3, 0x1c, 0x924},
+ { 0x101100, 1, 0x1f, 0x924},
+ { 0x101800, 8, 0x1f, 0x924},
+ { 0x102000, 18, 0x1f, 0x924},
+ { 0x102058, 2, 0x1f, 0x1fff},
+ { 0x102064, 1, 0x1f, 0x1fff},
+ { 0x102068, 6, 0x1c, 0x924},
+ { 0x102080, 16, 0x1f, 0xfff},
+ { 0x1020c0, 1, 0x1f, 0x924},
+ { 0x1020c8, 8, 0x2, 0x924},
+ { 0x1020e8, 9, 0x1c, 0x924},
+ { 0x102400, 1, 0x1f, 0x924},
+ { 0x103000, 1, 0x1f, 0x924},
+ { 0x103004, 2, 0x1f, 0xfff},
+ { 0x10300c, 23, 0x1f, 0x924},
+ { 0x103088, 2, 0x1f, 0x1fff},
+ { 0x103094, 1, 0x1f, 0x1fff},
+ { 0x103098, 1, 0x1e, 0x924},
+ { 0x10309c, 2, 0x1e, 0xfff},
+ { 0x1030a4, 2, 0x1e, 0x924},
+ { 0x1030ac, 2, 0x1c, 0x924},
+ { 0x1030b4, 1, 0x4, 0x924},
+ { 0x1030b8, 2, 0x1c, 0xfff},
+ { 0x1030c0, 3, 0x1c, 0x924},
+ { 0x1030cc, 1, 0x1c, 0xfff},
+ { 0x1030d0, 1, 0x1c, 0x924},
+ { 0x1030d8, 2, 0x1c, 0x924},
+ { 0x1030e0, 1, 0x1c, 0xfff},
+ { 0x1030e4, 5, 0x1c, 0x924},
+ { 0x103400, 136, 0x1c, 0x1fff},
+ { 0x103800, 8, 0x1f, 0x924},
+ { 0x104000, 1, 0x1f, 0x924},
+ { 0x104004, 1, 0x1f, 0xfff},
+ { 0x104008, 4, 0x1f, 0x924},
+ { 0x104018, 1, 0x1f, 0xfff},
+ { 0x10401c, 1, 0x1f, 0x924},
+ { 0x104020, 1, 0x1f, 0xfff},
+ { 0x104024, 6, 0x1f, 0x924},
+ { 0x10403c, 1, 0x1f, 0xfff},
+ { 0x104040, 47, 0x1f, 0x924},
+ { 0x10410c, 2, 0x1f, 0x1fff},
+ { 0x104118, 1, 0x1f, 0x1fff},
+ { 0x10411c, 16, 0x1c, 0x924},
+ { 0x104200, 17, 0x1f, 0x924},
+ { 0x104400, 1, 0x1f, 0x1fff},
+ { 0x104404, 63, 0x1f, 0xfff},
+ { 0x104500, 192, 0x1f, 0xdb6},
+ { 0x104800, 1, 0x1f, 0x1fff},
+ { 0x104804, 63, 0x1f, 0xfff},
+ { 0x104900, 192, 0x1f, 0xdb6},
+ { 0x105000, 4, 0x1f, 0x1fff},
+ { 0x105010, 252, 0x1f, 0xfff},
+ { 0x105400, 768, 0x1f, 0xdb6},
+ { 0x107000, 7, 0x1c, 0x924},
+ { 0x10701c, 1, 0x18, 0x924},
+ { 0x108000, 33, 0x3, 0x924},
+ { 0x1080ac, 5, 0x2, 0x924},
+ { 0x108100, 5, 0x3, 0x924},
+ { 0x108120, 5, 0x3, 0x924},
+ { 0x108200, 74, 0x3, 0x924},
+ { 0x108400, 74, 0x3, 0x924},
+ { 0x108800, 152, 0x3, 0x924},
+ { 0x110000, 111, 0x1c, 0x924},
+ { 0x1101cc, 2, 0x1c, 0x1fff},
+ { 0x1101d8, 1, 0x1c, 0x1fff},
+ { 0x1101dc, 1, 0x18, 0x924},
+ { 0x110200, 4, 0x1c, 0x924},
+ { 0x120000, 92, 0x1f, 0x924},
+ { 0x120170, 2, 0x3, 0x924},
+ { 0x120178, 14, 0x1f, 0x924},
+ { 0x1201b0, 2, 0x1f, 0xfff},
+ { 0x1201b8, 93, 0x1f, 0x924},
+ { 0x12032c, 1, 0x1f, 0xfff},
+ { 0x120330, 15, 0x1f, 0x924},
+ { 0x12036c, 3, 0x1f, 0xfff},
+ { 0x120378, 36, 0x1f, 0x924},
+ { 0x120408, 2, 0x1f, 0xfff},
+ { 0x120410, 1, 0x1f, 0x924},
+ { 0x120414, 15, 0x1f, 0xfff},
+ { 0x120450, 10, 0x1f, 0x924},
+ { 0x120478, 2, 0x1f, 0xfff},
+ { 0x120480, 43, 0x1f, 0x924},
+ { 0x12052c, 1, 0x1f, 0xfff},
+ { 0x120530, 5, 0x1f, 0x924},
+ { 0x120544, 4, 0x3, 0x924},
+ { 0x120554, 4, 0x1f, 0x924},
+ { 0x120564, 2, 0x1f, 0xfff},
+ { 0x12057c, 2, 0x1f, 0x1fff},
+ { 0x120588, 3, 0x1f, 0x1fff},
+ { 0x120598, 1, 0x1f, 0x1fff},
+ { 0x12059c, 22, 0x1e, 0x924},
+ { 0x1205f4, 1, 0x6, 0x924},
+ { 0x1205f8, 4, 0x1c, 0x924},
+ { 0x120618, 1, 0x1c, 0x924},
+ { 0x12061c, 31, 0x1e, 0x924},
+ { 0x120698, 3, 0x1c, 0x924},
+ { 0x1206a4, 1, 0x4, 0x924},
+ { 0x1206a8, 1, 0x1c, 0x924},
+ { 0x1206b0, 38, 0x1c, 0x924},
+ { 0x120748, 1, 0x1c, 0xfff},
+ { 0x12074c, 11, 0x1c, 0x924},
+ { 0x120778, 2, 0x1c, 0xfff},
+ { 0x120780, 23, 0x1c, 0x924},
+ { 0x1207dc, 1, 0x4, 0x924},
+ { 0x1207fc, 1, 0x1c, 0x924},
+ { 0x12080c, 2, 0x1f, 0xfff},
+ { 0x120814, 1, 0x1f, 0x924},
+ { 0x120818, 1, 0x1f, 0xfff},
+ { 0x12081c, 1, 0x1f, 0x924},
+ { 0x120820, 1, 0x1f, 0xfff},
+ { 0x120824, 1, 0x1f, 0x924},
+ { 0x120828, 1, 0x1f, 0xfff},
+ { 0x12082c, 1, 0x1f, 0x924},
+ { 0x120830, 1, 0x1f, 0xfff},
+ { 0x120834, 1, 0x1f, 0x924},
+ { 0x120838, 1, 0x1f, 0xfff},
+ { 0x12083c, 1, 0x1f, 0x924},
+ { 0x120840, 1, 0x1f, 0xfff},
+ { 0x120844, 1, 0x1f, 0x924},
+ { 0x120848, 1, 0x1f, 0xfff},
+ { 0x12084c, 1, 0x1f, 0x924},
+ { 0x120850, 1, 0x1f, 0xfff},
+ { 0x120854, 1, 0x1f, 0x924},
+ { 0x120858, 1, 0x1f, 0xfff},
+ { 0x12085c, 1, 0x1f, 0x924},
+ { 0x120860, 1, 0x1f, 0xfff},
+ { 0x120864, 1, 0x1f, 0x924},
+ { 0x120868, 1, 0x1f, 0xfff},
+ { 0x12086c, 1, 0x1f, 0x924},
+ { 0x120870, 1, 0x1f, 0xfff},
+ { 0x120874, 1, 0x1f, 0x924},
+ { 0x120878, 1, 0x1f, 0xfff},
+ { 0x12087c, 1, 0x1f, 0x924},
+ { 0x120880, 1, 0x1f, 0xfff},
+ { 0x120884, 1, 0x1f, 0x924},
+ { 0x120888, 1, 0x1f, 0xfff},
+ { 0x12088c, 1, 0x1f, 0x924},
+ { 0x120890, 1, 0x1f, 0xfff},
+ { 0x120894, 1, 0x1f, 0x924},
+ { 0x120898, 1, 0x1f, 0xfff},
+ { 0x12089c, 1, 0x1f, 0x924},
+ { 0x1208a0, 1, 0x1f, 0xfff},
+ { 0x1208a4, 1, 0x1f, 0x924},
+ { 0x1208a8, 1, 0x1f, 0xfff},
+ { 0x1208ac, 1, 0x1f, 0x924},
+ { 0x1208b0, 1, 0x1f, 0xfff},
+ { 0x1208b4, 1, 0x1f, 0x924},
+ { 0x1208b8, 1, 0x1f, 0xfff},
+ { 0x1208bc, 1, 0x1f, 0x924},
+ { 0x1208c0, 1, 0x1f, 0xfff},
+ { 0x1208c4, 1, 0x1f, 0x924},
+ { 0x1208c8, 1, 0x1f, 0xfff},
+ { 0x1208cc, 1, 0x1f, 0x924},
+ { 0x1208d0, 1, 0x1f, 0xfff},
+ { 0x1208d4, 1, 0x1f, 0x924},
+ { 0x1208d8, 1, 0x1f, 0xfff},
+ { 0x1208dc, 1, 0x1f, 0x924},
+ { 0x1208e0, 1, 0x1f, 0xfff},
+ { 0x1208e4, 1, 0x1f, 0x924},
+ { 0x1208e8, 1, 0x1f, 0xfff},
+ { 0x1208ec, 1, 0x1f, 0x924},
+ { 0x1208f0, 1, 0x1f, 0xfff},
+ { 0x1208f4, 1, 0x1f, 0x924},
+ { 0x1208f8, 1, 0x1f, 0xfff},
+ { 0x1208fc, 1, 0x1f, 0x924},
+ { 0x120900, 1, 0x1f, 0xfff},
+ { 0x120904, 1, 0x1f, 0x924},
+ { 0x120908, 1, 0x1f, 0xfff},
+ { 0x12090c, 1, 0x1f, 0x924},
+ { 0x120910, 7, 0x1c, 0x924},
+ { 0x120930, 9, 0x1c, 0x924},
+ { 0x12095c, 37, 0x18, 0x924},
+ { 0x120a00, 2, 0x7, 0x924},
+ { 0x120b00, 1, 0x18, 0x924},
+ { 0x122000, 2, 0x1f, 0x924},
+ { 0x122008, 2046, 0x1, 0x924},
+ { 0x128000, 6144, 0x1e, 0x924},
+ { 0x130000, 1, 0x1c, 0x1fff},
+ { 0x130004, 11, 0x1c, 0x924},
+ { 0x130030, 1, 0x1c, 0xfff},
+ { 0x130034, 6, 0x1c, 0x924},
+ { 0x13004c, 3, 0x1c, 0xfff},
+ { 0x130058, 3, 0x1c, 0x924},
+ { 0x130064, 2, 0x1c, 0xfff},
+ { 0x13006c, 8, 0x1c, 0x924},
+ { 0x13009c, 2, 0x1c, 0x1fff},
+ { 0x1300a8, 1, 0x1c, 0x1fff},
+ { 0x130100, 12, 0x1c, 0x924},
+ { 0x130130, 1, 0x1c, 0xfff},
+ { 0x130134, 14, 0x1c, 0x924},
+ { 0x13016c, 1, 0x1c, 0xfff},
+ { 0x130170, 1, 0x1c, 0x924},
+ { 0x130180, 1, 0x1c, 0x924},
+ { 0x130200, 1, 0x1c, 0x924},
+ { 0x130280, 1, 0x1c, 0x924},
+ { 0x130300, 1, 0x1c, 0xfff},
+ { 0x130304, 4, 0x1c, 0x924},
+ { 0x130380, 1, 0x1c, 0x924},
+ { 0x130400, 1, 0x1c, 0x924},
+ { 0x130480, 1, 0x1c, 0xfff},
+ { 0x130484, 4, 0x1c, 0x924},
+ { 0x130800, 72, 0x1c, 0x924},
+ { 0x131000, 136, 0x1c, 0x924},
+ { 0x132000, 148, 0x1c, 0x924},
+ { 0x134000, 544, 0x1c, 0x924},
+ { 0x140000, 1, 0x1f, 0x924},
+ { 0x140004, 9, 0xf, 0x924},
+ { 0x140028, 8, 0x1f, 0x924},
+ { 0x140048, 5, 0xf, 0x924},
+ { 0x14005c, 2, 0xf, 0xfff},
+ { 0x140064, 3, 0xf, 0x924},
+ { 0x140070, 1, 0x1f, 0x924},
+ { 0x140074, 10, 0xf, 0x924},
+ { 0x14009c, 1, 0x1f, 0x924},
+ { 0x1400a0, 5, 0xf, 0x924},
+ { 0x1400b4, 7, 0x1f, 0x924},
+ { 0x1400d0, 2, 0xf, 0xfff},
+ { 0x1400d8, 2, 0xf, 0x924},
+ { 0x1400e0, 1, 0xf, 0xfff},
+ { 0x1400e4, 5, 0xf, 0x924},
+ { 0x1400f8, 2, 0x1f, 0x924},
+ { 0x140100, 5, 0x3, 0x924},
+ { 0x140114, 5, 0xf, 0x924},
+ { 0x140128, 7, 0x1f, 0x924},
+ { 0x140144, 9, 0xf, 0x924},
+ { 0x140168, 8, 0x1f, 0x924},
+ { 0x140188, 3, 0xf, 0x924},
+ { 0x140194, 13, 0x1f, 0x924},
+ { 0x1401d8, 2, 0x1f, 0x1fff},
+ { 0x1401e4, 1, 0x1f, 0x1fff},
+ { 0x140200, 6, 0xf, 0xfff},
+ { 0x1402e0, 2, 0xc, 0x924},
+ { 0x1402e8, 2, 0x1c, 0x924},
+ { 0x1402f0, 9, 0xc, 0x924},
+ { 0x140314, 9, 0x10, 0x924},
+ { 0x140338, 7, 0x10, 0xfff},
+ { 0x140354, 7, 0x10, 0x924},
+ { 0x140370, 7, 0x10, 0xfff},
+ { 0x14038c, 14, 0x10, 0x924},
+ { 0x1404b0, 14, 0x10, 0x924},
+ { 0x15c000, 2, 0x1e, 0x924},
+ { 0x15c008, 5, 0x2, 0x924},
+ { 0x15c020, 8, 0x1c, 0x924},
+ { 0x15c040, 1, 0xc, 0x924},
+ { 0x15c044, 2, 0x1c, 0x924},
+ { 0x15c04c, 8, 0xc, 0x924},
+ { 0x15c06c, 8, 0x1c, 0x924},
+ { 0x15c090, 13, 0x1c, 0x924},
+ { 0x15c0c8, 24, 0x1c, 0x924},
+ { 0x15c128, 2, 0xc, 0x924},
+ { 0x15c130, 1, 0x1c, 0x924},
+ { 0x15c138, 6, 0x1c, 0x924},
+ { 0x15c150, 2, 0x18, 0x924},
+ { 0x15c158, 2, 0x8, 0x924},
+ { 0x15c160, 23, 0x10, 0x924},
+ { 0x15c1bc, 6, 0x10, 0xfff},
+ { 0x15c1d4, 23, 0x10, 0x924},
+ { 0x15c230, 7, 0x10, 0xfff},
+ { 0x15c24c, 90, 0x10, 0x924},
+ { 0x160004, 6, 0x18, 0x924},
+ { 0x16003c, 1, 0x10, 0x924},
+ { 0x160040, 6, 0x18, 0x924},
+ { 0x16005c, 6, 0x18, 0x924},
+ { 0x160074, 1, 0x10, 0x924},
+ { 0x160078, 2, 0x18, 0x924},
+ { 0x160300, 8, 0x18, 0x924},
+ { 0x160330, 6, 0x18, 0x924},
+ { 0x160404, 6, 0x18, 0x924},
+ { 0x16043c, 1, 0x10, 0x924},
+ { 0x160440, 6, 0x18, 0x924},
+ { 0x16045c, 6, 0x18, 0x924},
+ { 0x160474, 1, 0x10, 0x924},
+ { 0x160478, 2, 0x18, 0x924},
+ { 0x160700, 8, 0x18, 0x924},
+ { 0x160730, 6, 0x18, 0x924},
+ { 0x161000, 7, 0x1f, 0x924},
+ { 0x16102c, 2, 0x1f, 0x1fff},
+ { 0x161038, 1, 0x1f, 0x1fff},
+ { 0x16103c, 2, 0x1c, 0x924},
+ { 0x161800, 2, 0x1f, 0x924},
+ { 0x162000, 54, 0x18, 0x924},
+ { 0x162200, 60, 0x18, 0x924},
+ { 0x162400, 54, 0x18, 0x924},
+ { 0x162600, 60, 0x18, 0x924},
+ { 0x162800, 54, 0x18, 0x924},
+ { 0x162a00, 60, 0x18, 0x924},
+ { 0x162c00, 54, 0x18, 0x924},
+ { 0x162e00, 60, 0x18, 0x924},
+ { 0x163000, 1, 0x18, 0x924},
+ { 0x163008, 1, 0x18, 0x924},
+ { 0x163010, 1, 0x18, 0x924},
+ { 0x163018, 1, 0x18, 0x924},
+ { 0x163020, 5, 0x18, 0x924},
+ { 0x163038, 3, 0x18, 0x924},
+ { 0x163048, 3, 0x18, 0x924},
+ { 0x163058, 1, 0x18, 0x924},
+ { 0x163060, 1, 0x18, 0x924},
+ { 0x163068, 1, 0x18, 0x924},
+ { 0x163070, 3, 0x18, 0x924},
+ { 0x163080, 1, 0x18, 0x924},
+ { 0x163088, 3, 0x18, 0x924},
+ { 0x163098, 1, 0x18, 0x924},
+ { 0x1630a0, 1, 0x18, 0x924},
+ { 0x1630a8, 1, 0x18, 0x924},
+ { 0x1630b0, 2, 0x10, 0x924},
+ { 0x1630c0, 1, 0x18, 0x924},
+ { 0x1630c8, 1, 0x18, 0x924},
+ { 0x1630d0, 1, 0x18, 0x924},
+ { 0x1630d8, 1, 0x18, 0x924},
+ { 0x1630e0, 2, 0x18, 0x924},
+ { 0x163110, 1, 0x18, 0x924},
+ { 0x163120, 2, 0x18, 0x924},
+ { 0x163420, 4, 0x18, 0x924},
+ { 0x163438, 2, 0x18, 0x924},
+ { 0x163488, 2, 0x18, 0x924},
+ { 0x163520, 2, 0x18, 0x924},
+ { 0x163800, 1, 0x18, 0x924},
+ { 0x163808, 1, 0x18, 0x924},
+ { 0x163810, 1, 0x18, 0x924},
+ { 0x163818, 1, 0x18, 0x924},
+ { 0x163820, 5, 0x18, 0x924},
+ { 0x163838, 3, 0x18, 0x924},
+ { 0x163848, 3, 0x18, 0x924},
+ { 0x163858, 1, 0x18, 0x924},
+ { 0x163860, 1, 0x18, 0x924},
+ { 0x163868, 1, 0x18, 0x924},
+ { 0x163870, 3, 0x18, 0x924},
+ { 0x163880, 1, 0x18, 0x924},
+ { 0x163888, 3, 0x18, 0x924},
+ { 0x163898, 1, 0x18, 0x924},
+ { 0x1638a0, 1, 0x18, 0x924},
+ { 0x1638a8, 1, 0x18, 0x924},
+ { 0x1638b0, 2, 0x10, 0x924},
+ { 0x1638c0, 1, 0x18, 0x924},
+ { 0x1638c8, 1, 0x18, 0x924},
+ { 0x1638d0, 1, 0x18, 0x924},
+ { 0x1638d8, 1, 0x18, 0x924},
+ { 0x1638e0, 2, 0x18, 0x924},
+ { 0x163910, 1, 0x18, 0x924},
+ { 0x163920, 2, 0x18, 0x924},
+ { 0x163c20, 4, 0x18, 0x924},
+ { 0x163c38, 2, 0x18, 0x924},
+ { 0x163c88, 2, 0x18, 0x924},
+ { 0x163d20, 2, 0x18, 0x924},
+ { 0x164000, 5, 0x1f, 0x924},
+ { 0x164014, 2, 0x1f, 0xfff},
+ { 0x16401c, 53, 0x1f, 0x924},
+ { 0x164100, 2, 0x1f, 0x1fff},
+ { 0x16410c, 1, 0x1f, 0x1fff},
+ { 0x164110, 2, 0x1e, 0x924},
+ { 0x164118, 15, 0x1c, 0x924},
+ { 0x164200, 1, 0x1f, 0x924},
+ { 0x164208, 1, 0x1f, 0x924},
+ { 0x164210, 1, 0x1f, 0x924},
+ { 0x164218, 1, 0x1f, 0x924},
+ { 0x164220, 1, 0x1f, 0x924},
+ { 0x164228, 1, 0x1f, 0x924},
+ { 0x164230, 1, 0x1f, 0x924},
+ { 0x164238, 1, 0x1f, 0x924},
+ { 0x164240, 1, 0x1f, 0x924},
+ { 0x164248, 1, 0x1f, 0x924},
+ { 0x164250, 1, 0x1f, 0x924},
+ { 0x164258, 1, 0x1f, 0x924},
+ { 0x164260, 1, 0x1f, 0x924},
+ { 0x164270, 2, 0x1f, 0x924},
+ { 0x164280, 2, 0x1f, 0x924},
+ { 0x164800, 2, 0x1f, 0x924},
+ { 0x165000, 2, 0x1f, 0x924},
+ { 0x166000, 164, 0x1f, 0x924},
+ { 0x1662b0, 2, 0x1f, 0x1fff},
+ { 0x1662bc, 1, 0x1f, 0x1fff},
+ { 0x1662cc, 7, 0x1c, 0x924},
+ { 0x166400, 49, 0x1f, 0x924},
+ { 0x1664c8, 32, 0x1f, 0x924},
+ { 0x166548, 1, 0x1f, 0xfff},
+ { 0x16654c, 1, 0x1f, 0x924},
+ { 0x166550, 1, 0x1f, 0xfff},
+ { 0x166554, 1, 0x1f, 0x924},
+ { 0x166558, 1, 0x1f, 0xfff},
+ { 0x16655c, 1, 0x1f, 0x924},
+ { 0x166568, 2, 0x1f, 0x924},
+ { 0x166570, 5, 0x1c, 0x924},
+ { 0x166800, 1, 0x1f, 0x924},
+ { 0x168000, 1, 0x1f, 0xfff},
+ { 0x168004, 1, 0x1f, 0x924},
+ { 0x168008, 1, 0x1f, 0xfff},
+ { 0x16800c, 1, 0x1f, 0x924},
+ { 0x168010, 1, 0x1f, 0xfff},
+ { 0x168014, 1, 0x1f, 0x924},
+ { 0x168018, 1, 0x1f, 0xfff},
+ { 0x16801c, 3, 0x1f, 0x924},
+ { 0x168028, 2, 0x1f, 0xfff},
+ { 0x168030, 10, 0x1f, 0x924},
+ { 0x168058, 9, 0x1f, 0xfff},
+ { 0x16807c, 106, 0x1f, 0x924},
+ { 0x168224, 2, 0x3, 0x924},
+ { 0x16822c, 3, 0x1f, 0x924},
+ { 0x168238, 1, 0x1f, 0xfff},
+ { 0x16823c, 25, 0x1f, 0x924},
+ { 0x1682a0, 12, 0x3, 0x924},
+ { 0x1682d0, 7, 0x1f, 0xfff},
+ { 0x1682ec, 5, 0x1f, 0x924},
+ { 0x168300, 2, 0x3, 0xfff},
+ { 0x168308, 65, 0x1f, 0xfff},
+ { 0x16840c, 1, 0x1f, 0x924},
+ { 0x168410, 2, 0x1f, 0xfff},
+ { 0x168418, 2, 0x3, 0x924},
+ { 0x168420, 6, 0x1f, 0x924},
+ { 0x168448, 2, 0x1f, 0x1fff},
+ { 0x168454, 1, 0x1f, 0x1fff},
+ { 0x168800, 19, 0x1f, 0x924},
+ { 0x168900, 1, 0x1f, 0x924},
+ { 0x168a00, 128, 0x1f, 0xfff},
+ { 0x16a000, 1536, 0x1f, 0x924},
+ { 0x16c000, 1536, 0x1f, 0x924},
+ { 0x16e000, 16, 0x2, 0x924},
+ { 0x16e040, 8, 0x1c, 0x924},
+ { 0x16e100, 1, 0x2, 0x924},
+ { 0x16e200, 2, 0x2, 0xfff},
+ { 0x16e400, 1, 0x2, 0x924},
+ { 0x16e404, 2, 0x2, 0xfff},
+ { 0x16e40c, 94, 0x2, 0x924},
+ { 0x16e584, 64, 0x2, 0xfff},
+ { 0x16e684, 2, 0x1e, 0xfff},
+ { 0x16e68c, 4, 0x2, 0xfff},
+ { 0x16e69c, 8, 0x2, 0x924},
+ { 0x16e6bc, 4, 0x1e, 0x924},
+ { 0x16e6cc, 4, 0x2, 0x924},
+ { 0x16e6e0, 2, 0x1c, 0x924},
+ { 0x16e6e8, 5, 0xc, 0x924},
+ { 0x16e6fc, 4, 0x1c, 0xfff},
+ { 0x16e70c, 1, 0x1c, 0x924},
+ { 0x16e768, 17, 0x1c, 0x924},
+ { 0x16e7ac, 12, 0x10, 0xfff},
+ { 0x170000, 24, 0x1f, 0x924},
+ { 0x170060, 4, 0x3, 0x924},
+ { 0x170070, 13, 0x1f, 0x924},
+ { 0x1700a4, 1, 0x1f, 0xfff},
+ { 0x1700a8, 1, 0x1f, 0x924},
+ { 0x1700ac, 2, 0x1f, 0xfff},
+ { 0x1700b4, 3, 0x1f, 0x924},
+ { 0x1700c0, 1, 0x1f, 0xfff},
+ { 0x1700c4, 44, 0x1f, 0x924},
+ { 0x170184, 2, 0x1f, 0x1fff},
+ { 0x170190, 1, 0x1f, 0x1fff},
+ { 0x170194, 11, 0x1c, 0x924},
+ { 0x1701c4, 1, 0x1c, 0x924},
+ { 0x1701cc, 7, 0x1c, 0x924},
+ { 0x1701e8, 1, 0x18, 0x924},
+ { 0x1701ec, 1, 0x1c, 0x924},
+ { 0x1701f4, 1, 0x1c, 0x924},
+ { 0x170200, 4, 0x1f, 0x924},
+ { 0x170214, 1, 0x1f, 0x924},
+ { 0x170218, 77, 0x1c, 0x924},
+ { 0x170400, 64, 0x1c, 0x924},
+ { 0x178000, 1, 0x1f, 0x924},
+ { 0x180000, 61, 0x1f, 0x924},
+ { 0x180114, 2, 0x1f, 0x1fff},
+ { 0x180120, 3, 0x1f, 0x1fff},
+ { 0x180130, 1, 0x1f, 0x1fff},
+ { 0x18013c, 2, 0x1e, 0x924},
+ { 0x180200, 27, 0x1f, 0x924},
+ { 0x18026c, 1, 0x1f, 0xfff},
+ { 0x180270, 12, 0x1f, 0x924},
+ { 0x1802a0, 1, 0x1f, 0xfff},
+ { 0x1802a4, 17, 0x1f, 0x924},
+ { 0x180340, 4, 0x1f, 0x924},
+ { 0x180380, 1, 0x1c, 0x924},
+ { 0x180388, 1, 0x1c, 0x924},
+ { 0x180390, 1, 0x1c, 0x924},
+ { 0x180398, 1, 0x1c, 0x924},
+ { 0x1803a0, 5, 0x1c, 0x924},
+ { 0x1803b4, 2, 0x18, 0x924},
+ { 0x180400, 256, 0x3, 0xfff},
+ { 0x181000, 4, 0x1f, 0x93c},
+ { 0x181010, 1020, 0x1f, 0x38},
+ { 0x182000, 4, 0x18, 0x924},
+ { 0x1a0000, 1, 0x1f, 0x92c},
+ { 0x1a0004, 5631, 0x1f, 0x8},
+ { 0x1a5800, 2560, 0x1e, 0x8},
+ { 0x1a8000, 1, 0x1f, 0x92c},
+ { 0x1a8004, 8191, 0x1e, 0x8},
+ { 0x1b0000, 1, 0x1f, 0x92c},
+ { 0x1b0004, 15, 0x2, 0x8},
+ { 0x1b0040, 1, 0x1e, 0x92c},
+ { 0x1b0044, 239, 0x2, 0x8},
+ { 0x1b0400, 1, 0x1f, 0x92c},
+ { 0x1b0404, 255, 0x2, 0x8},
+ { 0x1b0800, 1, 0x1f, 0x924},
+ { 0x1b0840, 1, 0x1e, 0x924},
+ { 0x1b0c00, 1, 0x1f, 0x1fff},
+ { 0x1b1000, 1, 0x1f, 0x1fff},
+ { 0x1b1040, 1, 0x1e, 0x1fff},
+ { 0x1b1400, 1, 0x1f, 0x924},
+ { 0x1b1440, 1, 0x1e, 0x924},
+ { 0x1b1480, 1, 0x1e, 0x924},
+ { 0x1b14c0, 1, 0x1e, 0x924},
+ { 0x1b1800, 128, 0x1f, 0x10},
+ { 0x1b1c00, 128, 0x1f, 0x10},
+ { 0x1b2000, 1, 0x1f, 0xdb6},
+ { 0x1b2400, 1, 0x1e, 0x92c},
+ { 0x1b2404, 5631, 0x1c, 0x8},
+ { 0x1b8000, 1, 0x1f, 0xfff},
+ { 0x1b8040, 1, 0x1f, 0xfff},
+ { 0x1b8080, 1, 0x1f, 0xfff},
+ { 0x1b80c0, 1, 0x1f, 0xfff},
+ { 0x1b8100, 1, 0x1f, 0x924},
+ { 0x1b8140, 1, 0x1f, 0x924},
+ { 0x1b8180, 1, 0x1f, 0x924},
+ { 0x1b81c0, 1, 0x1f, 0x924},
+ { 0x1b8200, 1, 0x1f, 0x924},
+ { 0x1b8240, 1, 0x1f, 0x924},
+ { 0x1b8280, 1, 0x1f, 0x924},
+ { 0x1b82c0, 1, 0x1f, 0x924},
+ { 0x1b8300, 1, 0x1f, 0x924},
+ { 0x1b8340, 1, 0x1f, 0x924},
+ { 0x1b8380, 1, 0x1f, 0x924},
+ { 0x1b83c0, 1, 0x1f, 0x924},
+ { 0x1b8400, 1, 0x1f, 0x924},
+ { 0x1b8440, 1, 0x1f, 0x924},
+ { 0x1b8480, 1, 0x1f, 0x924},
+ { 0x1b84c0, 1, 0x1f, 0x924},
+ { 0x1b8500, 1, 0x1f, 0x924},
+ { 0x1b8540, 1, 0x1f, 0x924},
+ { 0x1b8580, 1, 0x1f, 0x924},
+ { 0x1b85c0, 19, 0x1c, 0x924},
+ { 0x1b8800, 1, 0x1f, 0x924},
+ { 0x1b8840, 1, 0x1f, 0x924},
+ { 0x1b8880, 1, 0x1f, 0x924},
+ { 0x1b88c0, 1, 0x1f, 0x924},
+ { 0x1b8900, 1, 0x1f, 0x924},
+ { 0x1b8940, 1, 0x1f, 0x924},
+ { 0x1b8980, 1, 0x1f, 0x924},
+ { 0x1b89c0, 1, 0x1f, 0x924},
+ { 0x1b8a00, 1, 0x1f, 0x934},
+ { 0x1b8a40, 1, 0x1f, 0x924},
+ { 0x1b8a80, 1, 0x1f, 0x492},
+ { 0x1b8ac0, 1, 0x1f, 0x924},
+ { 0x1b8b00, 1, 0x1f, 0x924},
+ { 0x1b8b40, 1, 0x1f, 0x924},
+ { 0x1b8b80, 1, 0x1f, 0x924},
+ { 0x1b8bc0, 1, 0x1f, 0x924},
+ { 0x1b8c00, 1, 0x1f, 0x924},
+ { 0x1b8c40, 1, 0x1f, 0x924},
+ { 0x1b8c80, 1, 0x1f, 0x924},
+ { 0x1b8cc0, 1, 0x1f, 0x924},
+ { 0x1b8cc4, 1, 0x1c, 0x924},
+ { 0x1b8d00, 1, 0x1f, 0x924},
+ { 0x1b8d40, 1, 0x1f, 0x924},
+ { 0x1b8d80, 1, 0x1f, 0x924},
+ { 0x1b8dc0, 1, 0x1f, 0x924},
+ { 0x1b8e00, 1, 0x1f, 0x924},
+ { 0x1b8e40, 1, 0x1f, 0x924},
+ { 0x1b8e80, 1, 0x1f, 0x924},
+ { 0x1b8e84, 1, 0x1c, 0x924},
+ { 0x1b8ec0, 1, 0x1e, 0x924},
+ { 0x1b8f00, 1, 0x1e, 0x924},
+ { 0x1b8f40, 1, 0x1e, 0x924},
+ { 0x1b8f80, 1, 0x1e, 0x924},
+ { 0x1b8fc0, 1, 0x1e, 0x924},
+ { 0x1b8fd4, 5, 0x1c, 0x924},
+ { 0x1b8fe8, 2, 0x18, 0x924},
+ { 0x1b9000, 1, 0x1c, 0x924},
+ { 0x1b9040, 3, 0x1c, 0x924},
+ { 0x1b905c, 1, 0x18, 0x924},
+ { 0x1b9064, 1, 0x10, 0x924},
+ { 0x1b9080, 10, 0x10, 0x924},
+ { 0x1c0000, 2, 0x1f, 0x924},
+ { 0x200000, 65, 0x1f, 0x924},
+ { 0x200124, 2, 0x1f, 0x1fff},
+ { 0x200130, 3, 0x1f, 0x1fff},
+ { 0x200140, 1, 0x1f, 0x1fff},
+ { 0x20014c, 2, 0x1e, 0x924},
+ { 0x200200, 27, 0x1f, 0x924},
+ { 0x20026c, 1, 0x1f, 0xfff},
+ { 0x200270, 12, 0x1f, 0x924},
+ { 0x2002a0, 1, 0x1f, 0xfff},
+ { 0x2002a4, 17, 0x1f, 0x924},
+ { 0x200340, 4, 0x1f, 0x924},
+ { 0x200380, 1, 0x1c, 0x924},
+ { 0x200388, 1, 0x1c, 0x924},
+ { 0x200390, 1, 0x1c, 0x924},
+ { 0x200398, 1, 0x1c, 0x924},
+ { 0x2003a0, 1, 0x1c, 0x924},
+ { 0x2003a8, 2, 0x1c, 0x924},
+ { 0x200400, 256, 0x3, 0xfff},
+ { 0x202000, 4, 0x1f, 0x1927},
+ { 0x202010, 2044, 0x1f, 0x1007},
+ { 0x204000, 4, 0x18, 0x924},
+ { 0x220000, 1, 0x1f, 0x925},
+ { 0x220004, 5631, 0x1f, 0x1},
+ { 0x225800, 2560, 0x1e, 0x1},
+ { 0x228000, 1, 0x1f, 0x925},
+ { 0x228004, 8191, 0x1e, 0x1},
+ { 0x230000, 1, 0x1f, 0x925},
+ { 0x230004, 15, 0x2, 0x1},
+ { 0x230040, 1, 0x1e, 0x925},
+ { 0x230044, 239, 0x2, 0x1},
+ { 0x230400, 1, 0x1f, 0x925},
+ { 0x230404, 255, 0x2, 0x1},
+ { 0x230800, 1, 0x1f, 0x924},
+ { 0x230840, 1, 0x1e, 0x924},
+ { 0x230c00, 1, 0x1f, 0x924},
+ { 0x231000, 1, 0x1f, 0x924},
+ { 0x231040, 1, 0x1e, 0x924},
+ { 0x231400, 1, 0x1f, 0x924},
+ { 0x231440, 1, 0x1e, 0x924},
+ { 0x231480, 1, 0x1e, 0x924},
+ { 0x2314c0, 1, 0x1e, 0x924},
+ { 0x231800, 128, 0x1f, 0x2},
+ { 0x231c00, 128, 0x1f, 0x2},
+ { 0x232000, 1, 0x1f, 0xdb6},
+ { 0x232400, 1, 0x1e, 0x925},
+ { 0x232404, 5631, 0x1c, 0x1},
+ { 0x238000, 1, 0x1f, 0xfff},
+ { 0x238040, 1, 0x1f, 0xfff},
+ { 0x238080, 1, 0x1f, 0xfff},
+ { 0x2380c0, 1, 0x1f, 0xfff},
+ { 0x238100, 1, 0x1f, 0x924},
+ { 0x238140, 1, 0x1f, 0x924},
+ { 0x238180, 1, 0x1f, 0x924},
+ { 0x2381c0, 1, 0x1f, 0x924},
+ { 0x238200, 1, 0x1f, 0x924},
+ { 0x238240, 1, 0x1f, 0x924},
+ { 0x238280, 1, 0x1f, 0x924},
+ { 0x2382c0, 1, 0x1f, 0x924},
+ { 0x238300, 1, 0x1f, 0x924},
+ { 0x238340, 1, 0x1f, 0x924},
+ { 0x238380, 1, 0x1f, 0x924},
+ { 0x2383c0, 1, 0x1f, 0x924},
+ { 0x238400, 1, 0x1f, 0x924},
+ { 0x238440, 1, 0x1f, 0x924},
+ { 0x238480, 1, 0x1f, 0x924},
+ { 0x2384c0, 1, 0x1f, 0x924},
+ { 0x238500, 1, 0x1f, 0x924},
+ { 0x238540, 1, 0x1f, 0x924},
+ { 0x238580, 1, 0x1f, 0x924},
+ { 0x2385c0, 19, 0x1c, 0x924},
+ { 0x238800, 1, 0x1f, 0x924},
+ { 0x238840, 1, 0x1f, 0x924},
+ { 0x238880, 1, 0x1f, 0x924},
+ { 0x2388c0, 1, 0x1f, 0x924},
+ { 0x238900, 1, 0x1f, 0x924},
+ { 0x238940, 1, 0x1f, 0x924},
+ { 0x238980, 1, 0x1f, 0x924},
+ { 0x2389c0, 1, 0x1f, 0x924},
+ { 0x238a00, 1, 0x1f, 0x926},
+ { 0x238a40, 1, 0x1f, 0x924},
+ { 0x238a80, 1, 0x1f, 0x492},
+ { 0x238ac0, 1, 0x1f, 0x924},
+ { 0x238b00, 1, 0x1f, 0x924},
+ { 0x238b40, 1, 0x1f, 0x924},
+ { 0x238b80, 1, 0x1f, 0x924},
+ { 0x238bc0, 1, 0x1f, 0x924},
+ { 0x238c00, 1, 0x1f, 0x924},
+ { 0x238c40, 1, 0x1f, 0x924},
+ { 0x238c80, 1, 0x1f, 0x924},
+ { 0x238cc0, 1, 0x1f, 0x924},
+ { 0x238cc4, 1, 0x1c, 0x924},
+ { 0x238d00, 1, 0x1f, 0x924},
+ { 0x238d40, 1, 0x1f, 0x924},
+ { 0x238d80, 1, 0x1f, 0x924},
+ { 0x238dc0, 1, 0x1f, 0x924},
+ { 0x238e00, 1, 0x1f, 0x924},
+ { 0x238e40, 1, 0x1f, 0x924},
+ { 0x238e80, 1, 0x1f, 0x924},
+ { 0x238e84, 1, 0x1c, 0x924},
+ { 0x238ec0, 1, 0x1e, 0x924},
+ { 0x238f00, 1, 0x1e, 0x924},
+ { 0x238f40, 1, 0x1e, 0x924},
+ { 0x238f80, 1, 0x1e, 0x924},
+ { 0x238fc0, 1, 0x1e, 0x924},
+ { 0x238fd4, 5, 0x1c, 0x924},
+ { 0x238fe8, 2, 0x18, 0x924},
+ { 0x239000, 1, 0x1c, 0x924},
+ { 0x239040, 3, 0x1c, 0x924},
+ { 0x23905c, 1, 0x18, 0x924},
+ { 0x239064, 1, 0x10, 0x924},
+ { 0x239080, 10, 0x10, 0x924},
+ { 0x240000, 2, 0x1f, 0x924},
+ { 0x280000, 65, 0x1f, 0x924},
+ { 0x280124, 2, 0x1f, 0x1fff},
+ { 0x280130, 3, 0x1f, 0x1fff},
+ { 0x280140, 1, 0x1f, 0x1fff},
+ { 0x28014c, 2, 0x1e, 0x924},
+ { 0x280200, 27, 0x1f, 0x924},
+ { 0x28026c, 1, 0x1f, 0xfff},
+ { 0x280270, 12, 0x1f, 0x924},
+ { 0x2802a0, 1, 0x1f, 0xfff},
+ { 0x2802a4, 17, 0x1f, 0x924},
+ { 0x280340, 4, 0x1f, 0x924},
+ { 0x280380, 1, 0x1c, 0x924},
+ { 0x280388, 1, 0x1c, 0x924},
+ { 0x280390, 1, 0x1c, 0x924},
+ { 0x280398, 1, 0x1c, 0x924},
+ { 0x2803a0, 1, 0x1c, 0x924},
+ { 0x2803a8, 2, 0x1c, 0x924},
+ { 0x280400, 256, 0x3, 0xfff},
+ { 0x282000, 4, 0x1f, 0x9e4},
+ { 0x282010, 2044, 0x1f, 0x1c0},
+ { 0x284000, 4, 0x18, 0x924},
+ { 0x2a0000, 1, 0x1f, 0x964},
+ { 0x2a0004, 5631, 0x1f, 0x40},
+ { 0x2a5800, 2560, 0x1e, 0x40},
+ { 0x2a8000, 1, 0x1f, 0x964},
+ { 0x2a8004, 8191, 0x1e, 0x40},
+ { 0x2b0000, 1, 0x1f, 0x964},
+ { 0x2b0004, 15, 0x2, 0x40},
+ { 0x2b0040, 1, 0x1e, 0x964},
+ { 0x2b0044, 239, 0x2, 0x40},
+ { 0x2b0400, 1, 0x1f, 0x964},
+ { 0x2b0404, 255, 0x2, 0x40},
+ { 0x2b0800, 1, 0x1f, 0x924},
+ { 0x2b0840, 1, 0x1e, 0x924},
+ { 0x2b0c00, 1, 0x1f, 0x924},
+ { 0x2b1000, 1, 0x1f, 0x924},
+ { 0x2b1040, 1, 0x1e, 0x924},
+ { 0x2b1400, 1, 0x1f, 0x924},
+ { 0x2b1440, 1, 0x1e, 0x924},
+ { 0x2b1480, 1, 0x1e, 0x924},
+ { 0x2b14c0, 1, 0x1e, 0x924},
+ { 0x2b1800, 128, 0x1f, 0x80},
+ { 0x2b1c00, 128, 0x1f, 0x80},
+ { 0x2b2000, 1, 0x1f, 0xdb6},
+ { 0x2b2400, 1, 0x1e, 0x964},
+ { 0x2b2404, 5631, 0x1c, 0x40},
+ { 0x2b8000, 1, 0x1f, 0xfff},
+ { 0x2b8040, 1, 0x1f, 0xfff},
+ { 0x2b8080, 1, 0x1f, 0xfff},
+ { 0x2b80c0, 1, 0x1f, 0x924},
+ { 0x2b8100, 1, 0x1f, 0x924},
+ { 0x2b8140, 1, 0x1f, 0x924},
+ { 0x2b8180, 1, 0x1f, 0x924},
+ { 0x2b81c0, 1, 0x1f, 0x924},
+ { 0x2b8200, 1, 0x1f, 0x924},
+ { 0x2b8240, 1, 0x1f, 0x924},
+ { 0x2b8280, 1, 0x1f, 0x924},
+ { 0x2b82c0, 1, 0x1f, 0x924},
+ { 0x2b8300, 1, 0x1f, 0x924},
+ { 0x2b8340, 1, 0x1f, 0x924},
+ { 0x2b8380, 1, 0x1f, 0x924},
+ { 0x2b83c0, 1, 0x1f, 0x924},
+ { 0x2b8400, 1, 0x1f, 0x924},
+ { 0x2b8440, 1, 0x1f, 0x924},
+ { 0x2b8480, 1, 0x1f, 0x924},
+ { 0x2b84c0, 1, 0x1f, 0x924},
+ { 0x2b8500, 1, 0x1f, 0x924},
+ { 0x2b8540, 1, 0x1f, 0x924},
+ { 0x2b8580, 1, 0x1f, 0x924},
+ { 0x2b85c0, 19, 0x1c, 0x924},
+ { 0x2b8800, 1, 0x1f, 0x924},
+ { 0x2b8840, 1, 0x1f, 0x924},
+ { 0x2b8880, 1, 0x1f, 0x924},
+ { 0x2b88c0, 1, 0x1f, 0x924},
+ { 0x2b8900, 1, 0x1f, 0x924},
+ { 0x2b8940, 1, 0x1f, 0x924},
+ { 0x2b8980, 1, 0x1f, 0x924},
+ { 0x2b89c0, 1, 0x1f, 0x924},
+ { 0x2b8a00, 1, 0x1f, 0x9a4},
+ { 0x2b8a40, 1, 0x1f, 0x924},
+ { 0x2b8a80, 1, 0x1f, 0x492},
+ { 0x2b8ac0, 1, 0x1f, 0x924},
+ { 0x2b8b00, 1, 0x1f, 0x924},
+ { 0x2b8b40, 1, 0x1f, 0x924},
+ { 0x2b8b80, 1, 0x1f, 0x924},
+ { 0x2b8bc0, 1, 0x1f, 0x924},
+ { 0x2b8c00, 1, 0x1f, 0x924},
+ { 0x2b8c40, 1, 0x1f, 0x924},
+ { 0x2b8c80, 1, 0x1f, 0x924},
+ { 0x2b8cc0, 1, 0x1f, 0x924},
+ { 0x2b8cc4, 1, 0x1c, 0x924},
+ { 0x2b8d00, 1, 0x1f, 0x924},
+ { 0x2b8d40, 1, 0x1f, 0x924},
+ { 0x2b8d80, 1, 0x1f, 0x924},
+ { 0x2b8dc0, 1, 0x1f, 0x924},
+ { 0x2b8e00, 1, 0x1f, 0x924},
+ { 0x2b8e40, 1, 0x1f, 0x924},
+ { 0x2b8e80, 1, 0x1f, 0x924},
+ { 0x2b8e84, 1, 0x1c, 0x924},
+ { 0x2b8ec0, 1, 0x1e, 0x924},
+ { 0x2b8f00, 1, 0x1e, 0x924},
+ { 0x2b8f40, 1, 0x1e, 0x924},
+ { 0x2b8f80, 1, 0x1e, 0x924},
+ { 0x2b8fc0, 1, 0x1e, 0x924},
+ { 0x2b8fd4, 5, 0x1c, 0x924},
+ { 0x2b8fe8, 2, 0x18, 0x924},
+ { 0x2b9000, 1, 0x1c, 0x924},
+ { 0x2b9040, 3, 0x1c, 0x924},
+ { 0x2b905c, 1, 0x18, 0x924},
+ { 0x2b9064, 1, 0x10, 0x924},
+ { 0x2b9080, 10, 0x10, 0x924},
+ { 0x2c0000, 2, 0x1f, 0x1fff},
+ { 0x300000, 65, 0x1f, 0x924},
+ { 0x300124, 2, 0x1f, 0x1fff},
+ { 0x300130, 3, 0x1f, 0x1fff},
+ { 0x300140, 1, 0x1f, 0x1fff},
+ { 0x30014c, 2, 0x1e, 0x924},
+ { 0x300200, 27, 0x1f, 0x924},
+ { 0x30026c, 1, 0x1f, 0xfff},
+ { 0x300270, 12, 0x1f, 0x924},
+ { 0x3002a0, 1, 0x1f, 0xfff},
+ { 0x3002a4, 17, 0x1f, 0x924},
+ { 0x300340, 4, 0x1f, 0x924},
+ { 0x300380, 1, 0x1c, 0x924},
+ { 0x300388, 1, 0x1c, 0x924},
+ { 0x300390, 1, 0x1c, 0x924},
+ { 0x300398, 1, 0x1c, 0x924},
+ { 0x3003a0, 1, 0x1c, 0x924},
+ { 0x3003a8, 2, 0x1c, 0x924},
+ { 0x300400, 256, 0x3, 0xfff},
+ { 0x302000, 4, 0x1f, 0xf24},
+ { 0x302010, 2044, 0x1f, 0xe00},
+ { 0x304000, 4, 0x18, 0x924},
+ { 0x320000, 1, 0x1f, 0xb24},
+ { 0x320004, 5631, 0x1f, 0x200},
+ { 0x325800, 2560, 0x1e, 0x200},
+ { 0x328000, 1, 0x1f, 0xb24},
+ { 0x328004, 8191, 0x1e, 0x200},
+ { 0x330000, 1, 0x1f, 0xb24},
+ { 0x330004, 15, 0x2, 0x200},
+ { 0x330040, 1, 0x1e, 0xb24},
+ { 0x330044, 239, 0x2, 0x200},
+ { 0x330400, 1, 0x1f, 0xb24},
+ { 0x330404, 255, 0x2, 0x200},
+ { 0x330800, 1, 0x1f, 0x924},
+ { 0x330840, 1, 0x1e, 0x924},
+ { 0x330c00, 1, 0x1f, 0x924},
+ { 0x331000, 1, 0x1f, 0x924},
+ { 0x331040, 1, 0x1e, 0x924},
+ { 0x331400, 1, 0x1f, 0x924},
+ { 0x331440, 1, 0x1e, 0x924},
+ { 0x331480, 1, 0x1e, 0x924},
+ { 0x3314c0, 1, 0x1e, 0x924},
+ { 0x331800, 128, 0x1f, 0x400},
+ { 0x331c00, 128, 0x1f, 0x400},
+ { 0x332000, 1, 0x1f, 0xdb6},
+ { 0x332400, 1, 0x1e, 0xb24},
+ { 0x332404, 5631, 0x1c, 0x200},
+ { 0x338000, 1, 0x1f, 0xfff},
+ { 0x338040, 1, 0x1f, 0xfff},
+ { 0x338080, 1, 0x1f, 0xfff},
+ { 0x3380c0, 1, 0x1f, 0xfff},
+ { 0x338100, 1, 0x1f, 0x924},
+ { 0x338140, 1, 0x1f, 0x924},
+ { 0x338180, 1, 0x1f, 0x924},
+ { 0x3381c0, 1, 0x1f, 0x924},
+ { 0x338200, 1, 0x1f, 0x924},
+ { 0x338240, 1, 0x1f, 0x924},
+ { 0x338280, 1, 0x1f, 0x924},
+ { 0x3382c0, 1, 0x1f, 0x924},
+ { 0x338300, 1, 0x1f, 0x924},
+ { 0x338340, 1, 0x1f, 0x924},
+ { 0x338380, 1, 0x1f, 0x924},
+ { 0x3383c0, 1, 0x1f, 0x924},
+ { 0x338400, 1, 0x1f, 0x924},
+ { 0x338440, 1, 0x1f, 0x924},
+ { 0x338480, 1, 0x1f, 0x924},
+ { 0x3384c0, 1, 0x1f, 0x924},
+ { 0x338500, 1, 0x1f, 0x924},
+ { 0x338540, 1, 0x1f, 0x924},
+ { 0x338580, 1, 0x1f, 0x924},
+ { 0x3385c0, 19, 0x1c, 0x924},
+ { 0x338800, 1, 0x1f, 0x924},
+ { 0x338840, 1, 0x1f, 0x924},
+ { 0x338880, 1, 0x1f, 0x924},
+ { 0x3388c0, 1, 0x1f, 0x924},
+ { 0x338900, 1, 0x1f, 0x924},
+ { 0x338940, 1, 0x1f, 0x924},
+ { 0x338980, 1, 0x1f, 0x924},
+ { 0x3389c0, 1, 0x1f, 0x924},
+ { 0x338a00, 1, 0x1f, 0xd24},
+ { 0x338a40, 1, 0x1f, 0x924},
+ { 0x338a80, 1, 0x1f, 0x492},
+ { 0x338ac0, 1, 0x1f, 0x924},
+ { 0x338b00, 1, 0x1f, 0x924},
+ { 0x338b40, 1, 0x1f, 0x924},
+ { 0x338b80, 1, 0x1f, 0x924},
+ { 0x338bc0, 1, 0x1f, 0x924},
+ { 0x338c00, 1, 0x1f, 0x924},
+ { 0x338c40, 1, 0x1f, 0x924},
+ { 0x338c80, 1, 0x1f, 0x924},
+ { 0x338cc0, 1, 0x1f, 0x924},
+ { 0x338cc4, 1, 0x1c, 0x924},
+ { 0x338d00, 1, 0x1f, 0x924},
+ { 0x338d40, 1, 0x1f, 0x924},
+ { 0x338d80, 1, 0x1f, 0x924},
+ { 0x338dc0, 1, 0x1f, 0x924},
+ { 0x338e00, 1, 0x1f, 0x924},
+ { 0x338e40, 1, 0x1f, 0x924},
+ { 0x338e80, 1, 0x1f, 0x924},
+ { 0x338e84, 1, 0x1c, 0x924},
+ { 0x338ec0, 1, 0x1e, 0x924},
+ { 0x338f00, 1, 0x1e, 0x924},
+ { 0x338f40, 1, 0x1e, 0x924},
+ { 0x338f80, 1, 0x1e, 0x924},
+ { 0x338fc0, 1, 0x1e, 0x924},
+ { 0x338fd4, 5, 0x1c, 0x924},
+ { 0x338fe8, 2, 0x18, 0x924},
+ { 0x339000, 1, 0x1c, 0x924},
+ { 0x339040, 3, 0x1c, 0x924},
+ { 0x33905c, 1, 0x18, 0x924},
+ { 0x339064, 1, 0x10, 0x924},
+ { 0x339080, 10, 0x10, 0x924},
+ { 0x340000, 2, 0x1f, 0x924},
+ { 0x3a0000, 40960, 0x1c, 0x1000}
};
-#define REGS_COUNT ARRAY_SIZE(reg_addrs)
-static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a };
+#define REGS_COUNT ARRAY_SIZE(reg_addrs)
-static const u32 page_vals_e2[] = { 0, 128 };
-#define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2)
+static const struct reg_addr idle_reg_addrs[] = {
+ { 0x2104, 1, 0x1f, 0xfff},
+ { 0x2110, 2, 0x1f, 0xfff},
+ { 0x211c, 8, 0x1f, 0xfff},
+ { 0x2814, 1, 0x1f, 0xfff},
+ { 0x281c, 2, 0x1f, 0xfff},
+ { 0x2854, 1, 0x1f, 0xfff},
+ { 0x285c, 1, 0x1f, 0xfff},
+ { 0x3040, 1, 0x1f, 0xfff},
+ { 0x9010, 7, 0x1c, 0xfff},
+ { 0x9030, 1, 0x1c, 0xfff},
+ { 0x9068, 16, 0x1c, 0xfff},
+ { 0x9230, 2, 0x1c, 0xfff},
+ { 0x9244, 1, 0x1c, 0xfff},
+ { 0x9298, 1, 0x1c, 0xfff},
+ { 0x92a8, 1, 0x1c, 0x1fff},
+ { 0xa38c, 1, 0x1f, 0x1fff},
+ { 0xa3c4, 1, 0x1e, 0xfff},
+ { 0xa404, 1, 0x1f, 0xfff},
+ { 0xa408, 2, 0x1f, 0x1fff},
+ { 0xa42c, 12, 0x1f, 0xfff},
+ { 0xa580, 1, 0x1f, 0x1fff},
+ { 0xa590, 1, 0x1f, 0x1fff},
+ { 0xa600, 5, 0x1e, 0xfff},
+ { 0xa618, 1, 0x1e, 0xfff},
+ { 0xa714, 1, 0x1c, 0xfff},
+ { 0xa720, 1, 0x1c, 0xfff},
+ { 0xa750, 1, 0x1c, 0xfff},
+ { 0xc09c, 1, 0x3, 0xfff},
+ { 0x103b0, 1, 0x1f, 0xfff},
+ { 0x103c0, 1, 0x1f, 0xfff},
+ { 0x103d0, 1, 0x3, 0x1fff},
+ { 0x10418, 1, 0x1f, 0xfff},
+ { 0x10420, 1, 0x1f, 0xfff},
+ { 0x10428, 1, 0x1f, 0xfff},
+ { 0x10460, 1, 0x1f, 0xfff},
+ { 0x10474, 1, 0x1f, 0xfff},
+ { 0x104e0, 1, 0x1f, 0xfff},
+ { 0x104ec, 1, 0x1f, 0xfff},
+ { 0x104f8, 1, 0x1f, 0xfff},
+ { 0x10508, 1, 0x1f, 0xfff},
+ { 0x10530, 1, 0x1f, 0xfff},
+ { 0x10538, 1, 0x1f, 0xfff},
+ { 0x10548, 1, 0x1f, 0xfff},
+ { 0x10558, 1, 0x1f, 0xfff},
+ { 0x182a8, 1, 0x1c, 0xfff},
+ { 0x182b8, 1, 0x1c, 0xfff},
+ { 0x18308, 1, 0x1c, 0xfff},
+ { 0x18318, 1, 0x1c, 0xfff},
+ { 0x18338, 1, 0x1c, 0xfff},
+ { 0x18348, 1, 0x1c, 0xfff},
+ { 0x183bc, 1, 0x1c, 0x1fff},
+ { 0x183cc, 1, 0x1c, 0x1fff},
+ { 0x18570, 1, 0x18, 0xfff},
+ { 0x18578, 1, 0x18, 0xfff},
+ { 0x1858c, 1, 0x18, 0xfff},
+ { 0x18594, 1, 0x18, 0xfff},
+ { 0x1862c, 4, 0x10, 0xfff},
+ { 0x2021c, 11, 0x1f, 0xfff},
+ { 0x202a8, 1, 0x1f, 0xfff},
+ { 0x202b8, 1, 0x1f, 0x1fff},
+ { 0x20404, 1, 0x1f, 0xfff},
+ { 0x2040c, 2, 0x1f, 0xfff},
+ { 0x2041c, 2, 0x1f, 0xfff},
+ { 0x40154, 14, 0x1f, 0xfff},
+ { 0x40198, 1, 0x1f, 0x1fff},
+ { 0x404ac, 1, 0x1f, 0xfff},
+ { 0x404bc, 1, 0x1f, 0x1fff},
+ { 0x42290, 1, 0x1f, 0xfff},
+ { 0x422a0, 1, 0x1f, 0xfff},
+ { 0x422b0, 1, 0x1f, 0x1fff},
+ { 0x42548, 1, 0x1f, 0xfff},
+ { 0x42550, 1, 0x1f, 0xfff},
+ { 0x42558, 1, 0x1f, 0xfff},
+ { 0x50160, 8, 0x1f, 0xfff},
+ { 0x501d0, 1, 0x1f, 0xfff},
+ { 0x501e0, 1, 0x1f, 0x1fff},
+ { 0x50204, 1, 0x1f, 0xfff},
+ { 0x5020c, 2, 0x1f, 0xfff},
+ { 0x5021c, 1, 0x1f, 0xfff},
+ { 0x60090, 1, 0x1f, 0xfff},
+ { 0x6011c, 1, 0x1f, 0xfff},
+ { 0x6012c, 1, 0x1f, 0x1fff},
+ { 0xc101c, 1, 0x1f, 0xfff},
+ { 0xc102c, 1, 0x1f, 0x1fff},
+ { 0xc2290, 1, 0x1f, 0xfff},
+ { 0xc22a0, 1, 0x1f, 0xfff},
+ { 0xc22b0, 1, 0x1f, 0x1fff},
+ { 0xc2548, 1, 0x1f, 0xfff},
+ { 0xc2550, 1, 0x1f, 0xfff},
+ { 0xc2558, 1, 0x1f, 0xfff},
+ { 0xc4294, 1, 0x1f, 0xfff},
+ { 0xc42a4, 1, 0x1f, 0xfff},
+ { 0xc42b4, 1, 0x1f, 0x1fff},
+ { 0xc4550, 1, 0x1f, 0xfff},
+ { 0xc4558, 1, 0x1f, 0xfff},
+ { 0xc4560, 1, 0x1f, 0xfff},
+ { 0xd016c, 8, 0x1f, 0xfff},
+ { 0xd01d8, 1, 0x1f, 0xfff},
+ { 0xd01e8, 1, 0x1f, 0x1fff},
+ { 0xd0204, 1, 0x1f, 0xfff},
+ { 0xd020c, 3, 0x1f, 0xfff},
+ { 0xe0154, 8, 0x1f, 0xfff},
+ { 0xe01c8, 1, 0x1f, 0xfff},
+ { 0xe01d8, 1, 0x1f, 0x1fff},
+ { 0xe0204, 1, 0x1f, 0xfff},
+ { 0xe020c, 2, 0x1f, 0xfff},
+ { 0xe021c, 2, 0x1f, 0xfff},
+ { 0x101014, 1, 0x1f, 0xfff},
+ { 0x101030, 1, 0x1f, 0xfff},
+ { 0x101040, 1, 0x1f, 0x1fff},
+ { 0x102058, 1, 0x1f, 0x1fff},
+ { 0x102080, 16, 0x1f, 0xfff},
+ { 0x103004, 2, 0x1f, 0xfff},
+ { 0x103068, 1, 0x1f, 0xfff},
+ { 0x103078, 1, 0x1f, 0xfff},
+ { 0x103088, 1, 0x1f, 0x1fff},
+ { 0x10309c, 2, 0x1e, 0xfff},
+ { 0x1030b8, 2, 0x1c, 0xfff},
+ { 0x1030cc, 1, 0x1c, 0xfff},
+ { 0x1030e0, 1, 0x1c, 0xfff},
+ { 0x104004, 1, 0x1f, 0xfff},
+ { 0x104018, 1, 0x1f, 0xfff},
+ { 0x104020, 1, 0x1f, 0xfff},
+ { 0x10403c, 1, 0x1f, 0xfff},
+ { 0x1040fc, 1, 0x1f, 0xfff},
+ { 0x10410c, 1, 0x1f, 0x1fff},
+ { 0x104400, 1, 0x1f, 0x1fff},
+ { 0x104404, 63, 0x1f, 0xfff},
+ { 0x104800, 1, 0x1f, 0x1fff},
+ { 0x104804, 63, 0x1f, 0xfff},
+ { 0x105000, 4, 0x1f, 0x1fff},
+ { 0x105010, 252, 0x1f, 0xfff},
+ { 0x108094, 1, 0x3, 0xfff},
+ { 0x1201b0, 2, 0x1f, 0xfff},
+ { 0x12032c, 1, 0x1f, 0xfff},
+ { 0x12036c, 3, 0x1f, 0xfff},
+ { 0x120408, 2, 0x1f, 0xfff},
+ { 0x120414, 15, 0x1f, 0xfff},
+ { 0x120478, 2, 0x1f, 0xfff},
+ { 0x12052c, 1, 0x1f, 0xfff},
+ { 0x120564, 3, 0x1f, 0xfff},
+ { 0x12057c, 1, 0x1f, 0x1fff},
+ { 0x12058c, 1, 0x1f, 0x1fff},
+ { 0x120608, 1, 0x1e, 0xfff},
+ { 0x120748, 1, 0x1c, 0xfff},
+ { 0x120778, 2, 0x1c, 0xfff},
+ { 0x120808, 3, 0x1f, 0xfff},
+ { 0x120818, 1, 0x1f, 0xfff},
+ { 0x120820, 1, 0x1f, 0xfff},
+ { 0x120828, 1, 0x1f, 0xfff},
+ { 0x120830, 1, 0x1f, 0xfff},
+ { 0x120838, 1, 0x1f, 0xfff},
+ { 0x120840, 1, 0x1f, 0xfff},
+ { 0x120848, 1, 0x1f, 0xfff},
+ { 0x120850, 1, 0x1f, 0xfff},
+ { 0x120858, 1, 0x1f, 0xfff},
+ { 0x120860, 1, 0x1f, 0xfff},
+ { 0x120868, 1, 0x1f, 0xfff},
+ { 0x120870, 1, 0x1f, 0xfff},
+ { 0x120878, 1, 0x1f, 0xfff},
+ { 0x120880, 1, 0x1f, 0xfff},
+ { 0x120888, 1, 0x1f, 0xfff},
+ { 0x120890, 1, 0x1f, 0xfff},
+ { 0x120898, 1, 0x1f, 0xfff},
+ { 0x1208a0, 1, 0x1f, 0xfff},
+ { 0x1208a8, 1, 0x1f, 0xfff},
+ { 0x1208b0, 1, 0x1f, 0xfff},
+ { 0x1208b8, 1, 0x1f, 0xfff},
+ { 0x1208c0, 1, 0x1f, 0xfff},
+ { 0x1208c8, 1, 0x1f, 0xfff},
+ { 0x1208d0, 1, 0x1f, 0xfff},
+ { 0x1208d8, 1, 0x1f, 0xfff},
+ { 0x1208e0, 1, 0x1f, 0xfff},
+ { 0x1208e8, 1, 0x1f, 0xfff},
+ { 0x1208f0, 1, 0x1f, 0xfff},
+ { 0x1208f8, 1, 0x1f, 0xfff},
+ { 0x120900, 1, 0x1f, 0xfff},
+ { 0x120908, 1, 0x1f, 0xfff},
+ { 0x130030, 1, 0x1c, 0xfff},
+ { 0x13004c, 3, 0x1c, 0xfff},
+ { 0x130064, 2, 0x1c, 0xfff},
+ { 0x13009c, 1, 0x1c, 0x1fff},
+ { 0x130130, 1, 0x1c, 0xfff},
+ { 0x13016c, 1, 0x1c, 0xfff},
+ { 0x130300, 1, 0x1c, 0xfff},
+ { 0x130480, 1, 0x1c, 0xfff},
+ { 0x14005c, 2, 0xf, 0xfff},
+ { 0x1400d0, 2, 0xf, 0xfff},
+ { 0x1400e0, 1, 0xf, 0xfff},
+ { 0x1401c8, 1, 0xf, 0xfff},
+ { 0x140200, 6, 0xf, 0xfff},
+ { 0x140338, 7, 0x10, 0xfff},
+ { 0x140370, 7, 0x10, 0xfff},
+ { 0x15c1bc, 6, 0x10, 0xfff},
+ { 0x15c230, 7, 0x10, 0xfff},
+ { 0x16101c, 1, 0x1f, 0xfff},
+ { 0x16102c, 1, 0x1f, 0x1fff},
+ { 0x164014, 2, 0x1f, 0xfff},
+ { 0x1640f0, 1, 0x1f, 0xfff},
+ { 0x166290, 1, 0x1f, 0xfff},
+ { 0x1662a0, 1, 0x1f, 0xfff},
+ { 0x1662b0, 1, 0x1f, 0x1fff},
+ { 0x166548, 1, 0x1f, 0xfff},
+ { 0x166550, 1, 0x1f, 0xfff},
+ { 0x166558, 1, 0x1f, 0xfff},
+ { 0x168000, 1, 0x1f, 0xfff},
+ { 0x168008, 1, 0x1f, 0xfff},
+ { 0x168010, 1, 0x1f, 0xfff},
+ { 0x168018, 1, 0x1f, 0xfff},
+ { 0x168028, 2, 0x1f, 0xfff},
+ { 0x168058, 9, 0x1f, 0xfff},
+ { 0x168238, 1, 0x1f, 0xfff},
+ { 0x1682d0, 7, 0x1f, 0xfff},
+ { 0x168300, 2, 0x3, 0xfff},
+ { 0x168308, 65, 0x1f, 0xfff},
+ { 0x168410, 2, 0x1f, 0xfff},
+ { 0x168438, 1, 0x1f, 0xfff},
+ { 0x168448, 1, 0x1f, 0x1fff},
+ { 0x168a00, 128, 0x1f, 0xfff},
+ { 0x16e200, 128, 0x2, 0xfff},
+ { 0x16e404, 2, 0x2, 0xfff},
+ { 0x16e584, 64, 0x2, 0xfff},
+ { 0x16e684, 2, 0x1e, 0xfff},
+ { 0x16e68c, 4, 0x2, 0xfff},
+ { 0x16e6fc, 4, 0x1c, 0xfff},
+ { 0x16e7ac, 12, 0x10, 0xfff},
+ { 0x1700a4, 1, 0x1f, 0xfff},
+ { 0x1700ac, 2, 0x1f, 0xfff},
+ { 0x1700c0, 1, 0x1f, 0xfff},
+ { 0x170174, 1, 0x1f, 0xfff},
+ { 0x170184, 1, 0x1f, 0x1fff},
+ { 0x1800f4, 1, 0x1f, 0xfff},
+ { 0x180104, 1, 0x1f, 0xfff},
+ { 0x180114, 1, 0x1f, 0x1fff},
+ { 0x180124, 1, 0x1f, 0x1fff},
+ { 0x18026c, 1, 0x1f, 0xfff},
+ { 0x1802a0, 1, 0x1f, 0xfff},
+ { 0x1b8000, 1, 0x1f, 0xfff},
+ { 0x1b8040, 1, 0x1f, 0xfff},
+ { 0x1b8080, 1, 0x1f, 0xfff},
+ { 0x1b80c0, 1, 0x1f, 0xfff},
+ { 0x200104, 1, 0x1f, 0xfff},
+ { 0x200114, 1, 0x1f, 0xfff},
+ { 0x200124, 1, 0x1f, 0x1fff},
+ { 0x200134, 1, 0x1f, 0x1fff},
+ { 0x20026c, 1, 0x1f, 0xfff},
+ { 0x2002a0, 1, 0x1f, 0xfff},
+ { 0x238000, 1, 0x1f, 0xfff},
+ { 0x238040, 1, 0x1f, 0xfff},
+ { 0x238080, 1, 0x1f, 0xfff},
+ { 0x2380c0, 1, 0x1f, 0xfff},
+ { 0x280104, 1, 0x1f, 0xfff},
+ { 0x280114, 1, 0x1f, 0xfff},
+ { 0x280124, 1, 0x1f, 0x1fff},
+ { 0x280134, 1, 0x1f, 0x1fff},
+ { 0x28026c, 1, 0x1f, 0xfff},
+ { 0x2802a0, 1, 0x1f, 0xfff},
+ { 0x2b8000, 1, 0x1f, 0xfff},
+ { 0x2b8040, 1, 0x1f, 0xfff},
+ { 0x2b8080, 1, 0x1f, 0xfff},
+ { 0x300104, 1, 0x1f, 0xfff},
+ { 0x300114, 1, 0x1f, 0xfff},
+ { 0x300124, 1, 0x1f, 0x1fff},
+ { 0x300134, 1, 0x1f, 0x1fff},
+ { 0x30026c, 1, 0x1f, 0xfff},
+ { 0x3002a0, 1, 0x1f, 0xfff},
+ { 0x338000, 1, 0x1f, 0xfff},
+ { 0x338040, 1, 0x1f, 0xfff},
+ { 0x338080, 1, 0x1f, 0xfff},
+ { 0x3380c0, 1, 0x1f, 0xfff}
+};
-static const u32 page_write_regs_e2[] = { 328476 };
-#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2)
+#define IDLE_REGS_COUNT ARRAY_SIZE(idle_reg_addrs)
-static const struct reg_addr page_read_regs_e2[] = {
- { 0x58000, 4608, RI_E2_ONLINE } };
-#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2)
+static const u32 read_reg_e1[] = {
+ 0x1b1000};
-static const u32 page_vals_e3[] = { 0, 128 };
-#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3)
+static const struct wreg_addr wreg_addr_e1 = {
+ 0x1b0c00, 192, 1, read_reg_e1, 0x1f, 0x1fff};
-static const u32 page_write_regs_e3[] = { 328476 };
-#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3)
+static const u32 read_reg_e1h[] = {
+ 0x1b1040, 0x1b1000};
-static const struct reg_addr page_read_regs_e3[] = {
- { 0x58000, 4608, RI_E3E3B0_ONLINE } };
-#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3)
+static const struct wreg_addr wreg_addr_e1h = {
+ 0x1b0c00, 256, 2, read_reg_e1h, 0x1f, 0x1fff};
+
+static const u32 read_reg_e2[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e2 = {
+ 0x1b0c00, 128, 2, read_reg_e2, 0x1f, 0x1fff};
-#endif /* BNX2X_DUMP_H */
+static const u32 read_reg_e3[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3 = {
+ 0x1b0c00, 128, 2, read_reg_e3, 0x1f, 0x1fff};
+
+static const u32 read_reg_e3b0[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3b0 = {
+ 0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff};
+
+static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = {
+ {20782, 18567, 27975, 19729, 18311, 27719, 20836, 32391, 41799, 20812,
+ 26247, 35655, 19074},
+ {32774, 19297, 33277, 31721, 19041, 33021, 32828, 33121, 47101, 32804,
+ 26977, 40957, 35895},
+ {36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557,
+ 25608, 41377, 43903},
+ {45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269,
+ 25616, 42067, 43903},
+ {45302, 17999, 34802, 44249, 18771, 35571, 45356, 31823, 48626, 45332,
+ 25679, 42482, 43903}
+};
+#endif
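
For readers of the tables above: each entry reads as { addr, size, chips, presets } — the offset of the first register, the number of consecutive 32-bit registers in the run, a bitmask of chip revisions on which the block exists, and a bitmask of dump presets that include it. The helper below is a minimal sketch of how such a table can be filtered for one chip and one preset; the struct layout, field widths and function name are assumptions for illustration only — just REG_RD() and struct bnx2x come from the driver itself, and the real definitions live elsewhere in bnx2x_dump.h.

    /* Sketch only: assumed layout matching the { addr, size, chips, presets }
     * initializers above; not the driver's actual declaration.
     */
    struct reg_addr_sketch {
    	u32 addr;	/* offset of the first register in the run */
    	u32 size;	/* number of consecutive 32-bit registers */
    	u32 chips;	/* chip-revision bitmask (E1, E1H, E2, E3A0, E3B0) */
    	u32 presets;	/* dump-preset bitmask, bit (n - 1) for preset n */
    };

    /* Hypothetical walk over one table: copy every run that belongs to the
     * given chip bit and to the (1-based) preset index, mirroring the
     * IS_*_REG() and IS_REG_IN_PRESET() checks used further down.
     */
    static u32 *sketch_read_preset(struct bnx2x *bp, u32 *p,
    			       const struct reg_addr_sketch *tbl,
    			       unsigned int count, u32 chip_bit, u32 preset)
    {
    	unsigned int i, n;

    	for (i = 0; i < count; i++) {
    		if (!(tbl[i].chips & chip_bit))
    			continue;	/* block absent on this chip */
    		if (!(tbl[i].presets & (1 << (preset - 1))))
    			continue;	/* block not part of this preset */
    		for (n = 0; n < tbl[i].size; n++)
    			*p++ = REG_RD(bp, tbl[i].addr + n * 4);
    	}
    	return p;
    }

On that reading (taking the low chip bit as E1 and so on up to E3B0), an entry such as { 0x3a0000, 40960, 0x1c, 0x1000} above would describe 40960 registers starting at 0x3a0000, present only on the newer chip revisions and dumped only by preset 13.
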
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 277f17e3c8f8..9a674b14b403 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,6 @@
/* bnx2x_ethtool.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -186,6 +186,7 @@ static const struct {
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
+
static int bnx2x_get_port_type(struct bnx2x *bp)
{
int port_type;
@@ -233,7 +234,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
!(bp->flags & MF_FUNC_DIS)) {
- cmd->duplex = bp->link_vars.duplex;
+ cmd->duplex = bp->link_vars.duplex;
if (IS_MF(bp) && !BP_NOMCP(bp))
ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
@@ -399,7 +400,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
- /* Save new config in case command complete successully */
+ /* Save new config in case command complete successfully */
new_multi_phy_config = bp->link_params.multi_phy_config;
/* Get the new cfg_idx */
cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -596,29 +597,58 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
-#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
-#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
-#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
-#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
-#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
+#define DUMP_ALL_PRESETS 0x1FFF
+#define DUMP_MAX_PRESETS 13
-static bool bnx2x_is_reg_online(struct bnx2x *bp,
- const struct reg_addr *reg_info)
+static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset)
{
if (CHIP_IS_E1(bp))
- return IS_E1_ONLINE(reg_info->info);
+ return dump_num_registers[0][preset-1];
else if (CHIP_IS_E1H(bp))
- return IS_E1H_ONLINE(reg_info->info);
+ return dump_num_registers[1][preset-1];
else if (CHIP_IS_E2(bp))
- return IS_E2_ONLINE(reg_info->info);
+ return dump_num_registers[2][preset-1];
else if (CHIP_IS_E3A0(bp))
- return IS_E3_ONLINE(reg_info->info);
+ return dump_num_registers[3][preset-1];
else if (CHIP_IS_E3B0(bp))
- return IS_E3B0_ONLINE(reg_info->info);
+ return dump_num_registers[4][preset-1];
else
- return false;
+ return 0;
+}
+
+static int __bnx2x_get_regs_len(struct bnx2x *bp)
+{
+ u32 preset_idx;
+ int regdump_len = 0;
+
+ /* Calculate the total preset regs length */
+ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++)
+ regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx);
+
+ return regdump_len;
+}
+
+static int bnx2x_get_regs_len(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ regdump_len = __bnx2x_get_regs_len(bp);
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_header);
+
+ return regdump_len;
}
+#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
+#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
+#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
+#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
+#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
+
+#define IS_REG_IN_PRESET(presets, idx) \
+ ((presets & (1 << (idx-1))) == (1 << (idx-1)))
+
/******* Paged registers info selectors ********/
static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
{
@@ -680,38 +710,39 @@ static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
return 0;
}
-static int __bnx2x_get_regs_len(struct bnx2x *bp)
+static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
+ const struct reg_addr *reg_info)
{
- int num_pages = __bnx2x_get_page_reg_num(bp);
- int page_write_num = __bnx2x_get_page_write_num(bp);
- const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp);
- int page_read_num = __bnx2x_get_page_read_num(bp);
- int regdump_len = 0;
- int i, j, k;
-
- for (i = 0; i < REGS_COUNT; i++)
- if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
- regdump_len += reg_addrs[i].size;
-
- for (i = 0; i < num_pages; i++)
- for (j = 0; j < page_write_num; j++)
- for (k = 0; k < page_read_num; k++)
- if (bnx2x_is_reg_online(bp, &page_read_addr[k]))
- regdump_len += page_read_addr[k].size;
-
- return regdump_len;
+ if (CHIP_IS_E1(bp))
+ return IS_E1_REG(reg_info->chips);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_REG(reg_info->chips);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_REG(reg_info->chips);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3A0_REG(reg_info->chips);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_REG(reg_info->chips);
+ else
+ return false;
}
-static int bnx2x_get_regs_len(struct net_device *dev)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int regdump_len = 0;
-
- regdump_len = __bnx2x_get_regs_len(bp);
- regdump_len *= 4;
- regdump_len += sizeof(struct dump_hdr);
- return regdump_len;
+static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
+ const struct wreg_addr *wreg_info)
+{
+ if (CHIP_IS_E1(bp))
+ return IS_E1_REG(wreg_info->chips);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_REG(wreg_info->chips);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_REG(wreg_info->chips);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3A0_REG(wreg_info->chips);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_REG(wreg_info->chips);
+ else
+ return false;
}
/**
@@ -725,9 +756,10 @@ static int bnx2x_get_regs_len(struct net_device *dev)
* ("read address"). There may be more than one write address per "page" and
* more than one read address per write address.
*/
-static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
+static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
u32 i, j, k, n;
+
/* addresses of the paged registers */
const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
/* number of paged registers */
@@ -740,32 +772,100 @@ static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
/* number of read addresses */
int read_num = __bnx2x_get_page_read_num(bp);
+ u32 addr, size;
for (i = 0; i < num_pages; i++) {
for (j = 0; j < write_num; j++) {
REG_WR(bp, write_addr[j], page_addr[i]);
- for (k = 0; k < read_num; k++)
- if (bnx2x_is_reg_online(bp, &read_addr[k]))
- for (n = 0; n <
- read_addr[k].size; n++)
- *p++ = REG_RD(bp,
- read_addr[k].addr + n*4);
+
+ for (k = 0; k < read_num; k++) {
+ if (IS_REG_IN_PRESET(read_addr[k].presets,
+ preset)) {
+ size = read_addr[k].size;
+ for (n = 0; n < size; n++) {
+ addr = read_addr[k].addr + n*4;
+ *p++ = REG_RD(bp, addr);
+ }
+ }
+ }
}
}
}
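
	The comment above describes the paged-register scheme in words; a
	self-contained sketch of the same loop shape, with invented addresses and
	stubbed accessors standing in for REG_WR()/REG_RD(), shows how selecting a
	page and then walking each read window fit together:

	#include <stdio.h>

	struct reg_range { unsigned int addr; unsigned int size; };

	/* toy stand-ins for REG_WR()/REG_RD() against a fake device */
	static unsigned int cur_page;
	static void reg_wr(unsigned int addr, unsigned int val) { (void)addr; cur_page = val; }
	static unsigned int reg_rd(unsigned int addr) { return (cur_page << 24) | addr; }

	int main(void)
	{
		/* invented page numbers, page-select registers and read windows */
		const unsigned int pages[] = { 0, 1, 2 };
		const unsigned int write_addr[] = { 0x1000 };
		const struct reg_range read_addr[] = { { 0x2000, 2 }, { 0x3000, 1 } };
		unsigned int i, j, k, n;

		for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
			for (j = 0; j < sizeof(write_addr) / sizeof(write_addr[0]); j++) {
				reg_wr(write_addr[j], pages[i]);	/* select the page */
				for (k = 0; k < sizeof(read_addr) / sizeof(read_addr[0]); k++)
					for (n = 0; n < read_addr[k].size; n++)
						printf("page %u reg 0x%08x\n", pages[i],
						       reg_rd(read_addr[k].addr + n * 4));
			}
		return 0;
	}
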
-static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
- u32 i, j;
+ u32 i, j, addr;
+ const struct wreg_addr *wreg_addr_p = NULL;
+
+ if (CHIP_IS_E1(bp))
+ wreg_addr_p = &wreg_addr_e1;
+ else if (CHIP_IS_E1H(bp))
+ wreg_addr_p = &wreg_addr_e1h;
+ else if (CHIP_IS_E2(bp))
+ wreg_addr_p = &wreg_addr_e2;
+ else if (CHIP_IS_E3A0(bp))
+ wreg_addr_p = &wreg_addr_e3;
+ else if (CHIP_IS_E3B0(bp))
+ wreg_addr_p = &wreg_addr_e3b0;
+
+ /* Read the idle_chk registers */
+ for (i = 0; i < IDLE_REGS_COUNT; i++) {
+ if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) &&
+ IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
+ for (j = 0; j < idle_reg_addrs[i].size; j++)
+ *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
+ }
+ }
/* Read the regular registers */
- for (i = 0; i < REGS_COUNT; i++)
- if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+ for (i = 0; i < REGS_COUNT; i++) {
+ if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) &&
+ IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
for (j = 0; j < reg_addrs[i].size; j++)
*p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
+ }
+ }
+
+ /* Read the CAM registers */
+ if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) &&
+ IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
+ for (i = 0; i < wreg_addr_p->size; i++) {
+ *p++ = REG_RD(bp, wreg_addr_p->addr + i*4);
+
+ /* In case of wreg_addr register, read additional
+ registers from read_regs array
+ */
+ for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
+ addr = *(wreg_addr_p->read_regs);
+ *p++ = REG_RD(bp, addr + j*4);
+ }
+ }
+ }
+
+ /* Paged registers are supported in E2 & E3 only */
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
+	/* Read "paged" registers */
+ bnx2x_read_pages_regs(bp, p, preset);
+ }
- /* Read "paged" registes */
- bnx2x_read_pages_regs(bp, p);
+ return 0;
+}
+
+static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+{
+ u32 preset_idx;
+
+ /* Read all registers, by reading all preset registers */
+ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
+ /* Skip presets with IOR */
+ if ((preset_idx == 2) ||
+ (preset_idx == 5) ||
+ (preset_idx == 8) ||
+ (preset_idx == 11))
+ continue;
+ __bnx2x_get_preset_regs(bp, p, preset_idx);
+ p += __bnx2x_get_preset_regs_len(bp, preset_idx);
+ }
}
static void bnx2x_get_regs(struct net_device *dev,
@@ -773,9 +873,9 @@ static void bnx2x_get_regs(struct net_device *dev,
{
u32 *p = _p;
struct bnx2x *bp = netdev_priv(dev);
- struct dump_hdr dump_hdr = {0};
+ struct dump_header dump_hdr = {0};
- regs->version = 1;
+ regs->version = 2;
memset(p, 0, regs->len);
if (!netif_running(bp->dev))
@@ -785,53 +885,173 @@ static void bnx2x_get_regs(struct net_device *dev,
* cause false alarms by reading never written registers. We
* will re-enable parity attentions right after the dump.
*/
+
+ /* Disable parity on path 0 */
+ bnx2x_pretend_func(bp, 0);
+ bnx2x_disable_blocks_parity(bp);
+
+ /* Disable parity on path 1 */
+ bnx2x_pretend_func(bp, 1);
bnx2x_disable_blocks_parity(bp);
- dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
- dump_hdr.dump_sign = dump_sign_all;
- dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
- dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
- dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
- dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
- if (CHIP_IS_E1(bp))
- dump_hdr.info = RI_E1_ONLINE;
- else if (CHIP_IS_E1H(bp))
- dump_hdr.info = RI_E1H_ONLINE;
- else if (!CHIP_IS_E1x(bp))
- dump_hdr.info = RI_E2_ONLINE |
- (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
+ dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+ dump_hdr.preset = DUMP_ALL_PRESETS;
+ dump_hdr.version = BNX2X_DUMP_VERSION;
- memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
- p += dump_hdr.hdr_size + 1;
+	/* dump_meta_data represents the OR of CHIP and PATH. */
+ if (CHIP_IS_E1(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+ } else if (CHIP_IS_E1H(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+ } else if (CHIP_IS_E2(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3A0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3B0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ }
+
+ memcpy(p, &dump_hdr, sizeof(struct dump_header));
+ p += dump_hdr.header_size + 1;
/* Actually read the registers */
__bnx2x_get_regs(bp, p);
- /* Re-enable parity attentions */
+ /* Re-enable parity attentions on path 0 */
+ bnx2x_pretend_func(bp, 0);
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ /* Re-enable parity attentions on path 1 */
+ bnx2x_pretend_func(bp, 1);
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ regdump_len = __bnx2x_get_preset_regs_len(bp, preset);
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_header);
+
+ return regdump_len;
+}
+
+static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Use the ethtool_dump "flag" field as the dump preset index */
+ bp->dump_preset_idx = val->flag;
+ return 0;
+}
+
+static int bnx2x_get_dump_flag(struct net_device *dev,
+ struct ethtool_dump *dump)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Calculate the requested preset idx length */
+ dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
+ DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
+ bp->dump_preset_idx, dump->len);
+
+ dump->flag = ETHTOOL_GET_DUMP_DATA;
+ return 0;
+}
+
+static int bnx2x_get_dump_data(struct net_device *dev,
+ struct ethtool_dump *dump,
+ void *buffer)
+{
+ u32 *p = buffer;
+ struct bnx2x *bp = netdev_priv(dev);
+ struct dump_header dump_hdr = {0};
+
+ memset(p, 0, dump->len);
+
+ /* Disable parity attentions as long as following dump may
+ * cause false alarms by reading never written registers. We
+ * will re-enable parity attentions right after the dump.
+ */
+
+ /* Disable parity on path 0 */
+ bnx2x_pretend_func(bp, 0);
+ bnx2x_disable_blocks_parity(bp);
+
+ /* Disable parity on path 1 */
+ bnx2x_pretend_func(bp, 1);
+ bnx2x_disable_blocks_parity(bp);
+
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+ dump_hdr.preset = bp->dump_preset_idx;
+ dump_hdr.version = BNX2X_DUMP_VERSION;
+
+ DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);
+
+	/* dump_meta_data represents the OR of CHIP and PATH. */
+ if (CHIP_IS_E1(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+ } else if (CHIP_IS_E1H(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+ } else if (CHIP_IS_E2(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3A0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3B0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ }
+
+ memcpy(p, &dump_hdr, sizeof(struct dump_header));
+ p += dump_hdr.header_size + 1;
+
+ /* Actually read the registers */
+ __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
+
+ /* Re-enable parity attentions on path 0 */
+ bnx2x_pretend_func(bp, 0);
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ /* Re-enable parity attentions on path 1 */
+ bnx2x_pretend_func(bp, 1);
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
+
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ return 0;
}
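
	The set_dump/get_dump_flag/get_dump_data callbacks added above are reached
	from user space through the generic ethtool dump interface: ETHTOOL_SET_DUMP
	selects a preset, ETHTOOL_GET_DUMP_FLAG reports the current preset and its
	length, and ETHTOOL_GET_DUMP_DATA returns the header plus registers (the
	ethtool utility wraps the same calls as -W and -w). A rough user-space
	sketch, with the interface name hard-coded and error handling trimmed:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		struct ifreq ifr;
		struct ethtool_dump flag = { .cmd = ETHTOOL_GET_DUMP_FLAG };
		struct ethtool_dump *data;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example device name */

		/* query the currently selected preset and its dump length */
		ifr.ifr_data = (void *)&flag;
		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			return 1;

		/* fetch the dump itself: header followed by that preset's registers */
		data = calloc(1, sizeof(*data) + flag.len);
		if (!data)
			return 1;
		data->cmd = ETHTOOL_GET_DUMP_DATA;
		data->len = flag.len;
		ifr.ifr_data = (void *)data;
		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			printf("got %u bytes for preset flag %u\n", data->len, flag.flag);

		free(data);
		return 0;
	}
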
static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnx2x *bp = netdev_priv(dev);
- u8 phy_fw_ver[PHY_FW_VER_LEN];
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- phy_fw_ver[0] = '\0';
- bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
- snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
- (bp->common.bc_ver & 0xff0000) >> 16,
- (bp->common.bc_ver & 0xff00) >> 8,
- (bp->common.bc_ver & 0xff),
- ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+ bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
+
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS(bp);
@@ -861,13 +1081,13 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct bnx2x *bp = netdev_priv(dev);
if (wol->wolopts & ~WAKE_MAGIC) {
- DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n");
+ DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
}
if (wol->wolopts & WAKE_MAGIC) {
if (bp->flags & NO_WOL_FLAG) {
- DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n");
+ DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
}
bp->wol = 1;
@@ -890,7 +1110,7 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
if (capable(CAP_NET_ADMIN)) {
/* dump MCP trace */
- if (level & BNX2X_MSG_MCP)
+ if (IS_PF(bp) && (level & BNX2X_MSG_MCP))
bnx2x_fw_dump_lvl(bp, KERN_INFO);
bp->msg_enable = level;
}
@@ -940,7 +1160,7 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
* Pf B takes the lock and proceeds to perform it's own access.
* pf A unlocks the per port lock, while pf B is still working (!).
* mcp takes the per port lock and corrupts pf B's access (and/or has it's own
- * acess corrupted by pf B).*
+ * access corrupted by pf B)
*/
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
@@ -1070,7 +1290,8 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
/* we read nvram data in cpu order
* but ethtool sees it as an array of bytes
- * converting to big-endian will do the work */
+ * converting to big-endian will do the work
+ */
*ret_val = cpu_to_be32(val);
rc = 0;
break;
@@ -1297,7 +1518,8 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
val |= (*data_buf << BYTE_OFFSET(offset));
/* nvram data is returned as an array of bytes
- * convert it back to cpu order */
+ * convert it back to cpu order
+ */
val = be32_to_cpu(val);
rc = bnx2x_nvram_write_dword(bp, align_offset, val,
@@ -1509,6 +1731,10 @@ static int bnx2x_set_ringparam(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
+ DP(BNX2X_MSG_ETHTOOL,
+ "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
+ ering->rx_pending, ering->tx_pending);
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
DP(BNX2X_MSG_ETHTOOL,
"Handling parity error recovery. Try again later\n");
@@ -1747,7 +1973,6 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-
enum {
BNX2X_CHIP_E1_OFST = 0,
BNX2X_CHIP_E1H_OFST,
@@ -1875,7 +2100,8 @@ static int bnx2x_test_registers(struct bnx2x *bp)
hw = BNX2X_CHIP_MASK_E3;
/* Repeat the test twice:
- First by writing 0x00000000, second by writing 0xffffffff */
+ * First by writing 0x00000000, second by writing 0xffffffff
+ */
for (idx = 0; idx < 2; idx++) {
switch (idx) {
@@ -2388,8 +2614,8 @@ static void bnx2x_self_test(struct net_device *dev,
struct ethtool_test *etest, u64 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- u8 is_serdes;
- int rc;
+ u8 is_serdes, link_up;
+ int rc, cnt = 0;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
netdev_err(bp->dev,
@@ -2397,6 +2623,7 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+
DP(BNX2X_MSG_ETHTOOL,
"Self-test command parameters: offline = %d, external_lb = %d\n",
(etest->flags & ETH_TEST_FL_OFFLINE),
@@ -2411,20 +2638,17 @@ static void bnx2x_self_test(struct net_device *dev,
}
is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
-
+ link_up = bp->link_vars.link_up;
/* offline tests are not supported in MF mode */
if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
int port = BP_PORT(bp);
u32 val;
- u8 link_up;
/* save current value of input enable for TX port IF */
val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
/* disable input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
- link_up = bp->link_vars.link_up;
-
bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
rc = bnx2x_nic_load(bp, LOAD_DIAG);
if (rc) {
@@ -2486,17 +2710,19 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (bnx2x_link_test(bp, is_serdes) != 0) {
+ if (link_up) {
+ cnt = 100;
+ while (bnx2x_link_test(bp, is_serdes) && --cnt)
+ msleep(20);
+ }
+
+ if (!cnt) {
if (!IS_MF(bp))
buf[6] = 1;
else
buf[2] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
-
-#ifdef BNX2X_EXTRA_DEBUG
- bnx2x_panic_dump(bp);
-#endif
}
#define IS_PORT_STAT(i) \
@@ -2753,15 +2979,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
/* For UDP either 2-tupple hash or 4-tupple hash is supported */
if (info->data == (RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
udp_rss_requested = 1;
else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
udp_rss_requested = 0;
@@ -2777,13 +3002,13 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- } else {
- return 0;
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
}
+ return 0;
+
case IPV4_FLOW:
case IPV6_FLOW:
/* For IP only 2-tupple hash is supported */
@@ -2791,9 +3016,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
+
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2809,9 +3034,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
+
default:
return -EINVAL;
}
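
	The restructured returns above do not change which hash configurations the
	function accepts; it still implements the ETHTOOL_SRXFH side of Rx flow
	hashing that user space toggles with "ethtool -N <dev> rx-flow-hash udp4
	sdfn" or the equivalent ioctl. A hedged sketch of that ioctl path, with the
	device name as a placeholder:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		struct ethtool_rxnfc nfc;
		struct ifreq ifr;

		memset(&nfc, 0, sizeof(nfc));
		nfc.cmd = ETHTOOL_SRXFH;
		nfc.flow_type = UDP_V4_FLOW;
		/* 4-tuple: IP src/dst plus both halves of the L4 ports */
		nfc.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example device name */
		ifr.ifr_data = (void *)&nfc;

		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			perror("ETHTOOL_SRXFH");
		return 0;
	}
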
@@ -2964,6 +3189,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_drvinfo = bnx2x_get_drvinfo,
.get_regs_len = bnx2x_get_regs_len,
.get_regs = bnx2x_get_regs,
+ .get_dump_flag = bnx2x_get_dump_flag,
+ .get_dump_data = bnx2x_get_dump_data,
+ .set_dump = bnx2x_set_dump,
.get_wol = bnx2x_get_wol,
.set_wol = bnx2x_set_wol,
.get_msglevel = bnx2x_get_msglevel,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 60a83ad10370..e5f808377c91 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_defs.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -305,12 +305,10 @@
#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
-
/* Maximal aggregation queues supported */
#define ETH_MAX_AGGREGATION_QUEUES_E1 32
#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
-
#define ETH_NUM_OF_MCAST_BINS 256
#define ETH_NUM_OF_MCAST_ENGINES_E2 72
@@ -353,7 +351,6 @@
/* max number of slow path commands per port */
#define MAX_RAMRODS_PER_PORT 8
-
/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
#define TIMERS_TICK_SIZE_CHIP (1e-3)
@@ -380,7 +377,6 @@
that is not mapped to priority*/
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
-
#define C_ERES_PER_PAGE \
(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
@@ -391,8 +387,6 @@
#define INVALID_VNIC_ID 0xFF
-
#define UNDEF_IRO 0x80000000
-
#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index 4bed52ba300d..f572ae164fce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 3369a50ac6b4..037860ecc343 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,6 @@
/* bnx2x_hsi.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -899,6 +899,10 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
#define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
#define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800
+
#define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
#define PORT_FEATURE_EN_SIZE_SHIFT 24
#define PORT_FEATURE_WOL_ENABLED 0x01000000
@@ -3374,6 +3378,10 @@ struct regpair {
__le32 hi;
};
+struct regpair_native {
+ u32 lo;
+ u32 hi;
+};
/*
* Classify rule opcodes in E2/E3
@@ -4400,13 +4408,13 @@ struct tstorm_eth_function_common_config {
* MAC filtering configuration parameters per port in Tstorm
*/
struct tstorm_eth_mac_filter_config {
- __le32 ucast_drop_all;
- __le32 ucast_accept_all;
- __le32 mcast_drop_all;
- __le32 mcast_accept_all;
- __le32 bcast_accept_all;
- __le32 vlan_filter[2];
- __le32 unmatched_unicast;
+ u32 ucast_drop_all;
+ u32 ucast_accept_all;
+ u32 mcast_drop_all;
+ u32 mcast_accept_all;
+ u32 bcast_accept_all;
+ u32 vlan_filter[2];
+ u32 unmatched_unicast;
};
@@ -4898,7 +4906,7 @@ union event_data {
* per PF event ring data
*/
struct event_ring_data {
- struct regpair base_addr;
+ struct regpair_native base_addr;
#if defined(__BIG_ENDIAN)
u8 index_id;
u8 sb_id;
@@ -5131,7 +5139,7 @@ struct pci_entity {
* The fast-path status block meta-data, common to all chips
*/
struct hc_sb_data {
- struct regpair host_sb_addr;
+ struct regpair_native host_sb_addr;
struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
struct pci_entity p_func;
#if defined(__BIG_ENDIAN)
@@ -5145,7 +5153,7 @@ struct hc_sb_data {
u8 state;
u8 rsrv0;
#endif
- struct regpair rsrv1[2];
+ struct regpair_native rsrv1[2];
};
@@ -5163,7 +5171,7 @@ enum hc_segment {
* The fast-path status block meta-data
*/
struct hc_sp_status_block_data {
- struct regpair host_sb_addr;
+ struct regpair_native host_sb_addr;
#if defined(__BIG_ENDIAN)
u8 rsrv1;
u8 state;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index c8f10f0e8a0d..76df015f486a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,7 +1,7 @@
/* bnx2x_init.h: Broadcom Everest network driver.
* Structures and macroes needed during the initialization.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index d755acfe7a40..8ab0dd900960 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -218,7 +218,7 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
/* gunzip_outlen is in dwords */
len = GUNZIP_OUTLEN(bp);
for (i = 0; i < len; i++)
- ((u32 *)GUNZIP_BUF(bp))[i] =
+ ((u32 *)GUNZIP_BUF(bp))[i] = (__force u32)
cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
bnx2x_write_big_buf_wb(bp, addr, len);
@@ -232,7 +232,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
u16 op_end =
INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
STAGE_END)];
- union init_op *op;
+ const union init_op *op;
u32 op_idx, op_type, addr, len;
const u32 *data, *data_base;
@@ -244,7 +244,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
for (op_idx = op_start; op_idx < op_end; op_idx++) {
- op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
+ op = (const union init_op *)&(INIT_OPS(bp)[op_idx]);
/* Get generic data */
op_type = op->raw.op;
addr = op->raw.offset;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 09096b43a6e9..c6da77fa9d07 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2012 Broadcom Corporation
+/* Copyright 2008-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -3659,7 +3659,7 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6));
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -3713,7 +3713,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
};
DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
/* Set to default registers that may be overriden by 10G force */
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -3854,7 +3854,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
};
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -4242,7 +4242,7 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, (3<<13));
- for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(wc_regs); i++)
bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
wc_regs[i].val);
@@ -4748,6 +4748,12 @@ void bnx2x_link_status_update(struct link_params *params,
vars->link_status = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
port_mb[port].link_status));
+
+ /* Force link UP in non LOOPBACK_EXT loopback mode(s) */
+ if (bp->link_params.loopback_mode != LOOPBACK_NONE &&
+ bp->link_params.loopback_mode != LOOPBACK_EXT)
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
if (bnx2x_eee_has_cap(params))
vars->eee_status = REG_RD(bp, params->shmem2_base +
offsetof(struct shmem2_region,
@@ -9520,7 +9526,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set);
+ for (i = 0; i < ARRAY_SIZE(reg_set);
i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad,
reg_set[i].reg, reg_set[i].val);
@@ -9592,7 +9598,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL, val);
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -13395,7 +13401,7 @@ static void bnx2x_disable_kr2(struct link_params *params,
};
DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index ee6e7ec85457..d25c7d79787a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2012 Broadcom Corporation
+/* Copyright 2008-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 940ef859dc60..e81a747ea8ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,6 @@
/* bnx2x_main.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -59,6 +59,7 @@
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
@@ -127,45 +128,66 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
struct workqueue_struct *bnx2x_wq;
+struct bnx2x_mac_vals {
+ u32 xmac_addr;
+ u32 xmac_val;
+ u32 emac_addr;
+ u32 emac_val;
+ u32 umac_addr;
+ u32 umac_val;
+ u32 bmac_addr;
+ u32 bmac_val[2];
+};
+
enum bnx2x_board_type {
BCM57710 = 0,
BCM57711,
BCM57711E,
BCM57712,
BCM57712_MF,
+ BCM57712_VF,
BCM57800,
BCM57800_MF,
+ BCM57800_VF,
BCM57810,
BCM57810_MF,
- BCM57840_O,
+ BCM57810_VF,
BCM57840_4_10,
BCM57840_2_20,
- BCM57840_MFO,
BCM57840_MF,
+ BCM57840_VF,
BCM57811,
- BCM57811_MF
+ BCM57811_MF,
+ BCM57840_O,
+ BCM57840_MFO,
+ BCM57811_VF
};
/* indexed by board_type, above */
static struct {
char *name;
} board_info[] = {
- { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
- { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
- { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
- { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
- { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
- { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
+ [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
+ [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
+ [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
+ [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
+ [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
+ [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
+ [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
+ [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
+ [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
+ [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
+ [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
+ [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
+ [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
+ [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
+ [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+ [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
+ [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
+ [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
};
#ifndef PCI_DEVICE_ID_NX2_57710
@@ -183,12 +205,18 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_VF
+#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_VF
+#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
#endif
@@ -198,6 +226,9 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_VF
+#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
#endif
@@ -210,29 +241,41 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_VF
+#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_VF
+#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
+#endif
+
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
{ 0 }
};
@@ -335,6 +378,65 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"
+void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+{
+ u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+
+ switch (dmae->opcode & DMAE_COMMAND_DST) {
+ case DMAE_CMD_DST_PCI:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ case DMAE_CMD_DST_GRC:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ default:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ }
+}
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -385,7 +487,7 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
return opcode;
}
-static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
struct dmae_command *dmae,
u8 src_type, u8 dst_type)
{
@@ -401,9 +503,8 @@ static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
dmae->comp_val = DMAE_COMP_VAL;
}
-/* issue a dmae command over the init-channel and wailt for completion */
-static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
- struct dmae_command *dmae)
+/* issue a dmae command over the init-channel and wait for completion */
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
@@ -681,12 +782,16 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
printk("%s", lvl);
+
+ /* dump buffer after the mark */
for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
+
+ /* dump buffer before the mark */
for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
@@ -701,7 +806,71 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
bnx2x_fw_dump_lvl(bp, KERN_ERR);
}
-void bnx2x_panic_dump(struct bnx2x *bp)
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+
+ /* in E1 we must use only PCI configuration space to disable
+	 * MSI/MSIX capability
+	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
+ */
+ if (CHIP_IS_E1(bp)) {
+ /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
+ * Use mask register to prevent from HC sending interrupts
+ * after we exit the function
+ */
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ DP(NETIF_MSG_IFDOWN,
+ "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, addr, val);
+ if (REG_RD(bp, addr) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_int_disable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_disable(bp);
+ else
+ bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
int i;
u16 j;
@@ -711,6 +880,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
u16 start = 0, end = 0;
u8 cos;
#endif
+ if (disable_int)
+ bnx2x_int_disable(bp);
bp->stats_state = STATS_STATE_DISABLED;
bp->eth_stats.unrecoverable_error++;
@@ -856,6 +1027,17 @@ void bnx2x_panic_dump(struct bnx2x *bp)
}
#ifdef BNX2X_STOP_ON_ERROR
+
+ /* event queue */
+ for (i = 0; i < NUM_EQ_DESC; i++) {
+ u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+ BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+ i, bp->eq_ring[i].message.opcode,
+ bp->eq_ring[i].message.error);
+ BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+ }
+
/* Rings */
/* Rx */
for_each_valid_rx_queue(bp, i) {
@@ -1027,8 +1209,8 @@ static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
return val;
}
-static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
- char *msg, u32 poll_cnt)
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt)
{
u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
if (val != 0) {
@@ -1038,7 +1220,8 @@ static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
return 0;
}
-static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+/* Common routines with VF FLR cleanup */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
/* adjust polling timeout */
if (CHIP_REV_IS_EMUL(bp))
@@ -1050,7 +1233,7 @@ static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
return FLR_POLL_CNT;
}
-static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
struct pbf_pN_cmd_regs cmd_regs[] = {
{0, (CHIP_IS_E3B0(bp)) ?
@@ -1125,10 +1308,9 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
-static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
- u32 poll_cnt)
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
- struct sdm_op_gen op_gen = {0};
+ u32 op_gen_command = 0;
u32 comp_addr = BAR_CSTRORM_INTMEM +
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
@@ -1139,19 +1321,20 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
return 1;
}
- op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
- op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
- op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
- op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+ op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+ op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+ op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
+ op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
- REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
+ REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
BNX2X_ERR("FW final cleanup did not succeed\n");
DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
(REG_RD(bp, comp_addr)));
- ret = 1;
+ bnx2x_panic();
+ return 1;
}
/* Zero completion for nxt FLR */
REG_WR(bp, comp_addr, 0);
@@ -1159,7 +1342,7 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
return ret;
}
-static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
u16 status;
@@ -1371,26 +1554,31 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
if (msix) {
val &= ~(IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_MSI_MSIX_EN |
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_ATTN_BIT_EN);
if (single_msix)
val |= IGU_PF_CONF_SINGLE_ISR_EN;
} else if (msi) {
val &= ~IGU_PF_CONF_INT_LINE_EN;
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_MSI_MSIX_EN |
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_ATTN_BIT_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
} else {
val &= ~IGU_PF_CONF_MSI_MSIX_EN;
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_INT_LINE_EN |
+ val |= (IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_ATTN_BIT_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
}
+ /* Clean previous status - need to configure igu prior to ack*/
+ if ((!msix) || single_msix) {
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ bnx2x_ack_int(bp);
+ }
+
+ val |= IGU_PF_CONF_FUNC_EN;
+
DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
@@ -1425,71 +1613,6 @@ void bnx2x_int_enable(struct bnx2x *bp)
bnx2x_igu_int_enable(bp);
}
-static void bnx2x_hc_int_disable(struct bnx2x *bp)
-{
- int port = BP_PORT(bp);
- u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
- u32 val = REG_RD(bp, addr);
-
- /*
- * in E1 we must use only PCI configuration space to disable
- * MSI/MSIX capablility
- * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
- */
- if (CHIP_IS_E1(bp)) {
- /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
- * Use mask register to prevent from HC sending interrupts
- * after we exit the function
- */
- REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
-
- val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
- HC_CONFIG_0_REG_INT_LINE_EN_0 |
- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
- } else
- val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
- HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
- HC_CONFIG_0_REG_INT_LINE_EN_0 |
- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
-
- DP(NETIF_MSG_IFDOWN,
- "write %x to HC %d (addr 0x%x)\n",
- val, port, addr);
-
- /* flush all outstanding writes */
- mmiowb();
-
- REG_WR(bp, addr, val);
- if (REG_RD(bp, addr) != val)
- BNX2X_ERR("BUG! proper val not read from IGU!\n");
-}
-
-static void bnx2x_igu_int_disable(struct bnx2x *bp)
-{
- u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
-
- val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
- IGU_PF_CONF_INT_LINE_EN |
- IGU_PF_CONF_ATTN_BIT_EN);
-
- DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
-
- /* flush all outstanding writes */
- mmiowb();
-
- REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
- if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
- BNX2X_ERR("BUG! proper val not read from IGU!\n");
-}
-
-static void bnx2x_int_disable(struct bnx2x *bp)
-{
- if (bp->common.int_block == INT_BLOCK_HC)
- bnx2x_hc_int_disable(bp);
- else
- bnx2x_igu_int_disable(bp);
-}
-
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -1575,11 +1698,11 @@ static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
}
/**
- * bnx2x_trylock_leader_lock- try to aquire a leader lock.
+ * bnx2x_trylock_leader_lock- try to acquire a leader lock.
*
* @bp: driver handle
*
- * Tries to aquire a leader lock for current engine.
+ * Tries to acquire a leader lock for current engine.
*/
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
@@ -1588,6 +1711,24 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
+static int bnx2x_schedule_sp_task(struct bnx2x *bp)
+{
+ /* Set the interrupt occurred bit for the sp-task to recognize it
+ * must ack the interrupt and transition according to the IGU
+ * state machine.
+ */
+ atomic_set(&bp->interrupt_occurred, 1);
+
+ /* The sp_task must execute only after this bit
+ * is set, otherwise we will get out of sync and miss all
+ * further interrupts. Hence, the barrier.
+ */
+ smp_wmb();
+
+ /* schedule sp_task to workqueue */
+ return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+}
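
	The atomic_set()/smp_wmb() pair above is a publish-before-notify ordering:
	the interrupt_occurred flag must be globally visible before the queued work
	can run and test it. The driver's real consumer is the sp task itself; the
	fragment below is only a generic C11 illustration of the same
	release/acquire pairing, not bnx2x code:

	#include <stdio.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool interrupt_occurred;
	static _Atomic unsigned int event_data;

	/* producer (the ISR role): publish the data, then raise the flag */
	static void producer(void)
	{
		atomic_store_explicit(&event_data, 42, memory_order_relaxed);
		/* release pairs with the consumer's acquire, like smp_wmb()/smp_rmb() */
		atomic_store_explicit(&interrupt_occurred, true, memory_order_release);
	}

	/* consumer (the sp-task role): act only once the flag is visibly set */
	static bool consumer(unsigned int *out)
	{
		if (!atomic_load_explicit(&interrupt_occurred, memory_order_acquire))
			return false;
		*out = atomic_load_explicit(&event_data, memory_order_relaxed);
		return true;
	}

	int main(void)
	{
		unsigned int v;

		producer();
		if (consumer(&v))
			printf("consumed %u\n", v);
		return 0;
	}
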
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
@@ -1602,6 +1743,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
fp->index, cid, command, bp->state,
rr_cqe->ramrod_cqe.ramrod_type);
+ /* If cid is within VF range, replace the slowpath object with the
+ * one corresponding to this VF
+ */
+ if (cid >= BNX2X_FIRST_VF_CID &&
+ cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
+ bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
+
switch (command) {
case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
@@ -1653,6 +1801,8 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
#else
return;
#endif
+ /* SRIOV: reschedule any 'in_progress' operations */
+ bnx2x_iov_sp_event(bp, cid, true);
smp_mb__before_atomic_inc();
atomic_inc(&bp->cq_spq_left);
@@ -1669,7 +1819,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
* mark pending ACK to MCP bit.
* prevent case that both bits are cleared.
* At the end of load/unload driver checks that
- * sp_state is cleaerd, and this order prevents
+ * sp_state is cleared, and this order prevents
* races
*/
smp_mb__before_clear_bit();
@@ -1678,22 +1828,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
smp_mb__after_clear_bit();
- /* schedule workqueue to send ack to MCP */
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ /* schedule the sp task as mcp ack is required */
+ bnx2x_schedule_sp_task(bp);
}
return;
}
-void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
-{
- u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
-
- bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
- start);
-}
-
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
struct bnx2x *bp = netdev_priv(dev_instance);
@@ -1734,21 +1875,23 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
if (status & (mask | 0x1)) {
struct cnic_ops *c_ops = NULL;
- if (likely(bp->state == BNX2X_STATE_OPEN)) {
- rcu_read_lock();
- c_ops = rcu_dereference(bp->cnic_ops);
- if (c_ops)
- c_ops->cnic_handler(bp->cnic_data,
- NULL);
- rcu_read_unlock();
- }
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops && (bp->cnic_eth_dev.drv_state &
+ CNIC_DRV_STATE_HANDLES_IRQ))
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
status &= ~mask;
}
}
if (unlikely(status & 0x1)) {
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
status &= ~0x1;
if (!status)
@@ -2448,23 +2591,55 @@ void bnx2x__link_status_update(struct bnx2x *bp)
return;
/* read updated dcb configuration */
- bnx2x_dcbx_pmf_update(bp);
-
- bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+ if (IS_PF(bp)) {
+ bnx2x_dcbx_pmf_update(bp);
+ bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+ if (bp->link_vars.link_up)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ else
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ /* indicate link status */
+ bnx2x_link_report(bp);
- if (bp->link_vars.link_up)
+ } else { /* VF */
+ bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ bp->port.advertising[0] = bp->port.supported[0];
+
+ bp->link_params.bp = bp;
+ bp->link_params.port = BP_PORT(bp);
+ bp->link_params.req_duplex[0] = DUPLEX_FULL;
+ bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
+ bp->link_params.req_line_speed[0] = SPEED_10000;
+ bp->link_params.speed_cap_mask[0] = 0x7f0000;
+ bp->link_params.switch_cfg = SWITCH_CFG_10G;
+ bp->link_vars.mac_type = MAC_TYPE_BMAC;
+ bp->link_vars.line_speed = SPEED_10000;
+ bp->link_vars.link_status =
+ (LINK_STATUS_LINK_UP |
+ LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
+ bp->link_vars.link_up = 1;
+ bp->link_vars.duplex = DUPLEX_FULL;
+ bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ __bnx2x_link_report(bp);
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
- else
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* indicate link status */
- bnx2x_link_report(bp);
+ }
}
static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
u16 vlan_val, u8 allowed_prio)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_afex_update_params *f_update_params =
&func_params.params.afex_update;
@@ -2489,7 +2664,7 @@ static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
u16 vif_index, u8 func_bit_map)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_afex_viflists_params *update_params =
&func_params.params.afex_viflists;
int rc;
@@ -2505,7 +2680,7 @@ static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
/* set parameters according to cmd_type */
update_params->afex_vif_list_command = cmd_type;
- update_params->vif_list_index = cpu_to_le16(vif_index);
+ update_params->vif_list_index = vif_index;
update_params->func_bit_map =
(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
update_params->func_to_clear = 0;
@@ -2789,6 +2964,10 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+#ifdef BNX2X_STOP_ON_ERROR
+ __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
+#endif
+
return flags;
}
@@ -2864,15 +3043,12 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
pause->sge_th_hi + FW_PREFETCH_CNT >
MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
- tpa_agg_size = min_t(u32,
- (min_t(u32, 8, MAX_SKB_FRAGS) *
- SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+ tpa_agg_size = TPA_AGG_SIZE;
max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
SGE_PAGE_SHIFT;
max_sge = ((max_sge + PAGES_PER_SGE - 1) &
(~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
- sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
- 0xffff);
+ sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
}
/* pause - not for e1 */
@@ -2917,7 +3093,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* Maximum number or simultaneous TPA aggregation for this Queue.
*
- * For PF Clients it should be the maximum avaliable number.
+ * For PF Clients it should be the maximum available number.
* VF driver(s) may want to define it to a smaller value.
*/
rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
@@ -3011,7 +3187,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
if (bp->port.pmf)
storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- /* init Event Queue */
+ /* init Event Queue - PCI bus guarantees correct endianity*/
eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
eq_data.producer = bp->eq_prod;
@@ -3101,65 +3277,75 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
struct fcoe_statistics_params *fw_fcoe_stat =
&bp->fw_stats_data->fcoe;
- ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
- fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
+ fcoe_stat->rx_bytes_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_ucast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_pkts);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_bcast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_pkts);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_mcast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_pkts);
- ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
- fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
+ fcoe_stat->tx_bytes_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->ucast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->ucast_pkts_sent);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->bcast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->bcast_pkts_sent);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->mcast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->mcast_pkts_sent);
}
/* ask L5 driver to add data to the struct */
@@ -3630,7 +3816,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
"Please contact OEM Support for assistance\n");
/*
- * Scheudle device reset (unload)
+ * Schedule device reset (unload)
* This is due to some boards consuming sufficient power when driver is
* up to overheat if fan fails.
*/
@@ -3780,6 +3966,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_DRV_INFO_REQ)
bnx2x_handle_drv_info_req(bp);
+
+ if (val & DRV_STATUS_VF_DISABLED)
+ bnx2x_vf_handle_flr_event(bp);
+
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -4576,8 +4766,8 @@ static void bnx2x_attn_int(struct bnx2x *bp)
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
u16 index, u8 op, u8 update)
{
- u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
-
+ u32 igu_addr = bp->igu_base_addr;
+ igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
igu_addr);
}
@@ -4605,7 +4795,7 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
cid);
- bnx2x_panic_dump(bp);
+ bnx2x_panic_dump(bp, false);
}
bnx2x_cnic_cfc_comp(bp, cid, err);
return 0;
@@ -4647,7 +4837,8 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
/* Always push next commands out, don't wait here */
__set_bit(RAMROD_CONT, &ramrod_flags);
- switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
+ >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
@@ -4724,7 +4915,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
struct bnx2x_queue_update_params *q_update_params =
&queue_params.params.update;
- /* Send Q update command with afex vlan removal values for all Qs */
+ /* Send Q update command with afex vlan removal values for all Qs */
queue_params.cmd = BNX2X_Q_CMD_UPDATE;
/* set silent vlan removal values according to vlan mode */
@@ -4798,7 +4989,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
u8 echo;
u32 cid;
u8 opcode;
- int spqe_cnt = 0;
+ int rc, spqe_cnt = 0;
struct bnx2x_queue_sp_obj *q_obj;
struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
@@ -4826,15 +5017,27 @@ static void bnx2x_eq_int(struct bnx2x *bp)
for (; sw_cons != hw_cons;
sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
-
elem = &bp->eq_ring[EQ_DESC(sw_cons)];
- cid = SW_CID(elem->message.data.cfc_del_event.cid);
- opcode = elem->message.opcode;
+ rc = bnx2x_iov_eq_sp_event(bp, elem);
+ if (!rc) {
+ DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
+ rc);
+ goto next_spqe;
+ }
+ /* elem CID originates from FW; actually LE */
+ cid = SW_CID((__force __le32)
+ elem->message.data.cfc_del_event.cid);
+ opcode = elem->message.opcode;
/* handle eq element */
switch (opcode) {
+ case EVENT_RING_OPCODE_VF_PF_CHANNEL:
+ DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
+ bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
+ continue;
+
case EVENT_RING_OPCODE_STAT_QUERY:
DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
"got statistics comp event %d\n",
@@ -5000,50 +5203,65 @@ next_spqe:
static void bnx2x_sp_task(struct work_struct *work)
{
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
- u16 status;
- status = bnx2x_update_dsb_idx(bp);
-/* if (status == 0) */
-/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
+ DP(BNX2X_MSG_SP, "sp task invoked\n");
- DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
+ /* make sure the atomic interrupt_occurred has been written */
+ smp_rmb();
+ if (atomic_read(&bp->interrupt_occurred)) {
- /* HW attentions */
- if (status & BNX2X_DEF_SB_ATT_IDX) {
- bnx2x_attn_int(bp);
- status &= ~BNX2X_DEF_SB_ATT_IDX;
- }
+ /* what work needs to be performed? */
+ u16 status = bnx2x_update_dsb_idx(bp);
+
+ DP(BNX2X_MSG_SP, "status %x\n", status);
+ DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
+ atomic_set(&bp->interrupt_occurred, 0);
- /* SP events: STAT_QUERY and others */
- if (status & BNX2X_DEF_SB_IDX) {
- struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ /* HW attentions */
+ if (status & BNX2X_DEF_SB_ATT_IDX) {
+ bnx2x_attn_int(bp);
+ status &= ~BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ /* SP events: STAT_QUERY and others */
+ if (status & BNX2X_DEF_SB_IDX) {
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
if (FCOE_INIT(bp) &&
- (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
- /*
- * Prevent local bottom-halves from running as
- * we are going to change the local NAPI list.
- */
- local_bh_disable();
- napi_schedule(&bnx2x_fcoe(bp, napi));
- local_bh_enable();
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ /* Prevent local bottom-halves from running as
+ * we are going to change the local NAPI list.
+ */
+ local_bh_disable();
+ napi_schedule(&bnx2x_fcoe(bp, napi));
+ local_bh_enable();
+ }
+
+ /* Handle EQ completions */
+ bnx2x_eq_int(bp);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+ le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+ status &= ~BNX2X_DEF_SB_IDX;
}
- /* Handle EQ completions */
- bnx2x_eq_int(bp);
+ /* if status is non-zero then perhaps something went wrong */
+ if (unlikely(status))
+ DP(BNX2X_MSG_SP,
+ "got an unknown interrupt! (status 0x%x)\n", status);
- bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
- le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+ /* ack status block only if something was actually handled */
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+ le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
- status &= ~BNX2X_DEF_SB_IDX;
}
- if (unlikely(status))
- DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
- status);
-
- bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
- le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+ /* must be called after the EQ processing (since eq leads to sriov
+ * ramrod completion flows).
+ * This flow may have been scheduled by the arrival of a ramrod
+ * completion, or by the sriov code rescheduling itself.
+ */
+ bnx2x_iov_sp_task(bp);
/* afex - poll to check if VIFSET_ACK should be sent to MFW */
if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
@@ -5076,7 +5294,10 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
rcu_read_unlock();
}
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
return IRQ_HANDLED;
}
@@ -5090,7 +5311,6 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
bp->fw_drv_pulse_wr_seq);
}
-
static void bnx2x_timer(unsigned long data)
{
struct bnx2x *bp = (struct bnx2x *) data;
@@ -5098,7 +5318,8 @@ static void bnx2x_timer(unsigned long data)
if (!netif_running(bp->dev))
return;
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp)) {
int mb_idx = BP_FW_MB_IDX(bp);
u32 drv_pulse;
u32 mcp_pulse;
@@ -5125,6 +5346,10 @@ static void bnx2x_timer(unsigned long data)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+ /* sample pf vf bulletin board for new posts from pf */
+ if (IS_VF(bp))
+ bnx2x_sample_bulletin(bp);
+
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -5267,7 +5492,7 @@ static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
}
-static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
int igu_seg_id;
@@ -5323,7 +5548,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
- /* write indecies to HW */
+ /* write indices to HW - PCI guarantees endianity of regpairs */
bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
@@ -5411,6 +5636,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
bnx2x_zero_sp_sb(bp);
+ /* PCI guarantees endianity of regpairs */
sp_sb_data.state = SB_ENABLED;
sp_sb_data.host_sb_addr.lo = U64_LO(section);
sp_sb_data.host_sb_addr.hi = U64_HI(section);
@@ -5467,13 +5693,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
-
/* called with netif_addr_lock_bh() */
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags)
+int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
{
struct bnx2x_rx_mode_ramrod_params ramrod_param;
int rc;
@@ -5503,22 +5728,21 @@ void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
rc = bnx2x_config_rx_mode(bp, &ramrod_param);
if (rc < 0) {
BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
- return;
+ return rc;
}
+
+ return 0;
}
-/* called with netif_addr_lock_bh() */
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
+ unsigned long *rx_accept_flags,
+ unsigned long *tx_accept_flags)
{
- unsigned long rx_mode_flags = 0, ramrod_flags = 0;
- unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
-
- if (!NO_FCOE(bp))
+ /* Clear the flags first */
+ *rx_accept_flags = 0;
+ *tx_accept_flags = 0;
- /* Configure rx_mode of FCoE Queue */
- __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
-
- switch (bp->rx_mode) {
+ switch (rx_mode) {
case BNX2X_RX_MODE_NONE:
/*
* 'drop all' supersedes any accept flags that may have been
@@ -5526,25 +5750,25 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
*/
break;
case BNX2X_RX_MODE_NORMAL:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
break;
case BNX2X_RX_MODE_ALLMULTI:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
break;
case BNX2X_RX_MODE_PROMISC:
@@ -5552,36 +5776,57 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
* should receive matched and unmatched (in resolution of port)
* unicast packets.
*/
- __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
if (IS_MF_SI(bp))
- __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
else
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
break;
default:
- BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
- return;
+ BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
+ return -EINVAL;
}
+ /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}
+ return 0;
+}
+
+/* called with netif_addr_lock_bh() */
+int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+ unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+ unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+ int rc;
+
+ if (!NO_FCOE(bp))
+ /* Configure rx_mode of FCoE Queue */
+ __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+
+ rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
+ &tx_accept_flags);
+ if (rc)
+ return rc;
+
__set_bit(RAMROD_RX, &ramrod_flags);
__set_bit(RAMROD_TX, &ramrod_flags);
- bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
- tx_accept_flags, ramrod_flags);
+ return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
+ rx_accept_flags, tx_accept_flags,
+ ramrod_flags);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
@@ -5688,6 +5933,13 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
cids[cos] = fp->txdata_ptr[cos]->cid;
}
+ /* nothing more for vf to do here */
+ if (IS_VF(bp))
+ return;
+
+ bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+ fp->fw_sb_id, fp->igu_sb_id);
+ bnx2x_update_fpsb_idx(fp);
bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
@@ -5697,13 +5949,10 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
*/
bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
- DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
- fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
- fp->igu_sb_id);
- bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
- fp->fw_sb_id, fp->igu_sb_id);
-
- bnx2x_update_fpsb_idx(fp);
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
}
static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
@@ -5775,17 +6024,22 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
for_each_eth_queue(bp, i)
bnx2x_init_eth_fp(bp, i);
+
+ /* ensure status block indices were read */
+ rmb();
+ bnx2x_init_rx_rings(bp);
+ bnx2x_init_tx_rings(bp);
+
+ if (IS_VF(bp))
+ return;
+
/* Initialize MOD_ABS interrupts */
bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
bp->common.shmem_base, bp->common.shmem2_base,
BP_PORT(bp));
- /* ensure status block indices were read */
- rmb();
bnx2x_init_def_sb(bp);
bnx2x_update_dsb_idx(bp);
- bnx2x_init_rx_rings(bp);
- bnx2x_init_tx_rings(bp);
bnx2x_init_sp_ring(bp);
bnx2x_init_eq_ring(bp);
bnx2x_init_internal(bp, load_code);
@@ -6225,49 +6479,6 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
-static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
-{
- u32 offset = 0;
-
- if (CHIP_IS_E1(bp))
- return;
- if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
- return;
-
- switch (BP_ABS_FUNC(bp)) {
- case 0:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
- break;
- case 1:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
- break;
- case 2:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
- break;
- case 3:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
- break;
- case 4:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
- break;
- case 5:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
- break;
- case 6:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
- break;
- case 7:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
- break;
- default:
- return;
- }
-
- REG_WR(bp, offset, pretend_func_num);
- REG_RD(bp, offset);
- DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
-}
-
void bnx2x_pf_disable(struct bnx2x *bp)
{
u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
@@ -6311,7 +6522,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
/*
- * take the UNDI lock to protect undi_unload flow from accessing
+ * take the RESET lock to protect undi_unload flow from accessing
* registers while we're resetting the chip
*/
bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
@@ -6441,7 +6652,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
* queues with "old" ILT addresses.
* c. PF enable in the PGLC.
* d. Clear the was_error of the PF in the PGLC. (could have
- * occured while driver was down)
+ * occurred while driver was down)
* e. PF enable in the CFC (WEAK + STRONG)
* f. Timers scan enable
* 3. PF driver unload flow:
@@ -6482,7 +6693,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
/* Step 1: set zeroes to all ilt page entries with valid bit on
* Step 2: set the timers first/last ilt entry to point
* to the entire range to prevent ILT range error for 3rd/4th
- * vnic (this code assumes existance of the vnic)
+ * vnic (this code assumes existence of the vnic)
*
* both steps performed by call to bnx2x_ilt_client_init_op()
* with dummy TM client
@@ -6499,7 +6710,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
}
-
REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
@@ -6524,6 +6734,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
+ bnx2x_iov_init_dmae(bp);
+
/* clean the DMAE memory */
bp->dmae_ready = 1;
bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
@@ -6980,7 +7192,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
}
}
-
/* If SPIO5 is set to generate interrupts, enable it for this port */
val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
if (val & MISC_SPIO_SPIO5) {
@@ -7009,15 +7220,14 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
REG_WR_DMAE(bp, reg, wb_write, 2);
}
-static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
- u8 idu_sb_id, bool is_Pf)
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
{
u32 data, ctl, cnt = 100;
u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
u32 sb_bit = 1 << (idu_sb_id%32);
- u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
+ u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
/* Not supported in BC mode */
@@ -7208,8 +7418,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
/* FLR cleanup - hmmm */
if (!CHIP_IS_E1x(bp)) {
rc = bnx2x_pf_flr_clnup(bp);
- if (rc)
+ if (rc) {
+ bnx2x_fw_dump(bp);
return rc;
+ }
}
/* set MSI reconfigure capability */
@@ -7226,12 +7438,21 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
ilt = BP_ILT(bp);
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+ if (IS_SRIOV(bp))
+ cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
+ cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
+
+ /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
+ * those of the VFs, so start line should be reset
+ */
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
for (i = 0; i < L2_ILT_LINES(bp); i++) {
ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
bp->context[i].cxt_mapping;
ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
}
+
bnx2x_ilt_init_op(bp, INITOP_SET);
if (!CONFIGURE_NIC_MODE(bp)) {
@@ -7304,6 +7525,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, init_phase);
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+
+ bnx2x_iov_init_dq(bp);
+
bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
bnx2x_init_block(bp, BLOCK_PRS, init_phase);
bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
@@ -7512,10 +7736,6 @@ void bnx2x_free_mem(struct bnx2x *bp)
{
int i;
- /* fastpath */
- bnx2x_free_fp_mem(bp);
- /* end of fastpath */
-
BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
sizeof(struct host_sp_status_block));
@@ -7536,69 +7756,11 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
-}
-
-static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
-{
- int num_groups;
- int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
-
- /* number of queues for statistics is number of eth queues + FCoE */
- u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
-
- /* Total number of FW statistics requests =
- * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
- * num of queues
- */
- bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
-
-
- /* Request is built from stats_query_header and an array of
- * stats_query_cmd_group each of which contains
- * STATS_QUERY_CMD_COUNT rules. The real number or requests is
- * configured in the stats_query_header.
- */
- num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
- (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
-
- bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
- num_groups * sizeof(struct stats_query_cmd_group);
-
- /* Data for statistics requests + stats_conter
- *
- * stats_counter holds per-STORM counters that are incremented
- * when STORM has finished with the current request.
- *
- * memory for FCoE offloaded statistics are counted anyway,
- * even if they will not be sent.
- */
- bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
- sizeof(struct per_pf_stats) +
- sizeof(struct fcoe_statistics_params) +
- sizeof(struct per_queue_stats) * num_queue_stats +
- sizeof(struct stats_counter);
-
- BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
-
- /* Set shortcuts */
- bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
- bp->fw_stats_req_mapping = bp->fw_stats_mapping;
-
- bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
- ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
-
- bp->fw_stats_data_mapping = bp->fw_stats_mapping +
- bp->fw_stats_req_sz;
- return 0;
-alloc_mem_err:
- BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
- BNX2X_ERR("Can't allocate memory\n");
- return -ENOMEM;
+ bnx2x_iov_free_mem(bp);
}
+
int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
if (!CHIP_IS_E1x(bp))
@@ -7644,10 +7806,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
- /* Allocated memory for FW statistics */
- if (bnx2x_alloc_fw_stats_mem(bp))
- goto alloc_mem_err;
-
/* Allocate memory for CDU context:
* This memory is allocated separately and not in the generic ILT
* functions because CDU differs in few aspects:
@@ -7676,6 +7834,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
goto alloc_mem_err;
+ if (bnx2x_iov_alloc_mem(bp))
+ goto alloc_mem_err;
+
/* Slow path ring */
BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
@@ -7683,13 +7844,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
-
- /* fastpath */
- /* need to be done at the end, since it's self adjusting to amount
- * of memory available for RSS queues
- */
- if (bnx2x_alloc_fp_mem(bp))
- goto alloc_mem_err;
return 0;
alloc_mem_err:
@@ -7792,43 +7946,53 @@ int bnx2x_setup_leading(struct bnx2x *bp)
*
* In case of MSI-X it will also try to enable MSI-X.
*/
-void bnx2x_set_int_mode(struct bnx2x *bp)
+int bnx2x_set_int_mode(struct bnx2x *bp)
{
+ int rc = 0;
+
+ if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
+ return -EINVAL;
+
switch (int_mode) {
- case INT_MODE_MSI:
+ case BNX2X_INT_MODE_MSIX:
+ /* attempt to enable msix */
+ rc = bnx2x_enable_msix(bp);
+
+ /* msix attained */
+ if (!rc)
+ return 0;
+
+ /* vfs use only msix */
+ if (rc && IS_VF(bp))
+ return rc;
+
+ /* failed to enable multiple MSI-X */
+ BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
+ bp->num_queues,
+ 1 + bp->num_cnic_queues);
+
+ /* falling through... */
+ case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
+
/* falling through... */
- case INT_MODE_INTx:
+ case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
- /* if we can't use MSI-X we only need one fp,
- * so try to enable MSI-X with the requested number of fp's
- * and fallback to MSI or legacy INTx with one fp
- */
- if (bnx2x_enable_msix(bp) ||
- bp->flags & USING_SINGLE_MSIX_FLAG) {
- /* failed to enable multiple MSI-X */
- BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
- bp->num_queues,
- 1 + bp->num_cnic_queues);
-
- bp->num_queues = 1 + bp->num_cnic_queues;
-
- /* Try to enable MSI */
- if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
- !(bp->flags & DISABLE_MSI_FLAG))
- bnx2x_enable_msi(bp);
- }
- break;
+ BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
+ return -EINVAL;
}
+ return 0;
}
-/* must be called prioir to any HW initializations */
+/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
+ if (IS_SRIOV(bp))
+ return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
return L2_ILT_LINES(bp);
}
@@ -8211,8 +8375,8 @@ static void bnx2x_reset_func(struct bnx2x *bp)
/* SP SB */
REG_WR8(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
- SB_DISABLED);
+ CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+ SB_DISABLED);
for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
@@ -8513,7 +8677,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
}
/* Give HW time to discard old tx messages */
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* Clean all ETH MACs */
rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
@@ -8551,6 +8715,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
netif_addr_unlock_bh(bp->dev);
+ bnx2x_iov_chip_cleanup(bp);
/*
@@ -8678,7 +8843,7 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
(!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
(val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
} else {
- /* Prevent incomming interrupts in IGU */
+ /* Prevent incoming interrupts in IGU */
val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
@@ -8936,7 +9101,7 @@ static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
if (pend_bits == 0)
break;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
} while (cnt-- > 0);
if (cnt <= 0) {
@@ -8953,8 +9118,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
int cnt = 1000;
u32 val = 0;
u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
- u32 tags_63_32 = 0;
-
+ u32 tags_63_32 = 0;
/* Empty the Tetris buffer, wait for 1s */
do {
@@ -8972,7 +9136,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
(pgl_exp_rom2 == 0xffffffff) &&
(!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
break;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
} while (cnt-- > 0);
if (cnt <= 0) {
@@ -9005,7 +9169,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
/* Wait for 1ms to empty GLUE and PCI-E core queues,
* PSWHST, GRC and PSWRD Tetris buffer.
*/
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* Prepare to chip reset: */
/* MCP */
@@ -9288,8 +9452,10 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
rtnl_lock();
- if (!netif_running(bp->dev))
- goto sp_rtnl_exit;
+ if (!netif_running(bp->dev)) {
+ rtnl_unlock();
+ return;
+ }
/* if stop on error is defined no recovery flows should be executed */
#ifdef BNX2X_STOP_ON_ERROR
@@ -9308,7 +9474,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bnx2x_parity_recover(bp);
- goto sp_rtnl_exit;
+ rtnl_unlock();
+ return;
}
if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
@@ -9322,7 +9489,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
bnx2x_nic_load(bp, LOAD_NORMAL);
- goto sp_rtnl_exit;
+ rtnl_unlock();
+ return;
}
#ifdef BNX2X_STOP_ON_ERROR
sp_rtnl_not_reset:
@@ -9340,13 +9508,33 @@ sp_rtnl_not_reset:
DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
netif_device_detach(bp->dev);
bnx2x_close(bp->dev);
+ rtnl_unlock();
+ return;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP,
+ "sending set mcast vf pf channel message from rtnl sp-task\n");
+ bnx2x_vfpf_set_mcast(bp->dev);
}
-sp_rtnl_exit:
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP,
+ "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+
+ /* work which needs rtnl lock not-taken (as it takes the lock itself and
+ * can be called from other contexts as well)
+ */
rtnl_unlock();
-}
-/* end of nic load/unload */
+ /* enable SR-IOV if applicable */
+ if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
+ &bp->sp_rtnl_state))
+ bnx2x_enable_sriov(bp);
+}
static void bnx2x_period_task(struct work_struct *work)
{
@@ -9383,49 +9571,26 @@ period_task_exit:
* Init service functions
*/
-static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
return base + (BP_ABS_FUNC(bp)) * stride;
}
-static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
-{
- u32 reg = bnx2x_get_pretend_reg(bp);
-
- /* Flush all outstanding writes */
- mmiowb();
-
- /* Pretend to be function 0 */
- REG_WR(bp, reg, 0);
- REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
-
- /* From now we are in the "like-E1" mode */
- bnx2x_int_disable(bp);
-
- /* Flush all outstanding writes */
- mmiowb();
-
- /* Restore the original function */
- REG_WR(bp, reg, BP_ABS_FUNC(bp));
- REG_RD(bp, reg);
-}
-
-static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
-{
- if (CHIP_IS_E1(bp))
- bnx2x_int_disable(bp);
- else
- bnx2x_undi_int_disable_e1h(bp);
-}
-
-static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
+static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+ struct bnx2x_mac_vals *vals)
{
u32 val, base_addr, offset, mask, reset_reg;
bool mac_stopped = false;
u8 port = BP_PORT(bp);
+ /* reset addresses as they also mark which values were changed */
+ vals->bmac_addr = 0;
+ vals->umac_addr = 0;
+ vals->xmac_addr = 0;
+ vals->emac_addr = 0;
+
reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
if (!CHIP_IS_E3(bp)) {
@@ -9447,14 +9612,18 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
*/
wb_data[0] = REG_RD(bp, base_addr + offset);
wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+ vals->bmac_addr = base_addr + offset;
+ vals->bmac_val[0] = wb_data[0];
+ vals->bmac_val[1] = wb_data[1];
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
- REG_WR(bp, base_addr + offset, wb_data[0]);
- REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
+ REG_WR(bp, vals->bmac_addr, wb_data[0]);
+ REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
}
BNX2X_DEV_INFO("Disable emac Rx\n");
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
-
+ vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
+ vals->emac_val = REG_RD(bp, vals->emac_addr);
+ REG_WR(bp, vals->emac_addr, 0);
mac_stopped = true;
} else {
if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -9465,14 +9634,18 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
val & ~(1 << 1));
REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
val | (1 << 1));
- REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
+ vals->xmac_addr = base_addr + XMAC_REG_CTRL;
+ vals->xmac_val = REG_RD(bp, vals->xmac_addr);
+ REG_WR(bp, vals->xmac_addr, 0);
mac_stopped = true;
}
mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
if (mask & reset_reg) {
BNX2X_DEV_INFO("Disable umac Rx\n");
base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
- REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
+ vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
+ vals->umac_val = REG_RD(bp, vals->umac_addr);
+ REG_WR(bp, vals->umac_addr, 0);
mac_stopped = true;
}
}
@@ -9632,11 +9805,13 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
if (bnx2x_prev_is_path_marked(bp))
return bnx2x_prev_mcp_done(bp);
+ BNX2X_DEV_INFO("Path is unmarked\n");
+
/* If function has FLR capabilities, and existing FW version matches
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- rc = bnx2x_test_firmware_version(bp, false);
+ rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
if (!rc) {
/* fw version is good */
@@ -9664,12 +9839,16 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
{
u32 reset_reg, tmp_reg = 0, rc;
bool prev_undi = false;
+ struct bnx2x_mac_vals mac_vals;
+
/* It is possible a previous function received 'common' answer,
* but hasn't loaded yet, therefore creating a scenario of
* multiple functions receiving 'common' on the same path.
*/
BNX2X_DEV_INFO("Common unload Flow\n");
+ memset(&mac_vals, 0, sizeof(mac_vals));
+
if (bnx2x_prev_is_path_marked(bp))
return bnx2x_prev_mcp_done(bp);
@@ -9680,12 +9859,14 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
u32 timer_count = 1000;
/* Close the MAC Rx to prevent BRB from filling up */
- bnx2x_prev_unload_close_mac(bp);
+ bnx2x_prev_unload_close_mac(bp, &mac_vals);
+
+ /* close LLH filters towards the BRB */
+ bnx2x_set_rx_filter(&bp->link_params, 0);
/* Check if the UNDI driver was previously loaded
* UNDI driver initializes CID offset for normal bell to 0x7
*/
- reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
if (tmp_reg == 0x7) {
@@ -9693,6 +9874,8 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
prev_undi = true;
/* clear the UNDI indication */
REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+ /* clear possible idle check errors */
+ REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
}
/* wait until BRB is empty */
@@ -9727,6 +9910,17 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* No packets are in the pipeline, path is ready for reset */
bnx2x_reset_common(bp);
+ if (mac_vals.xmac_addr)
+ REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
+ if (mac_vals.umac_addr)
+ REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+ if (mac_vals.emac_addr)
+ REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
+ if (mac_vals.bmac_addr) {
+ REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
+ REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
+ }
+
rc = bnx2x_prev_mark_path(bp, prev_undi);
if (rc) {
bnx2x_prev_mcp_done(bp);
@@ -9748,7 +9942,8 @@ static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
if (!CHIP_IS_E1x(bp)) {
u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
- BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+ DP(BNX2X_MSG_SP,
+ "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
1 << BP_FUNC(bp));
}
@@ -9790,7 +9985,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
}
-
do {
/* Lock MCP using an unload request */
fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
@@ -10357,10 +10551,10 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
- mac_hi = cpu_to_be16(mac_hi);
- mac_lo = cpu_to_be32(mac_lo);
- memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
- memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+ __be16 mac_hi_be = cpu_to_be16(mac_hi);
+ __be32 mac_lo_be = cpu_to_be32(mac_lo);
+ memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
+ memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
}
static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
@@ -10396,6 +10590,13 @@ static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
(config & PORT_FEATURE_WOL_ENABLED));
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
+ bp->flags |= NO_ISCSI_FLAG;
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
+ bp->flags |= NO_FCOE_FLAG;
+
BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
bp->link_params.lane_config,
bp->link_params.speed_cap_mask[0],
@@ -10503,21 +10704,21 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
/* Port info */
bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_port_name_upper);
bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_port_name_lower);
/* Node info */
bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_node_name_upper);
bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_node_name_lower);
} else if (!IS_MF_SD(bp)) {
/*
@@ -10615,7 +10816,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
/* Zero primary MAC configuration */
memset(bp->dev->dev_addr, 0, ETH_ALEN);
- if (IS_MF_FCOE_AFEX(bp))
+ if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
/* use FIP MAC as primary MAC */
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
@@ -10678,7 +10879,6 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
}
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
@@ -10743,7 +10943,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
tout--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
@@ -11081,9 +11281,13 @@ static int bnx2x_init_bp(struct bnx2x *bp)
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
- rc = bnx2x_get_hwinfo(bp);
- if (rc)
- return rc;
+ if (IS_PF(bp)) {
+ rc = bnx2x_get_hwinfo(bp);
+ if (rc)
+ return rc;
+ } else {
+ random_ether_addr(bp->dev->dev_addr);
+ }
bnx2x_set_modes_bitmap(bp);
@@ -11096,7 +11300,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* init fw_seq */
bp->fw_seq =
SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
@@ -11133,6 +11337,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->mrrs = mrrs;
bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
+ if (IS_VF(bp))
+ bp->rx_ring_size = MAX_RX_AVAIL;
/* make sure that the numbers are in the right granularity */
bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
@@ -11161,12 +11367,18 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->cnic_base_cl_id = FP_SB_MAX_E2;
/* multiple tx priority */
- if (CHIP_IS_E1x(bp))
+ if (IS_VF(bp))
+ bp->max_cos = 1;
+ else if (CHIP_IS_E1x(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
- if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+ else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
- if (CHIP_IS_E3B0(bp))
+ else if (CHIP_IS_E3B0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+ else
+ BNX2X_ERR("unknown chip %x revision %x\n",
+ CHIP_NUM(bp), CHIP_REV(bp));
+ BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
/* We need at least one default status block for slow-path events,
* second status block for the L2 queue, and a third status block for
@@ -11190,6 +11402,26 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* net_device service functions
*/
+static int bnx2x_open_epilog(struct bnx2x *bp)
+{
+ /* Enable sriov via delayed work. This must be done via delayed work
+ * because it causes the probe of the vf devices to be run, which invokes
+ * register_netdevice, which must have the rtnl lock taken. As we are
+ * holding the lock right now, that could only work if the probe would not
+ * take the lock. However, as the probe of the vf may be called from other
+ * contexts as well (such as when passthrough to a vm fails) it can't assume
+ * the lock is being held for it. Using delayed work here allows the
+ * probe code to simply take the lock (i.e. wait for it to be released
+ * if it is being held).
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
+ return 0;
+}
+
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
@@ -11197,6 +11429,7 @@ static int bnx2x_open(struct net_device *dev)
bool global = false;
int other_engine = BP_PATH(bp) ? 0 : 1;
bool other_load_status, load_status;
+ int rc;
bp->stats_init = true;
@@ -11204,53 +11437,57 @@ static int bnx2x_open(struct net_device *dev)
bnx2x_set_power_state(bp, PCI_D0);
- other_load_status = bnx2x_get_load_status(bp, other_engine);
- load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
-
- /*
- * If parity had happen during the unload, then attentions
+ /* If parity had happened during the unload, then attentions
* and/or RECOVERY_IN_PROGRES may still be set. In this case we
* want the first function loaded on the current engine to
* complete the recovery.
+ * Parity recovery is only relevant for PF driver.
*/
- if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
- bnx2x_chk_parity_attn(bp, &global, true))
- do {
- /*
- * If there are attentions and they are in a global
- * blocks, set the GLOBAL_RESET bit regardless whether
- * it will be this function that will complete the
- * recovery or not.
- */
- if (global)
- bnx2x_set_reset_global(bp);
+ if (IS_PF(bp)) {
+ other_load_status = bnx2x_get_load_status(bp, other_engine);
+ load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+ bnx2x_chk_parity_attn(bp, &global, true)) {
+ do {
+ /* If there are attentions and they are in a
+ * global blocks, set the GLOBAL_RESET bit
+ * regardless whether it will be this function
+ * that will complete the recovery or not.
+ */
+ if (global)
+ bnx2x_set_reset_global(bp);
- /*
- * Only the first function on the current engine should
- * try to recover in open. In case of attentions in
- * global blocks only the first in the chip should try
- * to recover.
- */
- if ((!load_status &&
- (!global || !other_load_status)) &&
- bnx2x_trylock_leader_lock(bp) &&
- !bnx2x_leader_reset(bp)) {
- netdev_info(bp->dev, "Recovered in open\n");
- break;
- }
+ /* Only the first function on the current
+ * engine should try to recover in open. In case
+ * of attentions in global blocks only the first
+ * in the chip should try to recover.
+ */
+ if ((!load_status &&
+ (!global || !other_load_status)) &&
+ bnx2x_trylock_leader_lock(bp) &&
+ !bnx2x_leader_reset(bp)) {
+ netdev_info(bp->dev,
+ "Recovered in open\n");
+ break;
+ }
- /* recovery has failed... */
- bnx2x_set_power_state(bp, PCI_D3hot);
- bp->recovery_state = BNX2X_RECOVERY_FAILED;
+ /* recovery has failed... */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
- BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
- "If you still see this message after a few retries then power cycle is required.\n");
+ BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
+ "If you still see this message after a few retries then power cycle is required.\n");
- return -EAGAIN;
- } while (0);
+ return -EAGAIN;
+ } while (0);
+ }
+ }
bp->recovery_state = BNX2X_RECOVERY_DONE;
- return bnx2x_nic_load(bp, LOAD_OPEN);
+ rc = bnx2x_nic_load(bp, LOAD_OPEN);
+ if (rc)
+ return rc;
+ return bnx2x_open_epilog(bp);
}
/* called with rtnl_lock */
@@ -11384,7 +11621,6 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
return rc;
}
-
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
void bnx2x_set_rx_mode(struct net_device *dev)
{
@@ -11405,12 +11641,25 @@ void bnx2x_set_rx_mode(struct net_device *dev)
CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else {
- /* some multicasts */
- if (bnx2x_set_mc_list(bp) < 0)
- rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ if (IS_PF(bp)) {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
- if (bnx2x_set_uc_list(bp) < 0)
- rx_mode = BNX2X_RX_MODE_PROMISC;
+ if (bnx2x_set_uc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ } else {
+ /* configuring mcast to a vf involves sleeping (when we
+ * wait for the pf's response). Since this function is
+ * called from a non-sleepable context we must schedule
+ * a work item for this purpose
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
}
bp->rx_mode = rx_mode;
@@ -11424,7 +11673,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
return;
}
- bnx2x_set_storm_rx_mode(bp);
+ if (IS_PF(bp)) {
+ bnx2x_set_storm_rx_mode(bp);
+ } else {
+ /* configuring rx mode to storms in a vf involves sleeping (when
+ * we wait for the pf's response). Since this function is
+ * called from a non-sleepable context we must schedule
+ * a work item for this purpose
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
}
/* called with rtnl_lock */
@@ -11527,7 +11789,9 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_poll_controller = poll_bnx2x,
#endif
.ndo_setup_tc = bnx2x_setup_tc,
-
+#ifdef CONFIG_BNX2X_SRIOV
+ .ndo_set_vf_mac = bnx2x_set_vf_mac,
+#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
@@ -11551,10 +11815,9 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
return 0;
}
-static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
- unsigned long board_type)
+static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
+ struct net_device *dev, unsigned long board_type)
{
- struct bnx2x *bp;
int rc;
u32 pci_cfg_dword;
bool chip_is_e1x = (board_type == BCM57710 ||
@@ -11562,11 +11825,9 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
board_type == BCM57711E);
SET_NETDEV_DEV(dev, &pdev->dev);
- bp = netdev_priv(dev);
bp->dev = dev;
bp->pdev = pdev;
- bp->flags = 0;
rc = pci_enable_device(pdev);
if (rc) {
@@ -11582,9 +11843,8 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
goto err_out_disable;
}
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&bp->pdev->dev, "Cannot find second PCI device"
- " base address, aborting\n");
+ if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
@@ -11609,12 +11869,14 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
pci_save_state(pdev);
}
- bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (bp->pm_cap == 0) {
- dev_err(&bp->pdev->dev,
- "Cannot find power management capability, aborting\n");
- rc = -EIO;
- goto err_out_release;
+ if (IS_PF(bp)) {
+ bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (bp->pm_cap == 0) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
}
if (!pci_is_pcie(pdev)) {
@@ -11646,13 +11908,14 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
* support Physical Device Assignment where kernel BDF maybe arbitrary
* (depending on hypervisor).
*/
- if (chip_is_e1x)
+ if (chip_is_e1x) {
bp->pf_num = PCI_FUNC(pdev->devfn);
- else {/* chip is E2/3*/
+ } else {
+ /* chip is E2/3*/
pci_read_config_dword(bp->pdev,
PCICFG_ME_REGISTER, &pci_cfg_dword);
bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
- ME_REG_ABS_PF_NUM_SHIFT);
+ ME_REG_ABS_PF_NUM_SHIFT);
}
BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
@@ -11665,25 +11928,28 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
*/
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+ if (IS_PF(bp)) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+
+ if (chip_is_e1x) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
- if (chip_is_e1x) {
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ /* Enable internal target-read (in case we are probed after PF
+ * FLR). Must be done prior to any BAR read access. Only for
+ * 57712 and up
+ */
+ if (!chip_is_e1x)
+ REG_WR(bp,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
- /*
- * Enable internal target-read (in case we are probed after PF FLR).
- * Must be done prior to any BAR read access. Only for 57712 and up
- */
- if (!chip_is_e1x)
- REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
-
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11734,8 +12000,9 @@ err_out:
static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
{
- u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+ u32 val = 0;
+ pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
/* return value of 1=2.5GHz 2=5GHz */
@@ -11748,7 +12015,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
struct bnx2x_fw_file_hdr *fw_hdr;
struct bnx2x_fw_file_section *sections;
u32 offset, len, num_ops;
- u16 *ops_offsets;
+ __be16 *ops_offsets;
int i;
const u8 *fw_ver;
@@ -11773,7 +12040,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
/* Likewise for the init_ops offsets */
offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
- ops_offsets = (u16 *)(firmware->data + offset);
+ ops_offsets = (__force __be16 *)(firmware->data + offset);
num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
@@ -12000,8 +12267,12 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
int cid_count = BNX2X_L2_MAX_CID(bp);
+ if (IS_SRIOV(bp))
+ cid_count += BNX2X_VF_CIDS;
+
if (CNIC_SUPPORT(bp))
cid_count += CNIC_CID_MAX;
+
return roundup(cid_count, QM_CID_ROUND);
}
@@ -12012,10 +12283,10 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
*
*/
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
- int cnic_cnt)
+ int cnic_cnt, bool is_vf)
{
- int pos;
- u16 control;
+ int pos, index;
+ u16 control = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
@@ -12023,85 +12294,114 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
* If MSI-X is not supported - return number of SBs needed to support
* one fast path queue: one FP queue + SB for CNIC
*/
- if (!pos)
+ if (!pos) {
+ dev_info(&pdev->dev, "no msix capability found\n");
return 1 + cnic_cnt;
+ }
+ dev_info(&pdev->dev, "msix capability found\n");
/*
* The value in the PCI configuration space is the index of the last
* entry, namely one less than the actual size of the table, which is
* exactly what we want to return from this function: number of all SBs
* without the default SB.
+ * For VFs there is no default SB, so we return (index+1).
*/
pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
- return control & PCI_MSIX_FLAGS_QSIZE;
-}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *);
+ index = control & PCI_MSIX_FLAGS_QSIZE;
-static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct net_device *dev = NULL;
- struct bnx2x *bp;
- int pcie_width, pcie_speed;
- int rc, max_non_def_sbs;
- int rx_count, tx_count, rss_count, doorbell_size;
- int cnic_cnt;
- /*
- * An estimated maximum supported CoS number according to the chip
- * version.
- * We will try to roughly estimate the maximum number of CoSes this chip
- * may support in order to minimize the memory allocated for Tx
- * netdev_queue's. This number will be accurately calculated during the
- * initialization of bp->max_cos based on the chip versions AND chip
- * revision in the bnx2x_init_bp().
- */
- u8 max_cos_est = 0;
+ return is_vf ? index + 1 : index;
+}
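
A small self-contained sketch of the arithmetic above, under the assumption spelled out in the comment: the PCI_MSIX_FLAGS_QSIZE field holds (table size - 1); the PF loses one vector to the default SB while a VF has none.

#include <stdio.h>

/* qsize_field is the raw PCI_MSIX_FLAGS_QSIZE value, i.e. the last entry index */
static int non_def_sbs(unsigned int qsize_field, int is_vf)
{
	return is_vf ? qsize_field + 1 : qsize_field;
}

int main(void)
{
	printf("PF, 17-entry MSI-X table -> %d non-default SBs\n",
	       non_def_sbs(16, 0));
	printf("VF,  2-entry MSI-X table -> %d non-default SBs\n",
	       non_def_sbs(1, 1));
	return 0;
}
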
- switch (ent->driver_data) {
+static int set_max_cos_est(int chip_id)
+{
+ switch (chip_id) {
case BCM57710:
case BCM57711:
case BCM57711E:
- max_cos_est = BNX2X_MULTI_TX_COS_E1X;
- break;
-
+ return BNX2X_MULTI_TX_COS_E1X;
case BCM57712:
case BCM57712_MF:
- max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
- break;
-
+ case BCM57712_VF:
+ return BNX2X_MULTI_TX_COS_E2_E3A0;
case BCM57800:
case BCM57800_MF:
+ case BCM57800_VF:
case BCM57810:
case BCM57810_MF:
- case BCM57840_O:
case BCM57840_4_10:
case BCM57840_2_20:
+ case BCM57840_O:
case BCM57840_MFO:
+ case BCM57810_VF:
case BCM57840_MF:
+ case BCM57840_VF:
case BCM57811:
case BCM57811_MF:
- max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
- break;
-
+ case BCM57811_VF:
+ return BNX2X_MULTI_TX_COS_E3B0;
default:
- pr_err("Unknown board_type (%ld), aborting\n",
- ent->driver_data);
+ pr_err("Unknown board_type (%d), aborting\n", chip_id);
return -ENODEV;
}
+}
+
+static int set_is_vf(int chip_id)
+{
+ switch (chip_id) {
+ case BCM57712_VF:
+ case BCM57800_VF:
+ case BCM57810_VF:
+ case BCM57840_VF:
+ case BCM57811_VF:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
- cnic_cnt = 1;
- max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
+static int bnx2x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct bnx2x *bp;
+ int pcie_width, pcie_speed;
+ int rc, max_non_def_sbs;
+ int rx_count, tx_count, rss_count, doorbell_size;
+ int max_cos_est;
+ bool is_vf;
+ int cnic_cnt;
+
+ /* An estimated maximum supported CoS number according to the chip
+ * version.
+ * We will try to roughly estimate the maximum number of CoSes this chip
+ * may support in order to minimize the memory allocated for Tx
+ * netdev_queue's. This number will be accurately calculated during the
+ * initialization of bp->max_cos based on the chip versions AND chip
+ * revision in the bnx2x_init_bp().
+ */
+ max_cos_est = set_max_cos_est(ent->driver_data);
+ if (max_cos_est < 0)
+ return max_cos_est;
+ is_vf = set_is_vf(ent->driver_data);
+ cnic_cnt = is_vf ? 0 : 1;
- WARN_ON(!max_non_def_sbs);
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
- rss_count = max_non_def_sbs - cnic_cnt;
+ rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
+
+ if (rss_count < 1)
+ return -EINVAL;
/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
rx_count = rss_count + cnic_cnt;
- /*
- * Maximum number of netdev Tx queues:
+ /* Maximum number of netdev Tx queues:
* Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
tx_count = rss_count * max_cos_est + cnic_cnt;
@@ -12113,42 +12413,55 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp = netdev_priv(dev);
+ bp->flags = 0;
+ if (is_vf)
+ bp->flags |= IS_VF_FLAG;
+
bp->igu_sb_cnt = max_non_def_sbs;
+ bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
bp->msg_enable = debug;
bp->cnic_support = cnic_cnt;
bp->cnic_probe = bnx2x_cnic_probe;
pci_set_drvdata(pdev, dev);
- rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
+ rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
if (rc < 0) {
free_netdev(dev);
return rc;
}
+ BNX2X_DEV_INFO("This is a %s function\n",
+ IS_PF(bp) ? "physical" : "virtual");
BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
- BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
-
+ BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
- tx_count, rx_count);
+ tx_count, rx_count);
rc = bnx2x_init_bp(bp);
if (rc)
goto init_one_exit;
- /*
- * Map doorbels here as we need the real value of bp->max_cos which
- * is initialized in bnx2x_init_bp().
+ /* Map doorbells here as we need the real value of bp->max_cos which
+ * is initialized in bnx2x_init_bp() to determine the number of
+ * l2 connections.
*/
- doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
- if (doorbell_size > pci_resource_len(pdev, 2)) {
- dev_err(&bp->pdev->dev,
- "Cannot map doorbells, bar size too small, aborting\n");
- rc = -ENOMEM;
- goto init_one_exit;
+ if (IS_VF(bp)) {
+ bnx2x_vf_map_doorbells(bp);
+ rc = bnx2x_vf_pci_alloc(bp);
+ if (rc)
+ goto init_one_exit;
+ } else {
+ doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+ if (doorbell_size > pci_resource_len(pdev, 2)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbells, bar size too small, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
+ bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ doorbell_size);
}
- bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- doorbell_size);
if (!bp->doorbells) {
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
@@ -12156,8 +12469,25 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_one_exit;
}
+ if (IS_VF(bp)) {
+ rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
+ if (rc)
+ goto init_one_exit;
+ }
+
+	/* Enable SR-IOV if the capability is found in configuration space.
+	 * Once the generic SR-IOV framework makes it in from the PCI tree,
+	 * this will be revised to allow dynamic control over the number of
+	 * VFs. For now, change the num-of-VFs parameter below to enable SR-IOV.
+	 */
+ rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
+ if (rc)
+ goto init_one_exit;
+
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
+ BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
/* disable FCOE L2 queue for E1x*/
if (CHIP_IS_E1x(bp))
@@ -12179,13 +12509,20 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Configure interrupt mode: try to enable MSI-X/MSI if
* needed.
*/
- bnx2x_set_int_mode(bp);
+ rc = bnx2x_set_int_mode(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot set interrupts\n");
+ goto init_one_exit;
+ }
+ BNX2X_DEV_INFO("set interrupts successfully\n");
+ /* register the net device */
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
goto init_one_exit;
}
+ BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
if (!NO_FCOE(bp)) {
@@ -12196,6 +12533,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
+ BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
+ pcie_width, pcie_speed);
BNX2X_DEV_INFO(
"%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
@@ -12213,7 +12552,7 @@ init_one_exit:
if (bp->regview)
iounmap(bp->regview);
- if (bp->doorbells)
+ if (IS_PF(bp) && bp->doorbells)
iounmap(bp->doorbells);
free_netdev(dev);
@@ -12253,25 +12592,37 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
/* Power on: we can't let PCI layer write to us while we are in D3 */
- bnx2x_set_power_state(bp, PCI_D0);
+ if (IS_PF(bp))
+ bnx2x_set_power_state(bp, PCI_D0);
/* Disable MSI/MSI-X */
bnx2x_disable_msi(bp);
/* Power off */
- bnx2x_set_power_state(bp, PCI_D3hot);
+ if (IS_PF(bp))
+ bnx2x_set_power_state(bp, PCI_D3hot);
/* Make sure RESET task is not scheduled before continuing */
cancel_delayed_work_sync(&bp->sp_rtnl_task);
+ bnx2x_iov_remove_one(bp);
+
+ /* send message via vfpf channel to release the resources of this vf */
+ if (IS_VF(bp))
+ bnx2x_vfpf_release(bp);
+
if (bp->regview)
iounmap(bp->regview);
- if (bp->doorbells)
- iounmap(bp->doorbells);
-
- bnx2x_release_firmware(bp);
+	/* For VFs, the doorbells are part of the regview and were unmapped
+	 * along with it. FW is only loaded by the PF.
+	 */
+ if (IS_PF(bp)) {
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
+ bnx2x_release_firmware(bp);
+ }
bnx2x_free_mem_bp(bp);
free_netdev(dev);
@@ -13059,4 +13410,36 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
return cp;
}
+u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ u32 offset = BAR_USTRORM_INTMEM;
+ if (IS_VF(bp))
+ return bnx2x_vf_ustorm_prods_offset(bp, fp);
+ else if (!CHIP_IS_E1x(bp))
+ offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+ else
+ offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+
+ return offset;
+}
+
+/* Called only on E1H or E2.
+ * When pretending to be a PF, the pretend value is the function number 0...7.
+ * When pretending to be a VF, the pretend value is the PF-num:VF-valid:ABS-VFID
+ * combination.
+ */
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
+{
+ u32 pretend_reg;
+
+ if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
+ return -1;
+
+ /* get my own pretend register */
+ pretend_reg = bnx2x_get_pretend_reg(bp);
+ REG_WR(bp, pretend_reg, pretend_func_val);
+ REG_RD(bp, pretend_reg);
+ return 0;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
index ddd5106ad2f9..caf1aef651eb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -1,6 +1,6 @@
/* bnx2x_mfw_req.h: Broadcom Everest network driver.
*
- * Copyright (c) 2012 Broadcom Corporation
+ * Copyright (c) 2012-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bc2f65b32649..791eb2d53011 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
/* bnx2x_reg.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -825,6 +825,7 @@
/* [RW 28] The value sent to CM header in the case of CFC load error. */
#define DORQ_REG_ERR_CMHEAD 0x170058
#define DORQ_REG_IF_EN 0x170004
+#define DORQ_REG_MAX_RVFID_SIZE 0x1701ec
#define DORQ_REG_MODE_ACT 0x170008
/* [RW 5] The normal mode CID extraction offset. */
#define DORQ_REG_NORM_CID_OFST 0x17002c
@@ -847,6 +848,22 @@
writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
read reads this written value. */
#define DORQ_REG_RSP_INIT_CRD 0x170048
+#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
+#define DORQ_REG_VF_NORM_CID_BASE 0x1701a0
+#define DORQ_REG_VF_NORM_CID_OFST 0x1701f4
+#define DORQ_REG_VF_NORM_CID_WND_SIZE 0x1701a4
+#define DORQ_REG_VF_NORM_MAX_CID_COUNT 0x1701e4
+#define DORQ_REG_VF_NORM_VF_BASE 0x1701a8
+/* [RW 10] VF type validation mask value */
+#define DORQ_REG_VF_TYPE_MASK_0 0x170218
+/* [RW 17] VF type validation Max MCID value */
+#define DORQ_REG_VF_TYPE_MAX_MCID_0 0x1702d8
+/* [RW 17] VF type validation Min MCID value */
+#define DORQ_REG_VF_TYPE_MIN_MCID_0 0x170298
+/* [RW 10] VF type validation comp value */
+#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
+#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+
/* [RW 4] Initial activity counter value on the load request; when the
shortcut is done. */
#define DORQ_REG_SHRT_ACT_CNT 0x170070
@@ -859,6 +876,7 @@
#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0)
+#define DORQ_REG_VF_USAGE_CNT 0x170320
#define HC_REG_AGG_INT_0 0x108050
#define HC_REG_AGG_INT_1 0x108054
#define HC_REG_ATTN_BIT 0x108120
@@ -2136,6 +2154,8 @@
/* [R 32] Interrupt register #0 read */
#define NIG_REG_NIG_INT_STS_0 0x103b0
#define NIG_REG_NIG_INT_STS_1 0x103c0
+/* [RC 32] Interrupt register #0 read clear */
+#define NIG_REG_NIG_INT_STS_CLR_0 0x103b4
/* [R 32] Legacy E1 and E1H location for parity error mask register. */
#define NIG_REG_NIG_PRTY_MASK 0x103dc
/* [RW 32] Parity mask register #0 read/write */
@@ -2571,6 +2591,7 @@
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
#define PBF_REG_DISABLE_PF 0x1402e8
+#define PBF_REG_DISABLE_VF 0x1402ec
/* [RW 18] For port 0: For each client that is subject to WFQ (the
* corresponding bit is 1); indicates to which of the credit registers this
* client is mapped. For clients which are not credit blocked; their mapping
@@ -3708,6 +3729,10 @@
#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
/* [WB 160] Used for initialization of the inbound interrupts memory */
#define PXP_REG_HST_INBOUND_INT 0x103800
+/* [RW 7] Indirect access to the permission table. The fields are : {Valid;
+ * VFID[5:0]}
+ */
+#define PXP_REG_HST_ZONE_PERMISSION_TABLE 0x103400
/* [RW 32] Interrupt mask register #0 read/write */
#define PXP_REG_PXP_INT_MASK_0 0x103074
#define PXP_REG_PXP_INT_MASK_1 0x103084
@@ -5966,6 +5991,7 @@
#define HW_LOCK_RESOURCE_SPIO 2
#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
@@ -6305,6 +6331,15 @@
#define PCI_PM_DATA_B 0x414
#define PCI_ID_VAL1 0x434
#define PCI_ID_VAL2 0x438
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
+/* The first VF_NUM for the PF is encoded in this register.
+ * The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ * Software should program these bits based on the total number of VFs
+ * programmed for each PF.
+ * Since registers 0x000-0x7ff are split across functions, each PF will
+ * have the same location for the same 4 bits.
+ */
+ */
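
A hedged sketch of how software might recover the first VF number from this register, assuming the 4-bit field counts groups of 8 VFs as the comment implies; the helper name and the scaling factor are illustrative, not taken from the driver.

#include <stdio.h>

#define EX_PF_FIRST_VF_NUM_MASK	0xf	/* mirrors GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK */

/* Assumption: the field holds first-VF-number / 8 (VF counts are multiples of 8) */
static unsigned int first_vf_in_pf(unsigned int reg_val)
{
	return (reg_val & EX_PF_FIRST_VF_NUM_MASK) * 8;
}

int main(void)
{
	printf("reg=0x3 -> first VF %u\n", first_vf_in_pf(0x3));	/* 24 */
	return 0;
}
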
#define PXPCS_TL_CONTROL_5 0x814
#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
@@ -6554,6 +6589,27 @@
(7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
+#define PXP_VF_ADDR_IGU_START 0
+#define PXP_VF_ADDR_IGU_SIZE 0x3000
+#define PXP_VF_ADDR_IGU_END\
+ ((PXP_VF_ADDR_IGU_START) + (PXP_VF_ADDR_IGU_SIZE) - 1)
+
+#define PXP_VF_ADDR_USDM_QUEUES_START 0x3000
+#define PXP_VF_ADDR_USDM_QUEUES_SIZE\
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_USDM_QUEUES_END\
+ ((PXP_VF_ADDR_USDM_QUEUES_START) + (PXP_VF_ADDR_USDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_CSDM_GLOBAL_START 0x7600
+#define PXP_VF_ADDR_CSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_CSDM_GLOBAL_END\
+ ((PXP_VF_ADDR_CSDM_GLOBAL_START) + (PXP_VF_ADDR_CSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_DB_START 0x7c00
+#define PXP_VF_ADDR_DB_SIZE 0x200
+#define PXP_VF_ADDR_DB_END\
+ ((PXP_VF_ADDR_DB_START) + (PXP_VF_ADDR_DB_SIZE) - 1)
+
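The START/SIZE/END triplets above all follow the same pattern, END = START + SIZE - 1 (the last byte inside the window); a trivial stand-alone check:

#include <assert.h>

#define EX_DB_START	0x7c00
#define EX_DB_SIZE	0x200
#define EX_DB_END	(EX_DB_START + EX_DB_SIZE - 1)

int main(void)
{
	assert(EX_DB_END == 0x7dff);	/* last byte of the VF doorbell window */
	return 0;
}
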
#define MDIO_REG_BANK_CL73_IEEEB0 0x0
#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 09b625e0fdaa..7306416bc90d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,6 +1,6 @@
/* bnx2x_sp.c: Broadcom Everest network driver.
*
- * Copyright (c) 2011-2012 Broadcom Corporation
+ * Copyright (c) 2011-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -325,7 +325,7 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
return 0;
}
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
if (bp->panic)
return -EIO;
@@ -707,7 +707,8 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
struct eth_classify_header *hdr, int rule_cnt)
{
- hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
+ hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
+ (type << BNX2X_SWCID_SHIFT));
hdr->rule_cnt = (u8)rule_cnt;
}
@@ -813,8 +814,9 @@ static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
hdr->length = 1;
hdr->offset = (u8)cam_offset;
- hdr->client_id = 0xff;
- hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
+ hdr->client_id = cpu_to_le16(0xff);
+ hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (type << BNX2X_SWCID_SHIFT));
}
static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
@@ -903,7 +905,7 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
(struct eth_classify_rules_ramrod_data *)(raw->rdata);
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- int cmd = elem->cmd_data.vlan_mac.cmd;
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
@@ -953,7 +955,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
(struct eth_classify_rules_ramrod_data *)(raw->rdata);
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- int cmd = elem->cmd_data.vlan_mac.cmd;
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
@@ -1407,7 +1409,7 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
/* Wait until there are no pending commands */
if (!bnx2x_exe_queue_empty(exeq))
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
else
return 0;
}
@@ -1442,7 +1444,7 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
if (cqe->message.error)
return -EINVAL;
- /* Run the next bulk of pending commands if requeted */
+ /* Run the next bulk of pending commands if requested */
if (test_bit(RAMROD_CONT, ramrod_flags)) {
rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
if (rc < 0)
@@ -1532,7 +1534,7 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
bool restore,
struct bnx2x_vlan_mac_registry_elem **re)
{
- int cmd = elem->cmd_data.vlan_mac.cmd;
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
struct bnx2x_vlan_mac_registry_elem *reg_elem;
/* Allocate a new registry element if needed. */
@@ -1591,7 +1593,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
struct bnx2x_vlan_mac_registry_elem *reg_elem;
- int cmd;
+ enum bnx2x_vlan_mac_cmd cmd;
/*
* If DRIVER_ONLY execution is requested, cleanup a registry
@@ -2103,7 +2105,7 @@ static inline void __storm_memset_mac_filters(struct bnx2x *bp,
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
struct bnx2x_rx_mode_ramrod_params *p)
{
- /* update the bp MAC filter structure */
+ /* update the bp MAC filter structure */
u32 mask = (1 << p->cl_id);
struct tstorm_eth_mac_filter_config *mac_filters =
@@ -2166,7 +2168,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
mac_filters->unmatched_unicast & ~mask;
DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
- "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+ "accp_mcast 0x%x\naccp_bcast 0x%x\n",
mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
mac_filters->bcast_accept_all);
@@ -2186,12 +2188,12 @@ static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
struct eth_classify_header *hdr,
u8 rule_cnt)
{
- hdr->echo = cid;
+ hdr->echo = cpu_to_le32(cid);
hdr->rule_cnt = rule_cnt;
}
static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
- unsigned long accept_flags,
+ unsigned long *accept_flags,
struct eth_filter_rules_cmd *cmd,
bool clear_accept_all)
{
@@ -2201,33 +2203,33 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- if (accept_flags) {
- if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
- state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
- }
+ if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ }
- if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
- state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
- state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- }
- if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
- state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+ if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
+ state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ }
- if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
- }
- if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
- state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+ if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
}
+ if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+
/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
if (clear_accept_all) {
state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
@@ -2260,8 +2262,9 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_TX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
- &(data->rules[rule_idx++]), false);
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+ &(data->rules[rule_idx++]),
+ false);
}
/* Rx */
@@ -2272,8 +2275,9 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_RX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
- &(data->rules[rule_idx++]), false);
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+ &(data->rules[rule_idx++]),
+ false);
}
@@ -2293,9 +2297,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_TX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
- &(data->rules[rule_idx++]),
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+ &(data->rules[rule_idx]),
true);
+ rule_idx++;
}
/* Rx */
@@ -2306,9 +2311,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_RX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
- &(data->rules[rule_idx++]),
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+ &(data->rules[rule_idx]),
true);
+ rule_idx++;
}
}
@@ -2429,7 +2435,7 @@ static int bnx2x_mcast_wait(struct bnx2x *bp,
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
struct bnx2x_mcast_obj *o,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
int total_sz;
struct bnx2x_pending_mcast_cmd *new_cmd;
@@ -2561,7 +2567,7 @@ static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
union bnx2x_mcast_config_data *cfg_data,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *r = &o->raw;
struct eth_multicast_rules_ramrod_data *data =
@@ -2625,7 +2631,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e2(
int *rdata_idx)
{
int cur_bin, cnt = *rdata_idx;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
/* go through the registry and configure the bins from it */
for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
@@ -2657,7 +2663,7 @@ static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
{
struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
int cnt = *line_idx;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
link) {
@@ -2780,7 +2786,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
int *line_idx)
{
struct bnx2x_mcast_list_elem *mlist_pos;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
int cnt = *line_idx;
list_for_each_entry(mlist_pos, &p->mcast_list, link) {
@@ -2790,7 +2796,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
cnt++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- mlist_pos->mac);
+ mlist_pos->mac);
}
*line_idx = cnt;
@@ -2827,7 +2833,8 @@ static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
* Returns number of lines filled in the ramrod data in total.
*/
static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd,
int start_cnt)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -2861,7 +2868,7 @@ static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
int reg_sz = o->get_registry_size(o);
@@ -2930,8 +2937,9 @@ static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
struct eth_multicast_rules_ramrod_data *data =
(struct eth_multicast_rules_ramrod_data *)(r->rdata);
- data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
- (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING <<
+ BNX2X_SWCID_SHIFT));
data->header.rule_cnt = len;
}
@@ -2965,7 +2973,7 @@ static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -3051,7 +3059,7 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
/* Mark, that there is a work to do */
if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
@@ -3085,7 +3093,7 @@ static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
- mlist_pos->mac, bit);
+ mlist_pos->mac, bit);
/* bookkeeping... */
BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
@@ -3113,7 +3121,7 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
*/
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
int i;
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -3167,7 +3175,7 @@ static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
int reg_sz = o->get_registry_size(o);
@@ -3240,7 +3248,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
union bnx2x_mcast_config_data *cfg_data,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *r = &o->raw;
struct mac_configuration_cmd *data =
@@ -3284,9 +3292,10 @@ static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
BNX2X_MAX_MULTICAST*(1 + r->func_id));
data->hdr.offset = offset;
- data->hdr.client_id = 0xff;
- data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
- (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->hdr.client_id = cpu_to_le16(0xff);
+ data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING <<
+ BNX2X_SWCID_SHIFT));
data->hdr.length = len;
}
@@ -3309,7 +3318,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
{
struct bnx2x_mcast_mac_elem *elem;
int i = 0;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
/* go through the registry and configure the MACs from it. */
list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
@@ -3319,7 +3328,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
i++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- cfg_data.mac);
+ cfg_data.mac);
}
*rdata_idx = i;
@@ -3334,7 +3343,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
struct bnx2x_pending_mcast_cmd *cmd_pos;
struct bnx2x_mcast_mac_elem *pmac_pos;
struct bnx2x_mcast_obj *o = p->mcast_obj;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
int cnt = 0;
@@ -3355,7 +3364,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
cnt++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- pmac_pos->mac);
+ pmac_pos->mac);
}
break;
@@ -3458,7 +3467,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
struct bnx2x_raw_obj *raw = &o->raw;
@@ -3562,7 +3571,7 @@ static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
int bnx2x_config_mcast(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
struct bnx2x_raw_obj *r = &o->raw;
@@ -4085,8 +4094,8 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
DP(BNX2X_MSG_SP, "Configuring RSS\n");
/* Set an echo field */
- data->echo = (r->cid & BNX2X_SWCID_MASK) |
- (r->state << BNX2X_SWCID_SHIFT);
+ data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (r->state << BNX2X_SWCID_SHIFT));
/* RSS mode */
if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
@@ -4237,11 +4246,16 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
unsigned long *pending = &o->pending;
/* Check that the requested transition is legal */
- if (o->check_transition(bp, o, params))
+ rc = o->check_transition(bp, o, params);
+ if (rc) {
+ BNX2X_ERR("check transition returned an error. rc %d\n", rc);
return -EINVAL;
+ }
/* Set "pending" bit */
+ DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
pending_bit = o->set_pending(o, params);
+ DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
/* Don't send a command if only driver cleanup was requested */
if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
@@ -5025,8 +5039,11 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
* Don't allow a next state transition if we are in the middle of
* the previous one.
*/
- if (o->pending)
+ if (o->pending) {
+ BNX2X_ERR("Blocking transition since pending was %lx\n",
+ o->pending);
return -EBUSY;
+ }
switch (state) {
case BNX2X_Q_STATE_RESET:
@@ -5199,6 +5216,27 @@ void bnx2x_init_queue_obj(struct bnx2x *bp,
obj->set_pending = bnx2x_queue_set_pending;
}
+/* return a queue object's logical state */
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj)
+{
+ switch (obj->state) {
+ case BNX2X_Q_STATE_ACTIVE:
+ case BNX2X_Q_STATE_MULTI_COS:
+ return BNX2X_Q_LOGICAL_STATE_ACTIVE;
+ case BNX2X_Q_STATE_RESET:
+ case BNX2X_Q_STATE_INITIALIZED:
+ case BNX2X_Q_STATE_MCOS_TERMINATED:
+ case BNX2X_Q_STATE_INACTIVE:
+ case BNX2X_Q_STATE_STOPPED:
+ case BNX2X_Q_STATE_TERMINATED:
+ case BNX2X_Q_STATE_FLRED:
+ return BNX2X_Q_LOGICAL_STATE_STOPPED;
+ default:
+ return -EINVAL;
+ }
+}
+
/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
struct bnx2x_func_sp_obj *o)
@@ -5631,9 +5669,9 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->function_mode = (u8)start_params->mf_mode;
- rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
- rdata->path_id = BP_PATH(bp);
+ rdata->function_mode = (u8)start_params->mf_mode;
+ rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
+ rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
/*
@@ -5716,21 +5754,20 @@ inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
struct bnx2x_func_sp_obj *o = params->f_obj;
struct afex_vif_list_ramrod_data *rdata =
(struct afex_vif_list_ramrod_data *)o->afex_rdata;
- struct bnx2x_func_afex_viflists_params *afex_viflist_params =
+ struct bnx2x_func_afex_viflists_params *afex_vif_params =
&params->params.afex_viflists;
u64 *p_rdata = (u64 *)rdata;
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->vif_list_index = afex_viflist_params->vif_list_index;
- rdata->func_bit_map = afex_viflist_params->func_bit_map;
- rdata->afex_vif_list_command =
- afex_viflist_params->afex_vif_list_command;
- rdata->func_to_clear = afex_viflist_params->func_to_clear;
+ rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
+ rdata->func_bit_map = afex_vif_params->func_bit_map;
+ rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
+ rdata->func_to_clear = afex_vif_params->func_to_clear;
/* send in echo type of sub command */
- rdata->echo = afex_viflist_params->afex_vif_list_command;
+ rdata->echo = afex_vif_params->afex_vif_list_command;
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index adbd91b1bdfc..ff907609b9fc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,6 +1,6 @@
/* bnx2x_sp.h: Broadcom Everest network driver.
*
- * Copyright (c) 2011-2012 Broadcom Corporation
+ * Copyright (c) 2011-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -54,7 +54,7 @@ typedef enum {
BNX2X_OBJ_TYPE_RX_TX,
} bnx2x_obj_type;
-/* Filtering states */
+/* Public slow path states */
enum {
BNX2X_FILTER_MAC_PENDING,
BNX2X_FILTER_VLAN_PENDING,
@@ -524,7 +524,7 @@ struct bnx2x_mcast_ramrod_params {
int mcast_list_len;
};
-enum {
+enum bnx2x_mcast_cmd {
BNX2X_MCAST_CMD_ADD,
BNX2X_MCAST_CMD_CONT,
BNX2X_MCAST_CMD_DEL,
@@ -573,7 +573,8 @@ struct bnx2x_mcast_obj {
* @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
*/
int (*config_mcast)(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/**
* Fills the ramrod data during the RESTORE flow.
@@ -590,11 +591,13 @@ struct bnx2x_mcast_obj {
int start_bin, int *rdata_idx);
int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
void (*set_one_rule)(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
- union bnx2x_mcast_config_data *cfg_data, int cmd);
+ union bnx2x_mcast_config_data *cfg_data,
+ enum bnx2x_mcast_cmd cmd);
/** Checks if there are more mcast MACs to be set or a previous
* command is still pending.
@@ -617,7 +620,8 @@ struct bnx2x_mcast_obj {
* feasible.
*/
int (*validate)(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/**
* Restore the values of internal counters in case of a failure.
@@ -776,6 +780,12 @@ enum bnx2x_q_state {
BNX2X_Q_STATE_MAX,
};
+/* Queue logical states */
+enum bnx2x_q_logical_state {
+ BNX2X_Q_LOGICAL_STATE_ACTIVE,
+ BNX2X_Q_LOGICAL_STATE_STOPPED,
+};
+
/* Allowed commands */
enum bnx2x_queue_cmd {
BNX2X_Q_CMD_INIT,
@@ -1261,6 +1271,9 @@ void bnx2x_init_queue_obj(struct bnx2x *bp,
int bnx2x_queue_state_change(struct bnx2x *bp,
struct bnx2x_queue_state_params *params);
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj);
+
/********************* VLAN-MAC ****************/
void bnx2x_init_mac_obj(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj,
@@ -1338,7 +1351,8 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
* completions.
*/
int bnx2x_config_mcast(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/****************** CREDIT POOL ****************/
void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
new file mode 100644
index 000000000000..6adfa2093581
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -0,0 +1,3198 @@
+/* bnx2x_sriov.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Shmulik Ravid <shmulikr@broadcom.com>
+ * Ariel Elior <ariele@broadcom.com>
+ *
+ */
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_cmn.h"
+#include <linux/crc32.h>
+
+/* General service functions */
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+ u16 pf_id)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+}
+
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+ u8 enable)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+}
+
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+ int idx;
+
+ for_each_vf(bp, idx)
+ if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
+ break;
+ return idx;
+}
+
+static
+struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+ u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
+ return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
+}
+
+static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ u8 igu_sb_id, u8 segment, u16 index, u8 op,
+ u8 update)
+{
+ /* acking a VF sb through the PF - use the GRC */
+ u32 ctl;
+ u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+ u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+ u32 func_encode = vf->abs_vfid;
+ u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
+ struct igu_regular cmd_data = {0};
+
+ cmd_data.sb_id_and_flags =
+ ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+ ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
+ func_encode << IGU_CTRL_REG_FID_SHIFT |
+ IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ cmd_data.sb_id_and_flags, igu_addr_data);
+ REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
+ mmiowb();
+ barrier();
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ ctl, igu_addr_ctl);
+ REG_WR(bp, igu_addr_ctl, ctl);
+ mmiowb();
+ barrier();
+}
+/* VFOP - VF slow-path operation support */
+
+#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000
+
+/* VFOP operations states */
+enum bnx2x_vfop_qctor_state {
+ BNX2X_VFOP_QCTOR_INIT,
+ BNX2X_VFOP_QCTOR_SETUP,
+ BNX2X_VFOP_QCTOR_INT_EN
+};
+
+enum bnx2x_vfop_qdtor_state {
+ BNX2X_VFOP_QDTOR_HALT,
+ BNX2X_VFOP_QDTOR_TERMINATE,
+ BNX2X_VFOP_QDTOR_CFCDEL,
+ BNX2X_VFOP_QDTOR_DONE
+};
+
+enum bnx2x_vfop_vlan_mac_state {
+ BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
+ BNX2X_VFOP_VLAN_MAC_CLEAR,
+ BNX2X_VFOP_VLAN_MAC_CHK_DONE,
+ BNX2X_VFOP_MAC_CONFIG_LIST,
+ BNX2X_VFOP_VLAN_CONFIG_LIST,
+ BNX2X_VFOP_VLAN_CONFIG_LIST_0
+};
+
+enum bnx2x_vfop_qsetup_state {
+ BNX2X_VFOP_QSETUP_CTOR,
+ BNX2X_VFOP_QSETUP_VLAN0,
+ BNX2X_VFOP_QSETUP_DONE
+};
+
+enum bnx2x_vfop_mcast_state {
+ BNX2X_VFOP_MCAST_DEL,
+ BNX2X_VFOP_MCAST_ADD,
+ BNX2X_VFOP_MCAST_CHK_DONE
+};
+enum bnx2x_vfop_qflr_state {
+ BNX2X_VFOP_QFLR_CLR_VLAN,
+ BNX2X_VFOP_QFLR_CLR_MAC,
+ BNX2X_VFOP_QFLR_TERMINATE,
+ BNX2X_VFOP_QFLR_DONE
+};
+
+enum bnx2x_vfop_flr_state {
+ BNX2X_VFOP_FLR_QUEUES,
+ BNX2X_VFOP_FLR_HW
+};
+
+enum bnx2x_vfop_close_state {
+ BNX2X_VFOP_CLOSE_QUEUES,
+ BNX2X_VFOP_CLOSE_HW
+};
+
+enum bnx2x_vfop_rxmode_state {
+ BNX2X_VFOP_RXMODE_CONFIG,
+ BNX2X_VFOP_RXMODE_DONE
+};
+
+enum bnx2x_vfop_qteardown_state {
+ BNX2X_VFOP_QTEARDOWN_RXMODE,
+ BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
+ BNX2X_VFOP_QTEARDOWN_CLR_MAC,
+ BNX2X_VFOP_QTEARDOWN_QDTOR,
+ BNX2X_VFOP_QTEARDOWN_DONE
+};
+
+#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
+
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx)
+{
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
+ vf->abs_vfid,
+ q_idx,
+ sb_idx,
+ init_params->tx.sb_cq_index,
+ init_params->tx.hc_rate,
+ setup_params->flags,
+ setup_params->txq_params.traffic_type);
+}
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx)
+{
+ struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
+ "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
+ vf->abs_vfid,
+ q_idx,
+ sb_idx,
+ init_params->rx.sb_cq_index,
+ init_params->rx.hc_rate,
+ setup_params->gen_params.mtu,
+ rxq_params->buf_sz,
+ rxq_params->sge_buf_sz,
+ rxq_params->max_sges_pkt,
+ rxq_params->tpa_agg_sz,
+ setup_params->flags,
+ rxq_params->drop_flags,
+ rxq_params->cache_line_log);
+}
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q,
+ struct bnx2x_vfop_qctor_params *p,
+ unsigned long q_type)
+{
+ struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
+ struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
+
+ /* INIT */
+
+ /* Enable host coalescing in the transition to INIT state */
+ if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
+
+ if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
+
+ /* FW SB ID */
+ init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+
+ /* context */
+ init_p->cxts[0] = q->cxt;
+
+ /* SETUP */
+
+ /* Setup-op general parameters */
+ setup_p->gen_params.spcl_id = vf->sp_cl_id;
+ setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
+
+ /* Setup-op pause params:
+	 * Nothing to do, the pause thresholds are set by default to 0, which
+	 * effectively turns off the feature for this queue. We don't want
+	 * one queue (VF) to interfere with another queue (another VF).
+ */
+ if (vf->cfg_flags & VF_CFG_FW_FC)
+ BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
+ vf->abs_vfid);
+ /* Setup-op flags:
+ * collect statistics, zero statistics, local-switching, security,
+ * OV for Flex10, RSS and MCAST for leading
+ */
+ if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
+
+ /* for VFs, enable tx switching, bd coherency, and mac address
+ * anti-spoofing
+ */
+ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
+
+ if (vfq_is_leading(q)) {
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+ }
+
+ /* Setup-op rx parameters */
+ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
+ struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
+
+ rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
+ rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
+ rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
+ }
+
+ /* Setup-op tx parameters */
+ if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
+ setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
+ setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ }
+}
+
+/* VFOP queue construction */
+static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
+ struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
+ enum bnx2x_vfop_qctor_state state = vfop->state;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_QCTOR_INIT:
+
+ /* has this queue already been opened? */
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ DP(BNX2X_MSG_IOV,
+ "Entered qctor but queue was already up. Aborting gracefully\n");
+ goto op_done;
+ }
+
+ /* next state */
+ vfop->state = BNX2X_VFOP_QCTOR_SETUP;
+
+ q_params->cmd = BNX2X_Q_CMD_INIT;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QCTOR_SETUP:
+ /* next state */
+ vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
+
+ /* copy pre-prepared setup params to the queue-state params */
+ vfop->op_p->qctor.qstate.params.setup =
+ vfop->op_p->qctor.prep_qsetup;
+
+ q_params->cmd = BNX2X_Q_CMD_SETUP;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QCTOR_INT_EN:
+
+ /* enable interrupts */
+ bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
+ USTORM_ID, 0, IGU_INT_ENABLE, 0);
+ goto op_done;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
+ vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
+op_done:
+ bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ return;
+}
+
+static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+
+ vfop->args.qctor.qid = qid;
+ vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
+
+ bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
+ bnx2x_vfop_qctor, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
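
The VFOP handlers above are resumable state machines: each invocation issues one ramrod, records the next state, and is re-entered when the completion arrives. A minimal user-space sketch of the idiom (synchronous here for brevity, which the real driver is not):

#include <stdio.h>

enum qctor_state { ST_INIT, ST_SETUP, ST_INT_EN, ST_DONE };

static enum qctor_state qctor_step(enum qctor_state s)
{
	switch (s) {
	case ST_INIT:   puts("send INIT ramrod");		return ST_SETUP;
	case ST_SETUP:  puts("send SETUP ramrod");		return ST_INT_EN;
	case ST_INT_EN: puts("ack IGU / enable interrupts");	return ST_DONE;
	default:        return ST_DONE;
	}
}

int main(void)
{
	enum qctor_state s = ST_INIT;

	/* the driver resumes on each ramrod completion instead of looping */
	while (s != ST_DONE)
		s = qctor_step(s);
	return 0;
}
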
+
+/* VFOP queue destruction */
+static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
+ struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
+ enum bnx2x_vfop_qdtor_state state = vfop->state;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_QDTOR_HALT:
+
+ /* has this queue already been stopped? */
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED) {
+ DP(BNX2X_MSG_IOV,
+ "Entered qdtor but queue was already stopped. Aborting gracefully\n");
+ goto op_done;
+ }
+
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
+
+ q_params->cmd = BNX2X_Q_CMD_HALT;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QDTOR_TERMINATE:
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
+
+ q_params->cmd = BNX2X_Q_CMD_TERMINATE;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QDTOR_CFCDEL:
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_DONE;
+
+ q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
+ vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
+op_done:
+ case BNX2X_VFOP_QDTOR_DONE:
+ /* invalidate the context */
+ qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
+ qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
+static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_queue_state_params *qstate =
+ &vf->op_params.qctor.qstate;
+
+ memset(qstate, 0, sizeof(*qstate));
+ qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+
+ vfop->args.qdtor.qid = qid;
+ vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
+
+ bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
+ bnx2x_vfop_qdtor, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
+ cmd->block);
+ }
+ DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
+ return -ENOMEM;
+}
+
+static void
+bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
+{
+ struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+ if (vf) {
+ if (!vf_sb_count(vf))
+ vf->igu_base_id = igu_sb_id;
+ ++vf_sb_count(vf);
+ }
+}
+
+/* VFOP MAC/VLAN helpers */
+static inline void bnx2x_vfop_credit(struct bnx2x *bp,
+ struct bnx2x_vfop *vfop,
+ struct bnx2x_vlan_mac_obj *obj)
+{
+ struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
+
+ /* update credit only if there is no error
+ * and a valid credit counter
+ */
+ if (!vfop->rc && args->credit) {
+ int cnt = 0;
+ struct list_head *pos;
+
+ list_for_each(pos, &obj->head)
+ cnt++;
+
+ atomic_set(args->credit, cnt);
+ }
+}
+
+static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
+ struct bnx2x_vfop_filter *pos,
+ struct bnx2x_vlan_mac_data *user_req)
+{
+ user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
+ BNX2X_VLAN_MAC_DEL;
+
+ switch (pos->type) {
+ case BNX2X_VFOP_FILTER_MAC:
+ memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
+ break;
+ case BNX2X_VFOP_FILTER_VLAN:
+ user_req->u.vlan.vlan = pos->vid;
+ break;
+ default:
+ BNX2X_ERR("Invalid filter type, skipping\n");
+ return 1;
+ }
+ return 0;
+}
+
+static int
+bnx2x_vfop_config_vlan0(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
+ bool add)
+{
+ int rc;
+
+ vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
+ BNX2X_VLAN_MAC_DEL;
+ vlan_mac->user_req.u.vlan.vlan = 0;
+
+ rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ if (rc == -EEXIST)
+ rc = 0;
+ return rc;
+}
+
+static int bnx2x_vfop_config_list(struct bnx2x *bp,
+ struct bnx2x_vfop_filters *filters,
+ struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
+{
+ struct bnx2x_vfop_filter *pos, *tmp;
+ struct list_head rollback_list, *filters_list = &filters->head;
+ struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
+ int rc = 0, cnt = 0;
+
+ INIT_LIST_HEAD(&rollback_list);
+
+ list_for_each_entry_safe(pos, tmp, filters_list, link) {
+ if (bnx2x_vfop_set_user_req(bp, pos, user_req))
+ continue;
+
+ rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ if (rc >= 0) {
+ cnt += pos->add ? 1 : -1;
+ list_del(&pos->link);
+ list_add(&pos->link, &rollback_list);
+ rc = 0;
+ } else if (rc == -EEXIST) {
+ rc = 0;
+ } else {
+ BNX2X_ERR("Failed to add a new vlan_mac command\n");
+ break;
+ }
+ }
+
+ /* rollback if error or too many rules added */
+ if (rc || cnt > filters->add_cnt) {
+ BNX2X_ERR("error or too many rules added. Performing rollback\n");
+ list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
+ pos->add = !pos->add; /* reverse op */
+ bnx2x_vfop_set_user_req(bp, pos, user_req);
+ bnx2x_config_vlan_mac(bp, vlan_mac);
+ list_del(&pos->link);
+ }
+ cnt = 0;
+ if (!rc)
+ rc = -EINVAL;
+ }
+ filters->add_cnt = cnt;
+ return rc;
+}
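
A stand-alone illustration of the rollback pattern used in bnx2x_vfop_config_list(): successfully applied entries are remembered and re-applied with the opposite command if a later entry fails. The names and the simulated failure are made up for the example.

#include <stdbool.h>
#include <stdio.h>

struct ex_filter { int id; bool add; };

static int ex_apply(const struct ex_filter *f, bool reverse)
{
	bool add = reverse ? !f->add : f->add;

	printf("%s filter %d\n", add ? "add" : "del", f->id);
	return (!reverse && f->id == 3) ? -1 : 0;	/* pretend filter 3 fails */
}

int main(void)
{
	struct ex_filter filters[] = { {1, true}, {2, true}, {3, true} };
	int applied = 0, rc = 0, i;

	for (i = 0; i < 3; i++) {
		rc = ex_apply(&filters[i], false);
		if (rc)
			break;
		applied++;
	}
	if (rc)		/* roll back whatever already succeeded */
		for (i = applied - 1; i >= 0; i--)
			ex_apply(&filters[i], true);
	return rc ? 1 : 0;
}
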
+
+/* VFOP set VLAN/MAC */
+static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
+ struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
+ struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
+
+ enum bnx2x_vfop_vlan_mac_state state = vfop->state;
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ bnx2x_vfop_reset_wq(vf);
+
+ switch (state) {
+ case BNX2X_VFOP_VLAN_MAC_CLEAR:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ /* do delete */
+ vfop->rc = obj->delete_all(bp, obj,
+ &vlan_mac->user_req.vlan_mac_flags,
+ &vlan_mac->ramrod_flags);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ /* do config */
+ vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ if (vfop->rc == -EEXIST)
+ vfop->rc = 0;
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
+ vfop->rc = !!obj->raw.check_pending(&obj->raw);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_MAC_CONFIG_LIST:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ /* do list config */
+ vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
+ if (vfop->rc)
+ goto op_err;
+
+ set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
+ vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_VLAN_CONFIG_LIST:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
+
+ /* remove vlan0 - could be no-op */
+ vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
+ if (vfop->rc)
+ goto op_err;
+
+		/* Do the vlan list config. If this operation fails we try to
+		 * restore vlan0 to keep the queue in working order.
+ */
+ vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
+ if (!vfop->rc) {
+ set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
+ vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ }
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
+
+ case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ if (list_empty(&obj->head))
+ /* add vlan0 */
+ vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
+op_done:
+ kfree(filters);
+ bnx2x_vfop_credit(bp, vfop, obj);
+ bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ return;
+}
+
+struct bnx2x_vfop_vlan_mac_flags {
+ bool drv_only;
+ bool dont_consume;
+ bool single_cmd;
+ bool add;
+};
+
+static void
+bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
+ struct bnx2x_vfop_vlan_mac_flags *flags)
+{
+ struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
+
+ memset(ramrod, 0, sizeof(*ramrod));
+
+ /* ramrod flags */
+ if (flags->drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
+ if (flags->single_cmd)
+ set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
+
+ /* mac_vlan flags */
+ if (flags->dont_consume)
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
+
+ /* cmd */
+ ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
+}
+
+static inline void
+bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
+ struct bnx2x_vfop_vlan_mac_flags *flags)
+{
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
+ set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
+}
+
+static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = NULL, /* single */
+ .credit = NULL, /* consume credit */
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = true,
+ .add = false /* don't care */,
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+
+ /* set extra args */
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *macs,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = macs,
+ .credit = NULL, /* consume credit */
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = false,
+ .add = false, /* don't care since only the items in the
+ * filters list affect the sp operation,
+ * not the list itself
+ */
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+
+ /* set extra args */
+ filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, u16 vid, bool add)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = NULL, /* single command */
+ .credit = &bnx2x_vfq(vf, qid, vlan_count),
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = false,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = true,
+ .add = add,
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
+ ramrod->user_req.u.vlan.vlan = vid;
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+
+ /* set extra args */
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = NULL, /* single command */
+ .credit = &bnx2x_vfq(vf, qid, vlan_count),
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = true,
+ .add = false, /* don't care */
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+
+ /* set extra args */
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *vlans,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = vlans,
+ .credit = &bnx2x_vfq(vf, qid, vlan_count),
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = false,
+ .add = false, /* don't care */
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+
+ /* set extra args */
+ filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
+ atomic_read(filters.credit);
+
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP queue setup (queue constructor + set vlan 0) */
+static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ int qid = vfop->args.qctor.qid;
+ enum bnx2x_vfop_qsetup_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_qsetup,
+ .block = false,
+ };
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_QSETUP_CTOR:
+ /* init the queue ctor command */
+ vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
+ vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QSETUP_VLAN0:
+ /* skip if non-leading or FPGA/EMU */
+ if (qid)
+ goto op_done;
+
+ /* init the queue set-vlan command (for vlan 0) */
+ vfop->state = BNX2X_VFOP_QSETUP_DONE;
+ vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+op_err:
+ BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
+op_done:
+ case BNX2X_VFOP_QSETUP_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+}
+
+int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vfop->args.qctor.qid = qid;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
+ bnx2x_vfop_qsetup, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
+static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ int qid = vfop->args.qx.qid;
+ enum bnx2x_vfop_qflr_state state = vfop->state;
+ struct bnx2x_queue_state_params *qstate;
+ struct bnx2x_vfop_cmd cmd;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ cmd.done = bnx2x_vfop_qflr;
+ cmd.block = false;
+
+ switch (state) {
+ case BNX2X_VFOP_QFLR_CLR_VLAN:
+ /* vlan-clear-all: driver-only, don't consume credit */
+ vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QFLR_CLR_MAC:
+ /* mac-clear-all: driver only, consume credit */
+ vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
+ vf->abs_vfid, vfop->rc);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QFLR_TERMINATE:
+ qstate = &vfop->op_p->qctor.qstate;
+ memset(qstate, 0, sizeof(*qstate));
+ qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ vfop->state = BNX2X_VFOP_QFLR_DONE;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
+ vf->abs_vfid, qstate->q_obj->state);
+
+ if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
+ qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
+ qstate->cmd = BNX2X_Q_CMD_TERMINATE;
+ vfop->rc = bnx2x_queue_state_change(bp, qstate);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
+ } else {
+ goto op_done;
+ }
+
+op_err:
+ BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
+ vf->abs_vfid, qid, vfop->rc);
+op_done:
+ case BNX2X_VFOP_QFLR_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
+static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vfop->args.qx.qid = qid;
+ bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
+ bnx2x_vfop_qflr, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP multi-casts */
+static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
+ struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
+ struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
+ enum bnx2x_vfop_mcast_state state = vfop->state;
+ int i;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_MCAST_DEL:
+ /* clear existing mcasts */
+ vfop->state = BNX2X_VFOP_MCAST_ADD;
+ vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_MCAST_ADD:
+ if (raw->check_pending(raw))
+ goto op_pending;
+
+ if (args->mc_num) {
+ /* update mcast list on the ramrod params */
+ INIT_LIST_HEAD(&mcast->mcast_list);
+ for (i = 0; i < args->mc_num; i++)
+ list_add_tail(&(args->mc[i].link),
+ &mcast->mcast_list);
+ /* add new mcasts */
+ vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
+ vfop->rc = bnx2x_config_mcast(bp, mcast,
+ BNX2X_MCAST_CMD_ADD);
+ }
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_MCAST_CHK_DONE:
+ vfop->rc = raw->check_pending(raw) ? 1 : 0;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
+op_done:
+ kfree(args->mc);
+ bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ return;
+}
+
+int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ bnx2x_mac_addr_t *mcasts,
+ int mcast_num, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = NULL;
+ size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
+ struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
+ NULL;
+
+ if (!mc_sz || mc) {
+ vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ int i;
+ struct bnx2x_mcast_ramrod_params *ramrod =
+ &vf->op_params.mcast;
+
+ /* set ramrod params */
+ memset(ramrod, 0, sizeof(*ramrod));
+ ramrod->mcast_obj = &vf->mcast_obj;
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY,
+ &ramrod->ramrod_flags);
+
+ /* copy mcasts pointers */
+ vfop->args.mc_list.mc_num = mcast_num;
+ vfop->args.mc_list.mc = mc;
+ for (i = 0; i < mcast_num; i++)
+ mc[i].mac = mcasts[i];
+
+ bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
+ bnx2x_vfop_mcast, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
+ cmd->block);
+ } else {
+ kfree(mc);
+ }
+ }
+ return -ENOMEM;
+}
+
+/* VFOP rx-mode */
+static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
+ enum bnx2x_vfop_rxmode_state state = vfop->state;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_RXMODE_CONFIG:
+ /* next state */
+ vfop->state = BNX2X_VFOP_RXMODE_DONE;
+
+ vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
+op_done:
+ case BNX2X_VFOP_RXMODE_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
+int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, unsigned long accept_flags)
+{
+ struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_rx_mode_ramrod_params *ramrod =
+ &vf->op_params.rx_mode;
+
+ memset(ramrod, 0, sizeof(*ramrod));
+
+ /* Prepare ramrod parameters */
+ ramrod->cid = vfq->cid;
+ ramrod->cl_id = vfq_cl_id(vf, vfq);
+ ramrod->rx_mode_obj = &bp->rx_mode_obj;
+ ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ ramrod->rx_accept_flags = accept_flags;
+ ramrod->tx_accept_flags = accept_flags;
+ ramrod->pstate = &vf->filter_state;
+ ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+ set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+ ramrod->rdata =
+ bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+ ramrod->rdata_mapping =
+ bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+
+ bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
+ bnx2x_vfop_rxmode, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
+ * queue destructor)
+ */
+static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ int qid = vfop->args.qx.qid;
+ enum bnx2x_vfop_qteardown_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd;
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ cmd.done = bnx2x_vfop_qdown;
+ cmd.block = false;
+
+ switch (state) {
+ case BNX2X_VFOP_QTEARDOWN_RXMODE:
+ /* Drop all */
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
+ vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
+ /* vlan-clear-all: don't consume credit */
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
+ /* mac-clear-all: consume credit */
+ vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QTEARDOWN_QDTOR:
+ /* run the queue destruction flow */
+ DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
+ vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
+ DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
+ vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
+ DP(BNX2X_MSG_IOV, "returned from cmd\n");
+ if (vfop->rc)
+ goto op_err;
+ return;
+op_err:
+ BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
+ vf->abs_vfid, qid, vfop->rc);
+
+ case BNX2X_VFOP_QTEARDOWN_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+}
+
+int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vfop->args.qx.qid = qid;
+ bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
+ bnx2x_vfop_qdown, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
+ cmd->block);
+ }
+
+ return -ENOMEM;
+}
+
+/* VF enable primitives
+ * when pretend is required the caller is responsible
+ * for calling pretend prior to calling these routines
+ */
+
+/* internal vf enable - until vf is enabled internally all transactions
+ * are blocked. This routine should always be called last with pretend.
+ */
+static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
+{
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
+}
+
+/* clears vf error in all semi blocks */
+static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+ REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
+}
+
+static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+ u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
+ u32 was_err_reg = 0;
+
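+ /* each WAS_ERROR register covers 32 VFs; select the register by
+ * group and clear this VF's bit within it
+ */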
+ switch (was_err_group) {
+ case 0:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
+ break;
+ case 1:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
+ break;
+ case 2:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
+ break;
+ case 3:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
+ break;
+ }
+ REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
+}
+
+static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i;
+ u32 val;
+
+ /* Set VF masks and configuration - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+
+ val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+ val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
+ if (vf->cfg_flags & VF_CFG_INT_SIMD)
+ val |= IGU_VF_CONF_SINGLE_ISR_EN;
+ val &= ~IGU_VF_CONF_PARENT_MASK;
+ val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */
+ REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+
+ DP(BNX2X_MSG_IOV,
+ "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
+ vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
+
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf_sb_count(vf); i++) {
+ u8 igu_sb_id = vf_igu_sb(vf, i);
+
+ /* zero prod memory */
+ REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
+
+ /* clear sb state machine */
+ bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
+ false /* VF */);
+
+ /* disable + update */
+ bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
+ IGU_INT_DISABLE, 1);
+ }
+}
+
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
+{
+ /* set the VF-PF association in the FW */
+ storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
+ storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+
+ /* clear vf errors */
+ bnx2x_vf_semi_clear_err(bp, abs_vfid);
+ bnx2x_vf_pglue_clear_err(bp, abs_vfid);
+
+ /* internal vf-enable - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
+ DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
+ bnx2x_vf_enable_internal(bp, true);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ /* Reset vf in IGU - interrupts are still disabled */
+ bnx2x_vf_igu_reset(bp, vf);
+
+ /* pretend to enable the vf with the PBF */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ REG_WR(bp, PBF_REG_DISABLE_VF, 0);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
+{
+ struct pci_dev *dev;
+ struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+ if (!vf)
+ goto unknown_dev;
+
+ dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
+ if (dev)
+ return bnx2x_is_pcie_pending(dev);
+
+unknown_dev:
+ BNX2X_ERR("Unknown device\n");
+ return false;
+}
+
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
+{
+ /* Wait 100ms */
+ msleep(100);
+
+ /* Verify no pending pci transactions */
+ if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
+ BNX2X_ERR("PCIE Transactions still pending\n");
+
+ return 0;
+}
+
+/* must be called after the number of PF queues and the number of VFs are
+ * both known
+ */
+static void
+bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+{
+ u16 vlan_count = 0;
+
+ /* will be set only during VF-ACQUIRE */
+ resc->num_rxqs = 0;
+ resc->num_txqs = 0;
+
+ /* no credit calculations for macs (just yet) */
+ resc->num_mac_filters = 1;
+
+ /* divvy up vlan rules */
+ vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
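+ /* round down to a power of two before dividing among the VFs */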
+ vlan_count = 1 << ilog2(vlan_count);
+ resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
+
+ /* no real limitation */
+ resc->num_mc_filters = 0;
+
+ /* num_sbs already set */
+}
+
+/* FLR routines: */
+static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ /* reset the state variables */
+ bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ vf->state = VF_FREE;
+}
+
+static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+ /* DQ usage counter */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
+ "DQ VF usage counter timed out",
+ poll_cnt);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* FW cleanup command - poll for the results */
+ if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
+ poll_cnt))
+ BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
+
+ /* verify TX hw is flushed */
+ bnx2x_tx_hw_flushed(bp, poll_cnt);
+}
+
+static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
+ enum bnx2x_vfop_flr_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_flr,
+ .block = false,
+ };
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_FLR_QUEUES:
+ /* the cleanup operations are valid if and only if the VF
+ * was first acquired.
+ */
+ if (++(qx->qid) < vf_rxq_count(vf)) {
+ vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
+ qx->qid);
+ if (vfop->rc)
+ goto op_err;
+ return;
+ }
+ /* remove multicasts */
+ vfop->state = BNX2X_VFOP_FLR_HW;
+ vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
+ 0, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+ case BNX2X_VFOP_FLR_HW:
+
+ /* dispatch final cleanup and wait for HW queues to flush */
+ bnx2x_vf_flr_clnup_hw(bp, vf);
+
+ /* release VF resources */
+ bnx2x_vf_free_resc(bp, vf);
+
+ /* re-open the mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+
+ goto op_done;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
+op_done:
+ vf->flr_clnup_stage = VF_FLR_ACK;
+ bnx2x_vfop_end(bp, vf, vfop);
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+}
+
+static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ vfop_handler_t done)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
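+ /* qid starts at -1 because BNX2X_VFOP_FLR_QUEUES pre-increments
+ * it before handling each queue
+ */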
+ vfop->args.qx.qid = -1; /* loop */
+ bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
+ bnx2x_vfop_flr, done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
+ }
+ return -ENOMEM;
+}
+
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
+{
+ int i = prev_vf ? prev_vf->index + 1 : 0;
+ struct bnx2x_virtf *vf;
+
+ /* find next VF to cleanup */
+next_vf_to_clean:
+ for (;
+ i < BNX2X_NR_VIRTFN(bp) &&
+ (bnx2x_vf(bp, i, state) != VF_RESET ||
+ bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
+ i++)
+ ;
+
+ DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i,
+ BNX2X_NR_VIRTFN(bp));
+
+ if (i < BNX2X_NR_VIRTFN(bp)) {
+ vf = BP_VF(bp, i);
+
+ /* lock the vf pf channel */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+
+ /* invoke the VF FLR SM */
+ if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
+ BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
+ vf->abs_vfid);
+
+ /* mark the VF to be ACKED and continue */
+ vf->flr_clnup_stage = VF_FLR_ACK;
+ goto next_vf_to_clean;
+ }
+ return;
+ }
+
+ /* we are done, update vf records */
+ for_each_vf(bp, i) {
+ vf = BP_VF(bp, i);
+
+ if (vf->flr_clnup_stage != VF_FLR_ACK)
+ continue;
+
+ vf->flr_clnup_stage = VF_FLR_EPILOG;
+ }
+
+ /* Acknowledge the handled VFs.
+ * We acknowledge all the vfs for which an FLR was requested, even
+ * those that we never opened, since the mcp will interrupt us
+ * immediately again if we only ack some of the bits, resulting in an
+ * endless loop. This can happen for example in KVM where an
+ * 'all ones' FLR request is sometimes given by the hypervisor
+ */
+ DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
+ bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
+ bp->vfdb->flrd_vfs[i]);
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
+
+ /* clear the acked bits - better yet if the MCP implemented
+ * write to clear semantics
+ */
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
+}
+
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
+{
+ int i;
+
+ /* Read FLR'd VFs */
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
+
+ DP(BNX2X_MSG_MCP,
+ "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
+ bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+
+ for_each_vf(bp, i) {
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+ u32 reset = 0;
+
+ if (vf->abs_vfid < 32)
+ reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
+ else
+ reset = bp->vfdb->flrd_vfs[1] &
+ (1 << (vf->abs_vfid - 32));
+
+ if (reset) {
+ /* set as reset and ready for cleanup */
+ vf->state = VF_RESET;
+ vf->flr_clnup_stage = VF_FLR_CLN;
+
+ DP(BNX2X_MSG_IOV,
+ "Initiating Final cleanup for VF %d\n",
+ vf->abs_vfid);
+ }
+ }
+
+ /* do the FLR cleanup for all marked VFs */
+ bnx2x_vf_flr_clnup(bp, NULL);
+}
+
+/* IOV global initialization routines */
+void bnx2x_iov_init_dq(struct bnx2x *bp)
+{
+ if (!IS_SRIOV(bp))
+ return;
+
+ /* Set the DQ such that the CID reflects the abs_vfid */
+ REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
+ REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
+
+ /* Set VFs starting CID. If it's > 0 the preceding CIDs belong to
+ * the PF L2 queues
+ */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
+
+ /* The VF window size is the log2 of the max number of CIDs per VF */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
+
+ /* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
+ * the PF doorbell size although the two are independent.
+ */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
+ BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+
+ /* No security checks for now -
+ * configure single rule (out of 16) mask = 0x1, value = 0x0,
+ * CID range 0 - 0x1ffff
+ */
+ REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
+ REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
+ REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
+ REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
+
+ /* set the number of doorbells allowed per VF to the full DQ range */
+ REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
+
+ /* set the VF doorbell threshold */
+ REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
+}
+
+void bnx2x_iov_init_dmae(struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
+ if (!IS_SRIOV(bp))
+ return;
+
+ REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+}
+
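+/* Standard SR-IOV VF addressing: the VF routing ID is the PF devfn plus
+ * the capability's 'offset' plus 'stride' * vfid; the bus number is the
+ * PF bus plus the high byte of that sum.
+ */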
+static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
+{
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ return dev->bus->number + ((dev->devfn + iov->offset +
+ iov->stride * vfid) >> 8);
+}
+
+static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
+{
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
+}
+
+static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i, n;
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
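+ /* each SR-IOV BAR resource on the PF is an array of equal per-VF
+ * windows (BARs are taken in pairs, hence i += 2); record this VF's
+ * slice of each
+ */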
+ for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
+ u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
+ u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
+
+ size /= iov->total;
+ vf->bars[n].bar = start + size * vf->abs_vfid;
+ vf->bars[n].size = size;
+ }
+}
+
+static int bnx2x_ari_enabled(struct pci_dev *dev)
+{
+ return dev->bus->self && dev->bus->self->ari_enabled;
+}
+
+static void
+bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
+{
+ int sb_id;
+ u32 val;
+ u8 fid;
+
+ /* IGU in normal mode - read CAM */
+ for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
+ val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+ continue;
+ fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
+ if (!(fid & IGU_FID_ENCODE_IS_PF))
+ bnx2x_vf_set_igu_info(bp, sb_id,
+ (fid & IGU_FID_VF_NUM_MASK));
+
+ DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
+ ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
+ ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
+ (fid & IGU_FID_VF_NUM_MASK)), sb_id,
+ GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
+ }
+}
+
+static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
+{
+ if (bp->vfdb) {
+ kfree(bp->vfdb->vfqs);
+ kfree(bp->vfdb->vfs);
+ kfree(bp->vfdb);
+ }
+ bp->vfdb = NULL;
+}
+
+static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+ int pos;
+ struct pci_dev *dev = bp->pdev;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ BNX2X_ERR("failed to find SRIOV capability in device\n");
+ return -ENODEV;
+ }
+
+ iov->pos = pos;
+ DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
+ pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+ pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
+ pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+ pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+ pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
+ pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ return 0;
+}
+
+static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+ u32 val;
+
+ /* read the SRIOV capability structure
+ * The fields can be read via configuration read or
+ * directly from the device (starting at offset PCICFG_OFFSET)
+ */
+ if (bnx2x_sriov_pci_cfg_info(bp, iov))
+ return -ENODEV;
+
+ /* get the number of SRIOV bars */
+ iov->nres = 0;
+
+ /* read the first_vfid */
+ val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
+ iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
+ * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
+
+ DP(BNX2X_MSG_IOV,
+ "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+ BP_FUNC(bp),
+ iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
+ iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+ return 0;
+}
+
+static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
+{
+ int i;
+ u8 queue_count = 0;
+
+ if (IS_SRIOV(bp))
+ for_each_vf(bp, i)
+ queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
+
+ return queue_count;
+}
+
+/* must be called after PF bars are mapped */
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ int num_vfs_param)
+{
+ int err, i, qcount;
+ struct bnx2x_sriov *iov;
+ struct pci_dev *dev = bp->pdev;
+
+ bp->vfdb = NULL;
+
+ /* verify this is a PF */
+ if (IS_VF(bp))
+ return 0;
+
+ /* verify sriov capability is present in configuration space */
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
+ return 0;
+
+ /* verify chip revision */
+ if (CHIP_IS_E1x(bp))
+ return 0;
+
+ /* check if SRIOV support is turned off */
+ if (!num_vfs_param)
+ return 0;
+
+ /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
+ if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
+ BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
+ BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
+ return 0;
+ }
+
+ /* SRIOV can be enabled only with MSIX */
+ if (int_mode_param == BNX2X_INT_MODE_MSI ||
+ int_mode_param == BNX2X_INT_MODE_INTX)
+ BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
+
+ err = -EIO;
+ /* verify ari is enabled */
+ if (!bnx2x_ari_enabled(bp->pdev)) {
+ BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
+ return err;
+ }
+
+ /* verify igu is in normal mode */
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
+ return err;
+ }
+
+ /* allocate the vfs database */
+ bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
+ if (!bp->vfdb) {
+ BNX2X_ERR("failed to allocate vf database\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* get the sriov info - Linux already collected all the pertinent
+ * information, however the sriov structure is for the private use
+ * of the pci module. Also we want this information regardless
+ * of the hypervisor.
+ */
+ iov = &(bp->vfdb->sriov);
+ err = bnx2x_sriov_info(bp, iov);
+ if (err)
+ goto failed;
+
+ /* SR-IOV capability was enabled but there are no VFs */
+ if (iov->total == 0)
+ goto failed;
+
+ /* calculate the actual number of VFs */
+ iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
+
+ /* allocate the vf array */
+ bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
+ BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
+ if (!bp->vfdb->vfs) {
+ BNX2X_ERR("failed to allocate vf array\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
+ for_each_vf(bp, i) {
+ bnx2x_vf(bp, i, index) = i;
+ bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
+ bnx2x_vf(bp, i, state) = VF_FREE;
+ INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
+ mutex_init(&bnx2x_vf(bp, i, op_mutex));
+ bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
+ }
+
+ /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
+ bnx2x_get_vf_igu_cam_info(bp);
+
+ /* get the total queue count and allocate the global queue arrays */
+ qcount = bnx2x_iov_get_max_queue_count(bp);
+
+ /* allocate the queue arrays for all VFs */
+ bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
+ GFP_KERNEL);
+ if (!bp->vfdb->vfqs) {
+ BNX2X_ERR("failed to allocate vf queue array\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ return 0;
+failed:
+ DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
+ __bnx2x_iov_free_vfdb(bp);
+ return err;
+}
+
+void bnx2x_iov_remove_one(struct bnx2x *bp)
+{
+ /* if SRIOV is not enabled there's nothing to do */
+ if (!IS_SRIOV(bp))
+ return;
+
+ DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
+ pci_disable_sriov(bp->pdev);
+ DP(BNX2X_MSG_IOV, "sriov disabled\n");
+
+ /* free vf database */
+ __bnx2x_iov_free_vfdb(bp);
+}
+
+void bnx2x_iov_free_mem(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ /* free vfs hw contexts */
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *cxt = &bp->vfdb->context[i];
+ BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
+ }
+
+ BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
+ BP_VFDB(bp)->sp_dma.mapping,
+ BP_VFDB(bp)->sp_dma.size);
+
+ BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
+ BP_VF_MBX_DMA(bp)->mapping,
+ BP_VF_MBX_DMA(bp)->size);
+
+ BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
+ BP_VF_BULLETIN_DMA(bp)->mapping,
+ BP_VF_BULLETIN_DMA(bp)->size);
+}
+
+int bnx2x_iov_alloc_mem(struct bnx2x *bp)
+{
+ size_t tot_size;
+ int i, rc = 0;
+
+ if (!IS_SRIOV(bp))
+ return rc;
+
+ /* allocate vfs hw contexts */
+ tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
+ BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
+
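+ /* carve the total context area into ILT-page-sized chunks, one
+ * allocation per chunk
+ */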
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
+ cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
+
+ if (cxt->size) {
+ BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
+ } else {
+ cxt->addr = NULL;
+ cxt->mapping = 0;
+ }
+ tot_size -= cxt->size;
+ }
+
+ /* allocate vfs ramrods dma memory - client_init and set_mac */
+ tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
+ BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
+ tot_size);
+ BP_VFDB(bp)->sp_dma.size = tot_size;
+
+ /* allocate mailboxes */
+ tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
+ BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
+ tot_size);
+ BP_VF_MBX_DMA(bp)->size = tot_size;
+
+ /* allocate local bulletin boards */
+ tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
+ BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
+ &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
+ BP_VF_BULLETIN_DMA(bp)->size = tot_size;
+
+ return 0;
+
+alloc_mem_err:
+ return -ENOMEM;
+}
+
+static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+ unsigned long q_type = 0;
+
+ set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+ set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+ /* Queue State object */
+ bnx2x_init_queue_obj(bp, &q->sp_obj,
+ cl_id, &q->cid, 1, func_id,
+ bnx2x_vf_sp(bp, vf, q_data),
+ bnx2x_vf_sp_map(bp, vf, q_data),
+ q_type);
+
+ DP(BNX2X_MSG_IOV,
+ "initialized vf %d's queue object. func id set to %d\n",
+ vf->abs_vfid, q->sp_obj.func_id);
+
+ /* mac/vlan objects are per queue, but only those
+ * that belong to the leading queue are initialized
+ */
+ if (vfq_is_leading(q)) {
+ /* mac */
+ bnx2x_init_mac_obj(bp, &q->mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->macs_pool);
+ /* vlan */
+ bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->vlans_pool);
+
+ /* mcast */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+ q->cid, func_id, func_id,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ vf->leading_rss = cl_id;
+ }
+}
+
+/* called by bnx2x_nic_load */
+int bnx2x_iov_nic_init(struct bnx2x *bp)
+{
+ int vfid, qcount, i;
+
+ if (!IS_SRIOV(bp)) {
+ DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
+ return 0;
+ }
+
+ DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
+
+ /* initialize vf database */
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
+
+ int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
+ BNX2X_CIDS_PER_VF;
+
+ union cdu_context *base_cxt = (union cdu_context *)
+ BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+ (base_vf_cid & (ILT_PAGE_CIDS-1));
+
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
+ vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
+ BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
+
+ /* init statically provisioned resources */
+ bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+
+ /* queues are initialized during VF-ACQUIRE */
+
+ /* reserve the vf vlan credit */
+ bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
+
+ vf->filter_state = 0;
+ vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+ /* init mcast object - This object will be re-initialized
+ * during VF-ACQUIRE with the proper cl_id and cid.
+ * It needs to be initialized here so that it can be safely
+ * handled by a subsequent FLR flow.
+ */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
+ 0xFF, 0xFF, 0xFF,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* set the mailbox message addresses */
+ BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
+ (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
+ MBX_MSG_ALIGNED_SIZE);
+
+ BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
+ vfid * MBX_MSG_ALIGNED_SIZE;
+
+ /* Enable vf mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ }
+
+ /* Final VF init */
+ qcount = 0;
+ for_each_vf(bp, i) {
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+ /* fill in the BDF and bars */
+ vf->bus = bnx2x_vf_bus(bp, i);
+ vf->devfn = bnx2x_vf_devfn(bp, i);
+ bnx2x_vf_set_bars(bp, vf);
+
+ DP(BNX2X_MSG_IOV,
+ "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
+ vf->abs_vfid, vf->bus, vf->devfn,
+ (unsigned)vf->bars[0].bar, vf->bars[0].size,
+ (unsigned)vf->bars[1].bar, vf->bars[1].size,
+ (unsigned)vf->bars[2].bar, vf->bars[2].size);
+
+ /* set local queue arrays */
+ vf->vfqs = &bp->vfdb->vfqs[qcount];
+ qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
+ }
+
+ return 0;
+}
+
+/* called by bnx2x_chip_cleanup */
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return 0;
+
+ /* release all the VFs */
+ for_each_vf(bp, i)
+ bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
+
+ return 0;
+}
+
+/* called by bnx2x_init_hw_func, returns the next ilt line */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
+{
+ int i;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+
+ if (!IS_SRIOV(bp))
+ return line;
+
+ /* set vfs ilt lines */
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
+
+ ilt->lines[line+i].page = hw_cxt->addr;
+ ilt->lines[line+i].page_mapping = hw_cxt->mapping;
+ ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
+ }
+ return line + i;
+}
+
+static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
+{
+ return ((cid >= BNX2X_FIRST_VF_CID) &&
+ ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
+}
+
+static
+void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
+ struct bnx2x_vf_queue *vfq,
+ union event_ring_elem *elem)
+{
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+
+ /* Always push next commands out, don't wait here */
+ set_bit(RAMROD_CONT, &ramrod_flags);
+
+ switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ case BNX2X_FILTER_MAC_PENDING:
+ rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
+ &ramrod_flags);
+ break;
+ case BNX2X_FILTER_VLAN_PENDING:
+ rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
+ &ramrod_flags);
+ break;
+ default:
+ BNX2X_ERR("Unsupported classification command: %d\n",
+ elem->message.data.eth_event.echo);
+ return;
+ }
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+ else if (rc > 0)
+ DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
+}
+
+static
+void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ int rc;
+
+ rparam.mcast_obj = &vf->mcast_obj;
+ vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
+
+ /* If there are pending mcast commands - send them */
+ if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+ rc);
+ }
+}
+
+static
+void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ smp_mb__after_clear_bit();
+}
+
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
+{
+ struct bnx2x_virtf *vf;
+ int qidx = 0, abs_vfid;
+ u8 opcode;
+ u16 cid = 0xffff;
+
+ if (!IS_SRIOV(bp))
+ return 1;
+
+ /* first get the cid - the only events we handle here are cfc-delete
+ * and set-mac completion
+ */
+ opcode = elem->message.opcode;
+
+ switch (opcode) {
+ case EVENT_RING_OPCODE_CFC_DEL:
+ cid = SW_CID((__force __le32)
+ elem->message.data.cfc_del_event.cid);
+ DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
+ break;
+ case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+ case EVENT_RING_OPCODE_MULTICAST_RULES:
+ case EVENT_RING_OPCODE_FILTERS_RULES:
+ cid = (elem->message.data.eth_event.echo &
+ BNX2X_SWCID_MASK);
+ DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
+ break;
+ case EVENT_RING_OPCODE_VF_FLR:
+ abs_vfid = elem->message.data.vf_flr_event.vf_id;
+ DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
+ abs_vfid);
+ goto get_vf;
+ case EVENT_RING_OPCODE_MALICIOUS_VF:
+ abs_vfid = elem->message.data.malicious_vf_event.vf_id;
+ DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
+ abs_vfid);
+ goto get_vf;
+ default:
+ return 1;
+ }
+
+ /* check if the cid is in the VF range */
+ if (!bnx2x_iov_is_vf_cid(bp, cid)) {
+ DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
+ return 1;
+ }
+
+ /* extract vf and rxq index from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. the max number of VFs (per path) is 64
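+ * e.g. assuming BNX2X_VF_CID_WND == 4, cid 0x35 would decode to
+ * qidx 5 and abs_vfid 3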
+ */
+ qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
+ abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
+get_vf:
+ vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+ if (!vf) {
+ BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
+ cid, abs_vfid);
+ return 0;
+ }
+
+ switch (opcode) {
+ case EVENT_RING_OPCODE_CFC_DEL:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
+ vf->abs_vfid, qidx);
+ vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
+ &vfq_get(vf,
+ qidx)->sp_obj,
+ BNX2X_Q_CMD_CFC_DEL);
+ break;
+ case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
+ break;
+ case EVENT_RING_OPCODE_MULTICAST_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_mcast_eqe(bp, vf);
+ break;
+ case EVENT_RING_OPCODE_FILTERS_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_filters_eqe(bp, vf);
+ break;
+ case EVENT_RING_OPCODE_VF_FLR:
+ DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
+ vf->abs_vfid);
+ /* Do nothing for now */
+ break;
+ case EVENT_RING_OPCODE_MALICIOUS_VF:
+ DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
+ vf->abs_vfid);
+ /* Do nothing for now */
+ break;
+ }
+ /* SRIOV: reschedule any 'in_progress' operations */
+ bnx2x_iov_sp_event(bp, cid, false);
+
+ return 0;
+}
+
+static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
+{
+ /* extract the vf from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. the max number of VFs (per path) is 64
+ */
+ int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
+ return bnx2x_vf_by_abs_fid(bp, abs_vfid);
+}
+
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj)
+{
+ struct bnx2x_virtf *vf;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ vf = bnx2x_vf_by_cid(bp, vf_cid);
+
+ if (vf) {
+ /* extract queue index from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. the max number of VFs (per path) is 64
+ */
+ int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
+ *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
+ } else {
+ BNX2X_ERR("No vf matching cid %d\n", vf_cid);
+ }
+}
+
+void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
+{
+ struct bnx2x_virtf *vf;
+
+ /* check if the cid is in the VF range */
+ if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
+ return;
+
+ vf = bnx2x_vf_by_cid(bp, vf_cid);
+ if (vf) {
+ /* set in_progress flag */
+ atomic_set(&vf->op_in_progress, 1);
+ if (queue_work)
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ }
+}
+
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
+{
+ int i;
+ int first_queue_query_index, num_queues_req;
+ dma_addr_t cur_data_offset;
+ struct stats_query_entry *cur_query_entry;
+ u8 stats_count = 0;
+ bool is_fcoe = false;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ if (!NO_FCOE(bp))
+ is_fcoe = true;
+
+ /* fcoe adds one global request and one queue request */
+ num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
+ (is_fcoe ? 0 : 1);
+
+ DP(BNX2X_MSG_IOV,
+ "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+ BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+ first_queue_query_index + num_queues_req);
+
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats) +
+ num_queues_req * sizeof(struct per_queue_stats);
+
+ cur_query_entry = &bp->fw_stats_req->
+ query[first_queue_query_index + num_queues_req];
+
+ for_each_vf(bp, i) {
+ int j;
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+ if (vf->state != VF_ENABLED) {
+ DP(BNX2X_MSG_IOV,
+ "vf %d not enabled so no stats for it\n",
+ vf->abs_vfid);
+ continue;
+ }
+
+ DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
+ for_each_vfq(vf, j) {
+ struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+
+ /* collect stats for active queues only */
+ if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED)
+ continue;
+
+ /* create stats query entry for this queue */
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = vfq_cl_id(vf, rxq);
+ cur_query_entry->funcID =
+ cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(vf->fw_stat_map));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(vf->fw_stat_map));
+ DP(BNX2X_MSG_IOV,
+ "added address %x %x for vf %d queue %d client %d\n",
+ cur_query_entry->address.hi,
+ cur_query_entry->address.lo, cur_query_entry->funcID,
+ j, cur_query_entry->index);
+ cur_query_entry++;
+ cur_data_offset += sizeof(struct per_queue_stats);
+ stats_count++;
+ }
+ }
+ bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
+}
+
+void bnx2x_iov_sp_task(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return;
+ /* Iterate over all VFs and invoke state transition for VFs with
+ * 'in-progress' slow-path operations
+ */
+ DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
+ for_each_vf(bp, i) {
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+ if (!list_empty(&vf->op_list_head) &&
+ atomic_read(&vf->op_in_progress)) {
+ DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
+ bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
+ }
+ }
+}
+
+static inline
+struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
+{
+ int i;
+ struct bnx2x_virtf *vf = NULL;
+
+ for_each_vf(bp, i) {
+ vf = BP_VF(bp, i);
+ if (stat_id >= vf->igu_base_id &&
+ stat_id < vf->igu_base_id + vf_sb_count(vf))
+ break;
+ }
+ return vf;
+}
+
+/* VF API helpers */
+static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
+ u8 enable)
+{
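+ /* zone permission entry: VF id in the low bits; bit 6 presumably
+ * marks the entry as valid/enabled
+ */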
+ u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
+ u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
+
+ REG_WR(bp, reg, val);
+}
+
+static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i;
+
+ for_each_vfq(vf, i)
+ bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+ vfq_qzone_id(vf, vfq_get(vf, i)), false);
+}
+
+static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ u32 val;
+
+ /* clear the VF configuration - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+ val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
+ IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
+ REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
+ BNX2X_VF_MAX_QUEUES);
+}
+
+static
+int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *req_resc)
+{
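+ /* a zero count means the resource was not yet allocated to the VF;
+ * compare against the per-VF maximum instead
+ */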
+ u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+
+ return ((req_resc->num_rxqs <= rxq_cnt) &&
+ (req_resc->num_txqs <= txq_cnt) &&
+ (req_resc->num_sbs <= vf_sb_count(vf)) &&
+ (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
+ (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+}
+
+/* CORE VF API */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *resc)
+{
+ int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
+ BNX2X_CIDS_PER_VF;
+
+ union cdu_context *base_cxt = (union cdu_context *)
+ BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+ (base_vf_cid & (ILT_PAGE_CIDS-1));
+ int i;
+
+ /* if state is 'acquired' the VF was not released or FLR'd, in
+ * this case the returned resources match the already acquired
+ * resources. Verify that the requested numbers do not exceed the
+ * already acquired numbers.
+ */
+ if (vf->state == VF_ACQUIRED) {
+ DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
+ vf->abs_vfid);
+
+ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+ BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n",
+ vf->abs_vfid);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* Otherwise vf state must be 'free' or 'reset' */
+ if (vf->state != VF_FREE && vf->state != VF_RESET) {
+ BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
+ vf->abs_vfid, vf->state);
+ return -EINVAL;
+ }
+
+ /* static allocation:
+ * the global maximum numbers are fixed per VF. Fail the request if
+ * the requested numbers exceed these globals
+ */
+ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+ DP(BNX2X_MSG_IOV,
+ "cannot fulfill vf resource request. Placing maximal available values in response\n");
+ /* set the max resource in the vf */
+ return -ENOMEM;
+ }
+
+ /* Set resource counters - 0 request means max available */
+ vf_sb_count(vf) = resc->num_sbs;
+ vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ if (resc->num_mac_filters)
+ vf_mac_rules_cnt(vf) = resc->num_mac_filters;
+ if (resc->num_vlan_filters)
+ vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
+
+ DP(BNX2X_MSG_IOV,
+ "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
+ vf_sb_count(vf), vf_rxq_count(vf),
+ vf_txq_count(vf), vf_mac_rules_cnt(vf),
+ vf_vlan_rules_cnt(vf));
+
+ /* Initialize the queues */
+ if (!vf->vfqs) {
+ DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
+ return -EINVAL;
+ }
+
+ for_each_vfq(vf, i) {
+ struct bnx2x_vf_queue *q = vfq_get(vf, i);
+
+ if (!q) {
+ DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+ return -EINVAL;
+ }
+
+ q->index = i;
+ q->cxt = &((base_cxt + i)->eth);
+ q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
+
+ DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
+ vf->abs_vfid, i, q->index, q->cid, q->cxt);
+
+ /* init SP objects */
+ bnx2x_vfq_init(bp, vf, q);
+ }
+ vf->state = VF_ACQUIRED;
+ return 0;
+}
+
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ u16 flags = 0;
+ int i;
+
+ /* the sb resources are initialized at this point, do the
+ * FW/HW initializations
+ */
+ for_each_vf_sb(vf, i)
+ bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
+ vf_igu_sb(vf, i), vf_igu_sb(vf, i));
+
+ /* Sanity checks */
+ if (vf->state != VF_ACQUIRED) {
+ DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
+ vf->abs_vfid, vf->state);
+ return -EINVAL;
+ }
+ /* FLR cleanup epilogue */
+ if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
+ return -EBUSY;
+
+ /* reset IGU VF statistics: MSIX */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
+
+ /* vf init */
+ if (vf->cfg_flags & VF_CFG_STATS)
+ flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
+
+ if (vf->cfg_flags & VF_CFG_TPA)
+ flags |= FUNC_FLG_TPA;
+
+ if (is_vf_multi(vf))
+ flags |= FUNC_FLG_RSS;
+
+ /* function setup */
+ func_init.func_flgs = flags;
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
+ func_init.fw_stat_map = vf->fw_stat_map;
+ func_init.spq_map = vf->spq_map;
+ func_init.spq_prod = 0;
+ bnx2x_func_init(bp, &func_init);
+
+ /* Enable the vf */
+ bnx2x_vf_enable_access(bp, vf->abs_vfid);
+ bnx2x_vf_enable_traffic(bp, vf);
+
+ /* queue protection table */
+ for_each_vfq(vf, i)
+ bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+ vfq_qzone_id(vf, vfq_get(vf, i)), true);
+
+ vf->state = VF_ENABLED;
+
+ /* update vf bulletin board */
+ bnx2x_post_vf_bulletin(bp, vf->index);
+
+ return 0;
+}
+
+/* VFOP close (teardown the queues, delete mcasts and close HW) */
+static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
+ enum bnx2x_vfop_close_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_close,
+ .block = false,
+ };
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_CLOSE_QUEUES:
+
+ if (++(qx->qid) < vf_rxq_count(vf)) {
+ vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
+ if (vfop->rc)
+ goto op_err;
+ return;
+ }
+
+ /* remove multicasts */
+ vfop->state = BNX2X_VFOP_CLOSE_HW;
+ vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_CLOSE_HW:
+
+ /* disable the interrupts */
+ DP(BNX2X_MSG_IOV, "disabling igu\n");
+ bnx2x_vf_igu_disable(bp, vf);
+
+ /* disable the VF */
+ DP(BNX2X_MSG_IOV, "clearing qtbl\n");
+ bnx2x_vf_clr_qtbl(bp, vf);
+
+ goto op_done;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
+op_done:
+ vf->state = VF_ACQUIRED;
+ DP(BNX2X_MSG_IOV, "set state to acquired\n");
+ bnx2x_vfop_end(bp, vf, vfop);
+}
+
+int bnx2x_vfop_close_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ vfop->args.qx.qid = -1; /* loop */
+ bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
+ bnx2x_vfop_close, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VF release can be called either: 1. the VF was acquired but
+ * not enabled 2. the vf was enabled or in the process of being
+ * enabled
+ */
+static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_release,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
+ vf->state == VF_FREE ? "Free" :
+ vf->state == VF_ACQUIRED ? "Acquired" :
+ vf->state == VF_ENABLED ? "Enabled" :
+ vf->state == VF_RESET ? "Reset" :
+ "Unknown");
+
+ switch (vf->state) {
+ case VF_ENABLED:
+ vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case VF_ACQUIRED:
+ DP(BNX2X_MSG_IOV, "about to free resources\n");
+ bnx2x_vf_free_resc(bp, vf);
+ DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
+ goto op_done;
+
+ case VF_FREE:
+ case VF_RESET:
+ /* do nothing */
+ goto op_done;
+ default:
+ bnx2x_vfop_default(vf->state);
+ }
+op_err:
+ BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
+op_done:
+ bnx2x_vfop_end(bp, vf, vfop);
+}
+
+int bnx2x_vfop_release_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ bnx2x_vfop_opset(-1, /* use vf->state */
+ bnx2x_vfop_release, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VF release ~ VF close + VF release-resources
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = NULL,
+ .block = block,
+ };
+ int rc;
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+
+ rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
+ if (rc)
+ WARN(rc,
+ "VF[%d] Failed to allocate resources for release op- rc=%d\n",
+ vf->abs_vfid, rc);
+}
+
+static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, u32 *sbdf)
+{
+ *sbdf = vf->devfn | (vf->bus << 8);
+}
+
+static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_bar_info *bar_info)
+{
+ int n;
+
+ bar_info->nr_bars = bp->vfdb->sriov.nres;
+ for (n = 0; n < bar_info->nr_bars; n++)
+ bar_info->bars[n] = vf->bars[n];
+}
+
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs tlv)
+{
+ /* lock the channel */
+ mutex_lock(&vf->op_mutex);
+
+ /* record the locking op */
+ vf->op_current = tlv;
+
+ /* log the lock */
+ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
+ vf->abs_vfid, tlv);
+}
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs expected_tlv)
+{
+ WARN(expected_tlv != vf->op_current,
+ "lock mismatch: expected %d found %d", expected_tlv,
+ vf->op_current);
+
+ /* lock the channel */
+ mutex_unlock(&vf->op_mutex);
+
+ /* log the unlock */
+ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
+ vf->abs_vfid, vf->op_current);
+
+ /* record the locking op */
+ vf->op_current = CHANNEL_TLV_NONE;
+}
+
+void bnx2x_enable_sriov(struct bnx2x *bp)
+{
+ int rc = 0;
+
+ /* disable sriov in case it is still enabled */
+ pci_disable_sriov(bp->pdev);
+ DP(BNX2X_MSG_IOV, "sriov disabled\n");
+
+ /* enable sriov */
+ DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
+ rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
+ if (rc)
+ BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+ else
+ DP(BNX2X_MSG_IOV, "sriov enabled\n");
+}
+
+/* New mac for VF. Consider these cases:
+ * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
+ * supply at acquire.
+ * 2. VF has already been acquired but has not yet initialized - store in local
+ * bulletin board. mac will be posted on VF bulletin board after VF init. VF
+ * will configure this mac when it is ready.
+ * 3. VF has already initialized but has not yet setup a queue - post the new
+ * mac on VF's bulletin board right now. VF will configure this mac when it
+ * is ready.
+ * 4. VF has already set a queue - delete any macs already configured for this
+ * queue and manually config the new mac.
+ * In any event, once this function has been called refuse any attempts by the
+ * VF to configure any mac for itself except for this mac. In case of a race
+ * where the VF fails to see the new post on its bulletin board before sending a
+ * mac configuration request, the PF will simply fail the request and VF can try
+ * again after consulting its bulletin board
+ */
+int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc, q_logical_state, vfidx = queue;
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ /* if SRIOV is disabled there is nothing to do (and somewhere, someone
+ * has erred).
+ */
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
+ return -EINVAL;
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ BNX2X_ERR("mac address invalid\n");
+ return -EINVAL;
+ }
+
+ /* update PF's copy of the VF's bulletin. The PF will no longer accept
+ * mac configuration requests from the vf unless they match this mac
+ */
+ bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
+ memcpy(bulletin->mac, mac, ETH_ALEN);
+
+ /* Post update on VF's bulletin board */
+ rc = bnx2x_post_vf_bulletin(bp, vfidx);
+ if (rc) {
+ BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
+ return rc;
+ }
+
+ /* is vf initialized and queue set up? */
+ q_logical_state =
+ bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ if (vf->state == VF_ENABLED &&
+ q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ /* configure the mac in device on this vf's queue */
+ unsigned long flags = 0;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+
+ /* remove existing eth macs */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete eth macs\n");
+ return -EINVAL;
+ }
+
+ /* remove existing uc list macs */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete uc_list macs\n");
+ return -EINVAL;
+ }
+
+ /* configure the new mac to device */
+ __set_bit(RAMROD_COMP_WAIT, &flags);
+ bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
+ BNX2X_ETH_MAC, &flags);
+
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+ }
+
+ return rc;
+}
+
+/* crc is the first field in the bulletin board. compute the crc over the
+ * entire bulletin board excluding the crc field itself
+ */
+u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
+ struct pf_vf_bulletin_content *bulletin)
+{
+ return crc32(BULLETIN_CRC_SEED,
+ ((u8 *)bulletin) + sizeof(bulletin->crc),
+ bulletin->length - sizeof(bulletin->crc));
+}
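+
+/* Usage note: the VF side (bnx2x_sample_bulletin() below) re-computes this
+ * crc over a local copy of the board and compares it against the posted crc
+ * field, retrying up to BULLETIN_ATTEMPTS times so that a board sampled in
+ * the middle of a PF post is detected and re-read.
+ */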
+
+/* Check for new posts on the bulletin board */
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+ int attempts;
+
+ /* bulletin board hasn't changed since last sample */
+ if (bp->old_bulletin.version == bulletin.version)
+ return PFVF_BULLETIN_UNCHANGED;
+
+ /* validate crc of new bulletin board */
+ if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
+ /* sampling the structure mid-post may result in corrupted data;
+ * validate the crc to ensure coherency.
+ */
+ for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
+ bulletin = bp->pf2vf_bulletin->content;
+ if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
+ &bulletin))
+ break;
+ BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n",
+ bulletin.crc,
+ bnx2x_crc_vf_bulletin(bp, &bulletin));
+ }
+ if (attempts >= BULLETIN_ATTEMPTS) {
+ BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
+ attempts);
+ return PFVF_BULLETIN_CRC_ERR;
+ }
+ }
+
+ /* the mac address in bulletin board is valid and is new */
+ if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
+ memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
+ /* update new mac to net device */
+ memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+ }
+
+ /* copy new bulletin board to bp */
+ bp->old_bulletin = bulletin;
+
+ return PFVF_BULLETIN_UPDATED;
+}
+
+void bnx2x_vf_map_doorbells(struct bnx2x *bp)
+{
+ /* vf doorbells are embedded within the regview */
+ bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;
+}
+
+int bnx2x_vf_pci_alloc(struct bnx2x *bp)
+{
+ /* allocate vf2pf mailbox for vf to pf channel */
+ BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+
+ /* allocate pf 2 vf bulletin board */
+ BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+
+ return 0;
+
+alloc_mem_err:
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+ return -ENOMEM;
+}
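+
+/* Note (assuming the usual BNX2X_PCI_ALLOC/BNX2X_PCI_FREE definitions from
+ * bnx2x.h): BNX2X_PCI_ALLOC() is expected to branch to the local
+ * alloc_mem_err label on failure, which is why no explicit error check
+ * follows each allocation above and why the error path simply frees both
+ * areas.
+ */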
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
new file mode 100644
index 000000000000..b4050173add9
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -0,0 +1,809 @@
+/* bnx2x_sriov.h: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Shmulik Ravid <shmulikr@broadcom.com>
+ * Ariel Elior <ariele@broadcom.com>
+ */
+#ifndef BNX2X_SRIOV_H
+#define BNX2X_SRIOV_H
+
+#include "bnx2x_vfpf.h"
+#include "bnx2x.h"
+
+enum sample_bulletin_result {
+ PFVF_BULLETIN_UNCHANGED,
+ PFVF_BULLETIN_UPDATED,
+ PFVF_BULLETIN_CRC_ERR
+};
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+/* The bnx2x device structure holds vfdb structure described below.
+ * The VF array is indexed by the relative vfid.
+ */
+#define BNX2X_VF_MAX_QUEUES 16
+#define BNX2X_VF_MAX_TPA_AGG_QUEUES 8
+
+struct bnx2x_sriov {
+ u32 first_vf_in_pf;
+
+ /* standard SRIOV capability fields, mostly for debugging */
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total; /* total VFs associated with the PF */
+ u16 initial; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+};
+
+/* bars */
+struct bnx2x_vf_bar {
+ u64 bar;
+ u32 size;
+};
+
+struct bnx2x_vf_bar_info {
+ struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+ u8 nr_bars;
+};
+
+/* vf queue (used both for rx or tx) */
+struct bnx2x_vf_queue {
+ struct eth_context *cxt;
+
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* VLANs object */
+ struct bnx2x_vlan_mac_obj vlan_obj;
+ atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+
+ /* Queue Slow-path State object */
+ struct bnx2x_queue_sp_obj sp_obj;
+
+ u32 cid;
+ u16 index;
+ u16 sb_idx;
+};
+
+/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
+ * q-init, q-setup and SB index
+ */
+struct bnx2x_vfop_qctor_params {
+ struct bnx2x_queue_state_params qstate;
+ struct bnx2x_queue_setup_params prep_qsetup;
+};
+
+/* VFOP parameters (one copy per VF) */
+union bnx2x_vfop_params {
+ struct bnx2x_vlan_mac_ramrod_params vlan_mac;
+ struct bnx2x_rx_mode_ramrod_params rx_mode;
+ struct bnx2x_mcast_ramrod_params mcast;
+ struct bnx2x_config_rss_params rss;
+ struct bnx2x_vfop_qctor_params qctor;
+};
+
+/* forward */
+struct bnx2x_virtf;
+
+/* VFOP definitions */
+typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+struct bnx2x_vfop_cmd {
+ vfop_handler_t done;
+ bool block;
+};
+
+/* VFOP queue filters command additional arguments */
+struct bnx2x_vfop_filter {
+ struct list_head link;
+ int type;
+#define BNX2X_VFOP_FILTER_MAC 1
+#define BNX2X_VFOP_FILTER_VLAN 2
+
+ bool add;
+ u8 *mac;
+ u16 vid;
+};
+
+struct bnx2x_vfop_filters {
+ int add_cnt;
+ struct list_head head;
+ struct bnx2x_vfop_filter filters[];
+};
+
+/* transient list allocated, built and saved until its
+ * passed to the SP-VERBs layer.
+ */
+struct bnx2x_vfop_args_mcast {
+ int mc_num;
+ struct bnx2x_mcast_list_elem *mc;
+};
+
+struct bnx2x_vfop_args_qctor {
+ int qid;
+ u16 sb_idx;
+};
+
+struct bnx2x_vfop_args_qdtor {
+ int qid;
+ struct eth_context *cxt;
+};
+
+struct bnx2x_vfop_args_defvlan {
+ int qid;
+ bool enable;
+ u16 vid;
+ u8 prio;
+};
+
+struct bnx2x_vfop_args_qx {
+ int qid;
+ bool en_add;
+};
+
+struct bnx2x_vfop_args_filters {
+ struct bnx2x_vfop_filters *multi_filter;
+ atomic_t *credit; /* non NULL means 'don't consume credit' */
+};
+
+union bnx2x_vfop_args {
+ struct bnx2x_vfop_args_mcast mc_list;
+ struct bnx2x_vfop_args_qctor qctor;
+ struct bnx2x_vfop_args_qdtor qdtor;
+ struct bnx2x_vfop_args_defvlan defvlan;
+ struct bnx2x_vfop_args_qx qx;
+ struct bnx2x_vfop_args_filters filters;
+};
+
+struct bnx2x_vfop {
+ struct list_head link;
+ int rc; /* return code */
+ int state; /* next state */
+ union bnx2x_vfop_args args; /* extra arguments */
+ union bnx2x_vfop_params *op_p; /* ramrod params */
+
+ /* state machine callbacks */
+ vfop_handler_t transition;
+ vfop_handler_t done;
+};
+
+/* vf context */
+struct bnx2x_virtf {
+ u16 cfg_flags;
+#define VF_CFG_STATS 0x0001
+#define VF_CFG_FW_FC 0x0002
+#define VF_CFG_TPA 0x0004
+#define VF_CFG_INT_SIMD 0x0008
+#define VF_CACHE_LINE 0x0010
+
+ u8 state;
+#define VF_FREE 0 /* VF ready to be acquired holds no resc */
+#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
+#define VF_ENABLED 2 /* VF Enabled */
+#define VF_RESET 3 /* VF FLR'd, pending cleanup */
+
+ /* non 0 during flr cleanup */
+ u8 flr_clnup_stage;
+#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup'
+ * sans the end-wait
+ */
+#define VF_FLR_ACK 2 /* ACK flr notification */
+#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW
+ * ~ 'final cleanup' end wait
+ */
+
+ /* dma */
+ dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+ dma_addr_t spq_map;
+ dma_addr_t bulletin_map;
+
+ /* Allocated resources counters. Before the VF is acquired, the
+ * counters hold the following values:
+ *
+ * - xxq_count = 0 as the queues memory is not allocated yet.
+ *
+ * - sb_count = The number of status blocks configured for this VF in
+ * the IGU CAM. Initially read during probe.
+ *
+ * - xx_rules_count = The number of rules statically and equally
+ * allocated for each VF, during PF load.
+ */
+ struct vf_pf_resc_request alloc_resc;
+#define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs)
+#define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs)
+#define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs)
+#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
+#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
+#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
+
+ u8 sb_count; /* actual number of SBs */
+ u8 igu_base_id; /* base igu status block id */
+
+ struct bnx2x_vf_queue *vfqs;
+#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+
+ u8 index; /* index in the vf array */
+ u8 abs_vfid;
+ u8 sp_cl_id;
+ u32 error; /* 0 means all's-well */
+
+ /* BDF */
+ unsigned int bus;
+ unsigned int devfn;
+
+ /* bars */
+ struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+
+ /* set-mac ramrod state 1-pending, 0-done */
+ unsigned long filter_state;
+
+ /* leading rss client id ~~ the client id of the first rxq, must be
+ * set for each txq.
+ */
+ int leading_rss;
+
+ /* MCAST object */
+ struct bnx2x_mcast_obj mcast_obj;
+
+ /* RSS configuration object */
+ struct bnx2x_rss_config_obj rss_conf_obj;
+
+ /* slow-path operations */
+ atomic_t op_in_progress;
+ int op_rc;
+ bool op_wait_blocking;
+ struct list_head op_list_head;
+ union bnx2x_vfop_params op_params;
+ struct mutex op_mutex; /* one vfop at a time mutex */
+ enum channel_tlvs op_current;
+};
+
+#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
+
+#define for_each_vf(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++)
+
+#define for_each_vfq(vf, var) \
+ for ((var) = 0; (var) < vf_rxq_count(vf); (var)++)
+
+#define for_each_vf_sb(vf, var) \
+ for ((var) = 0; (var) < vf_sb_count(vf); (var)++)
+
+#define is_vf_multi(vf) (vf_rxq_count(vf) > 1)
+
+#define HW_VF_HANDLE(bp, abs_vfid) \
+ (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4))
+
+#define FW_PF_MAX_HANDLE 8
+
+#define FW_VF_HANDLE(abs_vfid) \
+ (abs_vfid + FW_PF_MAX_HANDLE)
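+/* e.g. abs_vfid 0 yields FW handle 8; VF handles start just above the
+ * FW_PF_MAX_HANDLE (8) handles reserved for the PFs.
+ */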
+
+/* locking and unlocking the channel mutex */
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs tlv);
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs expected_tlv);
+
+/* VF mail box (aka vf-pf channel) */
+
+/* a container for the bi-directional vf<-->pf messages.
+ * The actual response will be placed according to the offset parameter
+ * provided in the request
+ */
+
+#define MBX_MSG_ALIGN 8
+#define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \
+ MBX_MSG_ALIGN))
+
+struct bnx2x_vf_mbx_msg {
+ union vfpf_tlvs req;
+ union pfvf_tlvs resp;
+};
+
+struct bnx2x_vf_mbx {
+ struct bnx2x_vf_mbx_msg *msg;
+ dma_addr_t msg_mapping;
+
+ /* VF GPA address */
+ u32 vf_addr_lo;
+ u32 vf_addr_hi;
+
+ struct vfpf_first_tlv first_tlv; /* saved VF request header */
+
+ u8 flags;
+#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
+ * more than one pending msg
+ */
+};
+
+struct bnx2x_vf_sp {
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } mac_rdata;
+
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_rdata;
+
+ union {
+ struct eth_filter_rules_ramrod_data e2;
+ } rx_mode_rdata;
+
+ union {
+ struct eth_multicast_rules_ramrod_data e2;
+ } mcast_rdata;
+
+ union {
+ struct client_init_ramrod_data init_data;
+ struct client_update_ramrod_data update_data;
+ } q_data;
+};
+
+struct hw_dma {
+ void *addr;
+ dma_addr_t mapping;
+ size_t size;
+};
+
+struct bnx2x_vfdb {
+#define BP_VFDB(bp) ((bp)->vfdb)
+ /* vf array */
+ struct bnx2x_virtf *vfs;
+#define BP_VF(bp, idx) (&((bp)->vfdb->vfs[(idx)]))
+#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[(idx)].var)
+
+ /* queue array - for all vfs */
+ struct bnx2x_vf_queue *vfqs;
+
+ /* vf HW contexts */
+ struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS];
+#define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[(i)])
+
+ /* SR-IOV information */
+ struct bnx2x_sriov sriov;
+ struct hw_dma mbx_dma;
+#define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma))
+ struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS];
+#define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[(vfid)]))
+
+ struct hw_dma bulletin_dma;
+#define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma))
+#define BP_VF_BULLETIN(bp, vf) \
+ (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \
+ + (vf))
+
+ struct hw_dma sp_dma;
+#define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \
+ (vf)->index * sizeof(struct bnx2x_vf_sp) + \
+ offsetof(struct bnx2x_vf_sp, field))
+#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \
+ (vf)->index * sizeof(struct bnx2x_vf_sp) + \
+ offsetof(struct bnx2x_vf_sp, field))
+
+#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
+ u32 flrd_vfs[FLRD_VFS_DWORDS];
+};
+
+/* queue access */
+static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
+{
+ return &(vf->vfqs[index]);
+}
+
+static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
+{
+ return (vfq->index == 0);
+}
+
+/* FW ids */
+static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+ return vf->igu_base_id + sb_idx;
+}
+
+static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+ return vf_igu_sb(vf, sb_idx);
+}
+
+static inline u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vf->igu_base_id + q->index;
+}
+
+static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vfq_cl_id(vf, q);
+}
+
+static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vfq_cl_id(vf, q);
+}
+
+/* global iov routines */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line);
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param);
+void bnx2x_iov_remove_one(struct bnx2x *bp);
+void bnx2x_iov_free_mem(struct bnx2x *bp);
+int bnx2x_iov_alloc_mem(struct bnx2x *bp);
+int bnx2x_iov_nic_init(struct bnx2x *bp);
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp);
+void bnx2x_iov_init_dq(struct bnx2x *bp);
+void bnx2x_iov_init_dmae(struct bnx2x *bp);
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj);
+void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
+void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
+void bnx2x_iov_sp_task(struct bnx2x *bp);
+/* global vf mailbox routines */
+void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event);
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
+
+/* CORE VF API */
+typedef u8 bnx2x_mac_addr_t[ETH_ALEN];
+
+/* acquire */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *resc);
+/* init */
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ dma_addr_t *sb_map);
+
+/* VFOP generic helpers */
+#define bnx2x_vfop_default(state) do { \
+ BNX2X_ERR("Bad state %d\n", (state)); \
+ vfop->rc = -EINVAL; \
+ goto op_err; \
+ } while (0)
+
+enum {
+ VFOP_DONE,
+ VFOP_CONT,
+ VFOP_VERIFY_PEND,
+};
+
+#define bnx2x_vfop_finalize(vf, rc, next) do { \
+ if ((rc) < 0) \
+ goto op_err; \
+ else if ((rc) > 0) \
+ goto op_pending; \
+ else if ((next) == VFOP_DONE) \
+ goto op_done; \
+ else if ((next) == VFOP_VERIFY_PEND) \
+ BNX2X_ERR("expected pending\n"); \
+ else { \
+ DP(BNX2X_MSG_IOV, "no ramrod. scheduling\n"); \
+ atomic_set(&vf->op_in_progress, 1); \
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \
+ return; \
+ } \
+ } while (0)
+
+#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
+ do { \
+ vfop->state = first_state; \
+ vfop->op_p = &vf->op_params; \
+ vfop->transition = trans_hndlr; \
+ vfop->done = done_hndlr; \
+ } while (0)
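+
+/* An illustrative sketch of how these helpers compose (mirroring
+ * bnx2x_vfop_close_cmd() in bnx2x_sriov.c): a command allocates a vfop with
+ * bnx2x_vfop_add(), programs its first state and handlers with
+ * bnx2x_vfop_opset(), then starts the state machine with
+ * bnx2x_vfop_transition(). Each transition handler either re-arms itself via
+ * another command, jumps to op_err on failure, or completes through
+ * bnx2x_vfop_end().
+ */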
+
+static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
+ WARN_ON(list_empty(&vf->op_list_head));
+ return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
+}
+
+static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
+
+ WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
+ if (vfop) {
+ INIT_LIST_HEAD(&vfop->link);
+ list_add(&vfop->link, &vf->op_list_head);
+ }
+ return vfop;
+}
+
+static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vfop *vfop)
+{
+ /* rc < 0 - error, otherwise set to 0 */
+ DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
+ if (vfop->rc >= 0)
+ vfop->rc = 0;
+ DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
+
+ /* unlink the current op context and propagate error code
+ * must be done before invoking the 'done()' handler
+ */
+ WARN(!mutex_is_locked(&vf->op_mutex),
+ "about to access vf op linked list but mutex was not locked!");
+ list_del(&vfop->link);
+
+ if (list_empty(&vf->op_list_head)) {
+ DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
+ vf->op_rc = vfop->rc;
+ DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
+ vf->op_rc, vfop->rc);
+ } else {
+ struct bnx2x_vfop *cur_vfop;
+
+ DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
+ cur_vfop = bnx2x_vfop_cur(bp, vf);
+ cur_vfop->rc = vfop->rc;
+ DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
+ vf->op_rc, vfop->rc);
+ }
+
+ /* invoke done handler */
+ if (vfop->done) {
+ DP(BNX2X_MSG_IOV, "calling done handler\n");
+ vfop->done(bp, vf);
+ } else {
+ /* there is no done handler for the operation to unlock
+ * the mutex. Must have gotten here from PF initiated VF RELEASE
+ */
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+ }
+
+ DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
+ vf->op_rc, vfop->rc);
+
+ /* if this is the last nested op reset the wait_blocking flag
+ * to release any blocking wrappers, only after 'done()' is invoked
+ */
+ if (list_empty(&vf->op_list_head)) {
+ DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
+ vf->op_wait_blocking = false;
+ }
+
+ kfree(vfop);
+}
+
+static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ /* can take a while if any port is running */
+ int cnt = 5000;
+
+ might_sleep();
+ while (cnt--) {
+ if (!vf->op_wait_blocking) {
+#ifdef BNX2X_STOP_ON_ERROR
+ DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
+#endif
+ return 0;
+ }
+ usleep_range(1000, 2000);
+
+ if (bp->panic)
+ return -EIO;
+ }
+
+ /* timeout! */
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+
+ return -EBUSY;
+}
+
+static inline int bnx2x_vfop_transition(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ vfop_handler_t transition,
+ bool block)
+{
+ if (block)
+ vf->op_wait_blocking = true;
+ transition(bp, vf);
+ if (block)
+ return bnx2x_vfop_wait_blocking(bp, vf);
+ return 0;
+}
+
+/* VFOP queue construction helpers */
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q,
+ struct bnx2x_vfop_qctor_params *p,
+ unsigned long q_type);
+int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *macs,
+ int qid, bool drv_only);
+
+int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, u16 vid, bool add);
+
+int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *vlans,
+ int qid, bool drv_only);
+
+int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid);
+
+int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid);
+
+int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ bnx2x_mac_addr_t *mcasts,
+ int mcast_num, bool drv_only);
+
+int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, unsigned long accept_flags);
+
+int bnx2x_vfop_close_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd);
+
+int bnx2x_vfop_release_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd);
+
+/* VF release ~ VF close + VF release-resources
+ *
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block);
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+/* FLR routines */
+
+/* VF FLR helpers */
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid);
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
+
+/* Handles an FLR (or VF_DISABLE) notification from the MCP */
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
+
+void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
+ u16 length);
+void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length);
+void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
+
+bool bnx2x_tlv_supported(u16 tlvtype);
+
+u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
+ struct pf_vf_bulletin_content *bulletin);
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
+
+
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
+
+/* VF side vfpf channel functions */
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
+int bnx2x_vfpf_release(struct bnx2x *bp);
+int bnx2x_vfpf_init(struct bnx2x *bp);
+void bnx2x_vfpf_close_vf(struct bnx2x *bp);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
+int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+int bnx2x_vfpf_set_mac(struct bnx2x *bp);
+int bnx2x_vfpf_set_mcast(struct net_device *dev);
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
+
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len)
+{
+ strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+}
+
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp)
+{
+ return PXP_VF_ADDR_USDM_QUEUES_START +
+ bp->acquire_resp.resc.hw_qid[fp->index] *
+ sizeof(struct ustorm_queue_zone_data);
+}
+
+void bnx2x_vf_map_doorbells(struct bnx2x *bp);
+int bnx2x_vf_pci_alloc(struct bnx2x *bp);
+void bnx2x_enable_sriov(struct bnx2x *bp);
+static inline int bnx2x_vf_headroom(struct bnx2x *bp)
+{
+ return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+}
+
+#else /* CONFIG_BNX2X_SRIOV */
+
+static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj) {}
+static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid,
+ bool queue_work) {}
+static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
+static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
+ union event_ring_elem *elem) {return 1; }
+static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {}
+static inline void bnx2x_vf_mbx(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event) {}
+static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
+static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
+static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {}
+static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
+static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ int num_vfs_param) {return 0; }
+static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
+static inline void bnx2x_enable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
+ u8 tx_count, u8 rx_count) {return 0; }
+static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
+static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
+static inline int bnx2x_vfpf_set_mac(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
+static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {}
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len) {}
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp) {return 0; }
+static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ return PFVF_BULLETIN_UNCHANGED;
+}
+
+static inline void bnx2x_vf_map_doorbells(struct bnx2x *bp) {}
+static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 89ec0667140a..4397f8b76f2e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,6 +1,6 @@
/* bnx2x_stats.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,7 +19,7 @@
#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
-
+#include "bnx2x_sriov.h"
/* Statistics */
@@ -79,6 +79,42 @@ static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
* Init service functions
*/
+static void bnx2x_dp_stats(struct bnx2x *bp)
+{
+ int i;
+
+ DP(BNX2X_MSG_STATS, "dumping stats:\n"
+ "fw_stats_req\n"
+ " hdr\n"
+ " cmd_num %d\n"
+ " reserved0 %d\n"
+ " drv_stats_counter %d\n"
+ " reserved1 %d\n"
+ " stats_counters_addrs %x %x\n",
+ bp->fw_stats_req->hdr.cmd_num,
+ bp->fw_stats_req->hdr.reserved0,
+ bp->fw_stats_req->hdr.drv_stats_counter,
+ bp->fw_stats_req->hdr.reserved1,
+ bp->fw_stats_req->hdr.stats_counters_addrs.hi,
+ bp->fw_stats_req->hdr.stats_counters_addrs.lo);
+
+ for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
+ DP(BNX2X_MSG_STATS,
+ "query[%d]\n"
+ " kind %d\n"
+ " index %d\n"
+ " funcID %d\n"
+ " reserved %d\n"
+ " address %x %x\n",
+ i, bp->fw_stats_req->query[i].kind,
+ bp->fw_stats_req->query[i].index,
+ bp->fw_stats_req->query[i].funcID,
+ bp->fw_stats_req->query[i].reserved,
+ bp->fw_stats_req->query[i].address.hi,
+ bp->fw_stats_req->query[i].address.lo);
+ }
+}
+
/* Post the next statistics ramrod. Protect it with the spin in
* order to ensure the strict order between statistics ramrods
* (each ramrod has a sequence number passed in a
@@ -103,7 +139,9 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
bp->fw_stats_req->hdr.drv_stats_counter);
-
+ /* adjust the ramrod to include VF queues statistics */
+ bnx2x_iov_adjust_stats_req(bp);
+ bnx2x_dp_stats(bp);
/* send FW stats ramrod */
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
@@ -174,7 +212,7 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
break;
}
cnt--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
return 1;
}
@@ -482,6 +520,12 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
static void bnx2x_stats_start(struct bnx2x *bp)
{
+ /* vfs travel through here as part of the statistics FSM, but no action
+ * is required
+ */
+ if (IS_VF(bp))
+ return;
+
if (bp->port.pmf)
bnx2x_port_stats_init(bp);
@@ -501,6 +545,11 @@ static void bnx2x_stats_pmf_start(struct bnx2x *bp)
static void bnx2x_stats_restart(struct bnx2x *bp)
{
+ /* vfs travel through here as part of the statistics FSM, but no action
+ * is required
+ */
+ if (IS_VF(bp))
+ return;
bnx2x_stats_comp(bp);
bnx2x_stats_start(bp);
}
@@ -832,19 +881,10 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
return 0;
}
-static int bnx2x_storm_stats_update(struct bnx2x *bp)
+static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
{
- struct tstorm_per_port_stats *tport =
- &bp->fw_stats_data->port.tstorm_port_statistics;
- struct tstorm_per_pf_stats *tfunc =
- &bp->fw_stats_data->pf.tstorm_pf_statistics;
- struct host_func_stats *fstats = &bp->func_stats;
- struct bnx2x_eth_stats *estats = &bp->eth_stats;
- struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
- int i;
u16 cur_stats_counter;
-
/* Make sure we use the value of the counter
* used for sending the last stats ramrod.
*/
@@ -880,6 +920,23 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
le16_to_cpu(counters->tstats_counter), bp->stats_counter);
return -EAGAIN;
}
+ return 0;
+}
+
+static int bnx2x_storm_stats_update(struct bnx2x *bp)
+{
+ struct tstorm_per_port_stats *tport =
+ &bp->fw_stats_data->port.tstorm_port_statistics;
+ struct tstorm_per_pf_stats *tfunc =
+ &bp->fw_stats_data->pf.tstorm_pf_statistics;
+ struct host_func_stats *fstats = &bp->func_stats;
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
+ int i;
+
+ /* vfs stat counter is managed by pf */
+ if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
+ return -EAGAIN;
estats->error_bytes_received_hi = 0;
estats->error_bytes_received_lo = 0;
@@ -953,8 +1010,8 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
total_broadcast_packets_received);
UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
- etherstatsoverrsizepkts);
- UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);
+ etherstatsoverrsizepkts, 32);
+ UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
SUB_EXTEND_USTAT(ucast_no_buff_pkts,
total_unicast_packets_received);
@@ -1033,15 +1090,15 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
estats->total_bytes_received_lo,
estats->rx_stat_ifhcinbadoctets_lo);
- ADD_64(estats->total_bytes_received_hi,
- le32_to_cpu(tfunc->rcv_error_bytes.hi),
- estats->total_bytes_received_lo,
- le32_to_cpu(tfunc->rcv_error_bytes.lo));
+ ADD_64_LE(estats->total_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->total_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
- ADD_64(estats->error_bytes_received_hi,
- le32_to_cpu(tfunc->rcv_error_bytes.hi),
- estats->error_bytes_received_lo,
- le32_to_cpu(tfunc->rcv_error_bytes.lo));
+ ADD_64_LE(estats->error_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->error_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
@@ -1174,23 +1231,34 @@ static void bnx2x_stats_update(struct bnx2x *bp)
if (bnx2x_edebug_stats_stopped(bp))
return;
- if (*stats_comp != DMAE_COMP_VAL)
- return;
+ if (IS_PF(bp)) {
+ if (*stats_comp != DMAE_COMP_VAL)
+ return;
- if (bp->port.pmf)
- bnx2x_hw_stats_update(bp);
+ if (bp->port.pmf)
+ bnx2x_hw_stats_update(bp);
- if (bnx2x_storm_stats_update(bp)) {
- if (bp->stats_pending++ == 3) {
- BNX2X_ERR("storm stats were not updated for 3 times\n");
- bnx2x_panic();
+ if (bnx2x_storm_stats_update(bp)) {
+ if (bp->stats_pending++ == 3) {
+ BNX2X_ERR("storm stats were not updated for 3 times\n");
+ bnx2x_panic();
+ }
+ return;
}
- return;
+ } else {
+ /* vf doesn't collect HW statistics and doesn't get completions;
+ * perform only the storm statistics update
+ */
+ bnx2x_storm_stats_update(bp);
}
bnx2x_net_stats_update(bp);
bnx2x_drv_stats_update(bp);
+ /* vf is done */
+ if (IS_VF(bp))
+ return;
+
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index b4d7b26c7fe7..364e37ecbc5c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,6 +1,6 @@
/* bnx2x_stats.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -421,16 +421,19 @@ struct bnx2x_fw_port_stats_old {
new->s); \
} while (0)
-#define UPDATE_EXTEND_TSTAT(s, t) \
+#define UPDATE_EXTEND_TSTAT_X(s, t, size) \
do { \
- diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+ diff = le##size##_to_cpu(tclient->s) - \
+ le##size##_to_cpu(old_tclient->s); \
old_tclient->s = tclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
-#define UPDATE_EXTEND_E_TSTAT(s, t) \
+#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32)
+
+#define UPDATE_EXTEND_E_TSTAT(s, t, size) \
do { \
- UPDATE_EXTEND_TSTAT(s, t); \
+ UPDATE_EXTEND_TSTAT_X(s, t, size); \
ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
} while (0)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
new file mode 100644
index 000000000000..36246129864c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -0,0 +1,1651 @@
+/* bnx2x_vfpf.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Shmulik Ravid <shmulikr@broadcom.com>
+ * Ariel Elior <ariele@broadcom.com>
+ */
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include <linux/crc32.h>
+
+/* place a given tlv on the tlv buffer at a given offset */
+void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
+ u16 length)
+{
+ struct channel_tlv *tl =
+ (struct channel_tlv *)(tlvs_list + offset);
+
+ tl->type = type;
+ tl->length = length;
+}
+
+/* Clear the mailbox and init the header of the first tlv */
+void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length)
+{
+ DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
+ type);
+
+ /* Clear mailbox */
+ memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
+
+ /* init type and length */
+ bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);
+
+ /* init first tlv header */
+ first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
+{
+ int i = 1;
+ struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+ while (tlv->type != CHANNEL_TLV_LIST_END) {
+ /* output tlv */
+ DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+ tlv->type, tlv->length);
+
+ /* advance to next tlv */
+ tlvs_list += tlv->length;
+
+ /* cast general tlv list pointer to channel tlv header*/
+ tlv = (struct channel_tlv *)tlvs_list;
+
+ i++;
+
+ /* break condition for this loop */
+ if (i > MAX_TLVS_IN_LIST) {
+ WARN(true, "corrupt tlvs");
+ return;
+ }
+ }
+
+ /* output last tlv */
+ DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+ tlv->type, tlv->length);
+}
+
+/* test whether we support a tlv type */
+bool bnx2x_tlv_supported(u16 tlvtype)
+{
+ return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+static inline int bnx2x_pfvf_status_codes(int rc)
+{
+ switch (rc) {
+ case 0:
+ return PFVF_STATUS_SUCCESS;
+ case -ENOMEM:
+ return PFVF_STATUS_NO_RESOURCE;
+ default:
+ return PFVF_STATUS_FAILURE;
+ }
+}
+
+int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
+{
+ struct cstorm_vf_zone_data __iomem *zone_data =
+ REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
+ int tout = 600, interval = 100; /* wait for 60 seconds */
+
+ if (*done) {
+ BNX2X_ERR("done was non zero before message to pf was sent\n");
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ /* Write message address */
+ writel(U64_LO(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
+ writel(U64_HI(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
+
+ /* make sure the address is written before FW accesses it */
+ wmb();
+
+ /* Trigger the PF FW */
+ writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);
+
+ /* Wait for PF to complete */
+ while ((tout >= 0) && (!*done)) {
+ msleep(interval);
+ tout -= 1;
+
+ /* progress indicator - HV can take its own sweet time in
+ * answering VFs...
+ */
+ DP_CONT(BNX2X_MSG_IOV, ".");
+ }
+
+ if (!*done) {
+ BNX2X_ERR("PF response has timed out\n");
+ return -EAGAIN;
+ }
+ DP(BNX2X_MSG_SP, "Got a response from PF\n");
+ return 0;
+}
+
+int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
+{
+ u32 me_reg;
+ int tout = 10, interval = 100; /* Wait for 1 sec */
+
+ do {
+ /* pxp traps vf read of doorbells and returns me reg value */
+ me_reg = readl(bp->doorbells);
+ if (GOOD_ME_REG(me_reg))
+ break;
+
+ msleep(interval);
+
+ BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?",
+ me_reg);
+ } while (tout-- > 0);
+
+ if (!GOOD_ME_REG(me_reg)) {
+ BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
+ return -EINVAL;
+ }
+
+ BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+
+ *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
+
+ return 0;
+}
+
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
+{
+ int rc = 0, attempts = 0;
+ struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
+ struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+ u32 vf_id;
+ bool resources_acquired = false;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ return -EAGAIN;
+
+ req->vfdev_info.vf_id = vf_id;
+ req->vfdev_info.vf_os = 0;
+
+ req->resc_request.num_rxqs = rx_count;
+ req->resc_request.num_txqs = tx_count;
+ req->resc_request.num_sbs = bp->igu_sb_cnt;
+ req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
+ req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = bp->pf2vf_bulletin_mapping;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ while (!resources_acquired) {
+ DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
+
+ /* send acquire request */
+ rc = bnx2x_send_msg2pf(bp,
+ &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+
+ /* PF timeout */
+ if (rc)
+ return rc;
+
+ /* copy acquire response from buffer to bp */
+ memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
+
+ attempts++;
+
+ /* test whether the PF accepted our request. If not, humble
+ * the request and try again.
+ */
+ if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
+ DP(BNX2X_MSG_SP, "resources acquired\n");
+ resources_acquired = true;
+ } else if (bp->acquire_resp.hdr.status ==
+ PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ DP(BNX2X_MSG_SP,
+ "PF unwilling to fulfill resource request. Try PF recommended amount\n");
+
+ /* humble our request */
+ req->resc_request.num_txqs =
+ bp->acquire_resp.resc.num_txqs;
+ req->resc_request.num_rxqs =
+ bp->acquire_resp.resc.num_rxqs;
+ req->resc_request.num_sbs =
+ bp->acquire_resp.resc.num_sbs;
+ req->resc_request.num_mac_filters =
+ bp->acquire_resp.resc.num_mac_filters;
+ req->resc_request.num_vlan_filters =
+ bp->acquire_resp.resc.num_vlan_filters;
+ req->resc_request.num_mc_filters =
+ bp->acquire_resp.resc.num_mc_filters;
+
+ /* Clear response buffer */
+ memset(&bp->vf2pf_mbox->resp, 0,
+ sizeof(union pfvf_tlvs));
+ } else {
+ /* PF reports error */
+ BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
+ bp->acquire_resp.hdr.status);
+ return -EAGAIN;
+ }
+ }
+
+ /* get HW info */
+ bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
+ bp->link_params.chip_id = bp->common.chip_id;
+ bp->db_size = bp->acquire_resp.pfdev_info.db_size;
+ bp->common.int_block = INT_BLOCK_IGU;
+ bp->common.chip_port_mode = CHIP_2_PORT_MODE;
+ bp->igu_dsb_id = -1;
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ bp->common.flash_size = 0;
+ bp->flags |=
+ NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
+ bp->igu_sb_cnt = 1;
+ bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+ strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ sizeof(bp->fw_ver));
+
+ if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
+ memcpy(bp->dev->dev_addr,
+ bp->acquire_resp.resc.current_mac_addr,
+ ETH_ALEN);
+
+ return 0;
+}
+
+int bnx2x_vfpf_release(struct bnx2x *bp)
+{
+ struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ u32 rc = 0, vf_id;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ return -EAGAIN;
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send release request */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ /* PF timeout */
+ return rc;
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF released us */
+ DP(BNX2X_MSG_SP, "vf released\n");
+ } else {
+ /* PF reports error */
+ BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/* Tell PF about SB addresses */
+int bnx2x_vfpf_init(struct bnx2x *bp)
+{
+ struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
+
+ /* status blocks */
+ for_each_eth_queue(bp, i)
+ req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
+ status_blk_mapping);
+
+ /* statistics - the request only supports a single queue for now */
+ req->stats_addr = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+
+ DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
+ return 0;
+}
+
+/* CLOSE VF - opposite to INIT_VF */
+void bnx2x_vfpf_close_vf(struct bnx2x *bp)
+{
+ struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int i, rc;
+ u32 vf_id;
+
+ /* If we haven't got a valid VF id, there is no sense to
+ * continue with sending messages
+ */
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ goto free_irq;
+
+ /* Close the queues */
+ for_each_queue(bp, i)
+ bnx2x_vfpf_teardown_queue(bp, i);
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
+
+ else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
+ resp->hdr.status);
+
+free_irq:
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 0);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+}
+
+/* ask the pf to open a queue for the vf */
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
+{
+ struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u16 tpa_agg_size = 0, flags = 0;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
+
+ /* select tpa mode to request */
+ if (!fp->disable_tpa) {
+ flags |= VFPF_QUEUE_FLG_TPA;
+ flags |= VFPF_QUEUE_FLG_TPA_IPV6;
+ if (fp->mode == TPA_MODE_GRO)
+ flags |= VFPF_QUEUE_FLG_TPA_GRO;
+ tpa_agg_size = TPA_AGG_SIZE;
+ }
+
+ /* calculate queue flags */
+ flags |= VFPF_QUEUE_FLG_STATS;
+ flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
+ flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
+ flags |= VFPF_QUEUE_FLG_VLAN;
+ DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+
+ /* Common */
+ req->vf_qid = fp_idx;
+ req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
+
+ /* Rx */
+ req->rxq.rcq_addr = fp->rx_comp_mapping;
+ req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+ req->rxq.rxq_addr = fp->rx_desc_mapping;
+ req->rxq.sge_addr = fp->rx_sge_mapping;
+ req->rxq.vf_sb = fp_idx;
+ req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
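+ /* bp->rx_ticks holds the rx coalescing interval (in usec), so the
+ * expression below presumably converts it to the desired interrupt
+ * rate in interrupts/sec, matching the hc_rate field's units
+ */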
+ req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
+ req->rxq.mtu = bp->dev->mtu;
+ req->rxq.buf_sz = fp->rx_buf_size;
+ req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
+ req->rxq.tpa_agg_sz = tpa_agg_size;
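+ /* pages needed for a max-sized packet; assuming PAGES_PER_SGE is a
+ * power of two, the mask-and-shift below rounds the page count up to
+ * a whole number of SGE elements (a ceiling division by PAGES_PER_SGE)
+ */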
+ req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
+ req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ req->rxq.flags = flags;
+ req->rxq.drop_flags = 0;
+ req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ req->rxq.stat_id = -1; /* No stats at the moment */
+
+ /* Tx */
+ req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
+ req->txq.vf_sb = fp_idx;
+ req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
+ req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
+ req->txq.flags = flags;
+ req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
+ fp_idx);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
+ fp_idx, resp->hdr.status);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+{
+ struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
+ sizeof(*req));
+
+ req->vf_qid = qidx;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc) {
+ BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
+ rc);
+ return rc;
+ }
+
+ /* PF failed the transaction */
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
+ resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* request pf to add a mac for the vf */
+int bnx2x_vfpf_set_mac(struct bnx2x *bp)
+{
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+ req->vf_qid = 0;
+ req->n_mac_vlan_filters = 1;
+ req->filters[0].flags =
+ VFPF_Q_FILTER_DEST_MAC_VALID | VFPF_Q_FILTER_SET_MAC;
+
+ /* sample bulletin board for new mac */
+ bnx2x_sample_bulletin(bp);
+
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, bp->dev->dev_addr, ETH_ALEN);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ return rc;
+ }
+
+ /* failure may mean PF was configured with a new mac for us */
+ while (resp->hdr.status == PFVF_STATUS_FAILURE) {
+ DP(BNX2X_MSG_IOV,
+ "vfpf SET MAC failed. Check bulletin board for new posts\n");
+
+ /* check if bulletin board was updated */
+ if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, bp->dev->dev_addr,
+ ETH_ALEN);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+ } else {
+ /* no new info in bulletin */
+ break;
+ }
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_vfpf_set_mcast(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc, i = 0;
+ struct netdev_hw_addr *ha;
+
+ if (bp->state != BNX2X_STATE_OPEN) {
+ DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+ return -EINVAL;
+ }
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ /* Get Rx mode requested */
+ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+ bnx2x_mc_addr(ha));
+ memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
+ i++;
+ }
+
+ /* We support at most PFVF_MAX_MULTICAST_PER_VF multicast
+ * addresses
+ */
+ if (i >= PFVF_MAX_MULTICAST_PER_VF) {
+ DP(NETIF_MSG_IFUP,
+ "VF supports no more than %d multicast MAC addresses\n",
+ PFVF_MAX_MULTICAST_PER_VF);
+ return -EINVAL;
+ }
+
+ req->n_multicast = i;
+ req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+ return rc;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
+ resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
+{
+ int mode = bp->rx_mode;
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
+
+ switch (mode) {
+ case BNX2X_RX_MODE_NONE: /* no Rx */
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
+ break;
+ case BNX2X_RX_MODE_NORMAL:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_ALLMULTI:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_PROMISC:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ default:
+ BNX2X_ERR("BAD rx mode (%d)\n", mode);
+ return -EINVAL;
+ }
+
+ req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+/* General service functions */
+static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
+
+ REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
+}
+
+static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
+
+ REG_WR8(bp, addr, 1);
+}
+
+static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_vf(bp, i)
+ storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
+}
+
+/* enable vf_pf mailbox (aka vf-pf-channel) */
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
+{
+ bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
+
+ /* enable the mailbox in the FW */
+ storm_memset_vf_mbx_ack(bp, abs_vfid);
+ storm_memset_vf_mbx_valid(bp, abs_vfid);
+
+ /* enable the VF access to the mailbox */
+ bnx2x_vf_enable_access(bp, abs_vfid);
+}
+
+/* this works only on !E1h */
+static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
+ dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
+ u32 vf_addr_lo, u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (CHIP_IS_E1x(bp)) {
+ BNX2X_ERR("Chip revision does not support VFs\n");
+ return DMAE_NOT_RDY;
+ }
+
+ if (!bp->dmae_ready) {
+ BNX2X_ERR("DMAE is not ready, can not copy\n");
+ return DMAE_NOT_RDY;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);
+
+ if (from_vf) {
+ dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
+ (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
+ (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);
+
+ dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);
+
+ dmae.src_addr_lo = vf_addr_lo;
+ dmae.src_addr_hi = vf_addr_hi;
+ dmae.dst_addr_lo = U64_LO(pf_addr);
+ dmae.dst_addr_hi = U64_HI(pf_addr);
+ } else {
+ dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
+ (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
+ (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);
+
+ dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);
+
+ dmae.src_addr_lo = U64_LO(pf_addr);
+ dmae.src_addr_hi = U64_HI(pf_addr);
+ dmae.dst_addr_lo = vf_addr_lo;
+ dmae.dst_addr_hi = vf_addr_hi;
+ }
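+ /* note: the DMAE length is expressed in 32-bit dwords, hence the
+ * length/4 at the call sites of this helper
+ */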
+ dmae.len = len32;
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE);
+
+ /* issue the command and wait for completion */
+ return bnx2x_issue_dmae_with_comp(bp, &dmae);
+}
+
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+ u64 vf_addr;
+ dma_addr_t pf_addr;
+ u16 length, type;
+ int rc;
+ struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+
+ /* prepare response */
+ type = mbx->first_tlv.tl.type;
+ length = type == CHANNEL_TLV_ACQUIRE ?
+ sizeof(struct pfvf_acquire_resp_tlv) :
+ sizeof(struct pfvf_general_resp_tlv);
+ bnx2x_add_tlv(bp, resp, 0, type, length);
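+ /* vf->op_rc holds a kernel errno; bnx2x_pfvf_status_codes() presumably
+ * maps it to one of the PFVF_STATUS_* values from bnx2x_vfpf.h
+ */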
+ resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+ bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+ bnx2x_dp_tlv_list(bp, resp);
+ DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
+ mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+
+ /* send response */
+ vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
+ mbx->first_tlv.resp_msg_offset;
+ pf_addr = mbx->msg_mapping +
+ offsetof(struct bnx2x_vf_mbx_msg, resp);
+
+ /* copy the response body, if there is one, before the header, as the vf
+ * is sensitive to the header being written
+ */
+ if (resp->hdr.tl.length > sizeof(u64)) {
+ length = resp->hdr.tl.length - sizeof(u64);
+ vf_addr += sizeof(u64);
+ pf_addr += sizeof(u64);
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ length/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy response body to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
+ }
+ vf_addr -= sizeof(u64);
+ pf_addr -= sizeof(u64);
+ }
+
+ /* ack the FW */
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ mmiowb();
+
+ /* initiate dmae to send the response */
+ mbx->flags &= ~VF_MSG_INPROCESS;
+
+ /* copy the response header including status-done field,
+ * must be last dmae, must be after FW is acked
+ */
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ sizeof(u64)/4);
+
+ /* unlock channel mutex */
+ bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+ if (rc) {
+ BNX2X_ERR("Failed to copy response status to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
+ }
+ return;
+
+mbx_error:
+ bnx2x_vf_release(bp, vf, false); /* non blocking */
+}
+
+static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx, int vfop_status)
+{
+ int i;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
+ struct pf_vf_resc *resc = &resp->resc;
+ u8 status = bnx2x_pfvf_status_codes(vfop_status);
+
+ memset(resp, 0, sizeof(*resp));
+
+ /* fill in pfdev info */
+ resp->pfdev_info.chip_num = bp->common.chip_id;
+ resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+ resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
+ resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
+ /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+ bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
+ sizeof(resp->pfdev_info.fw_ver));
+
+ if (status == PFVF_STATUS_NO_RESOURCE ||
+ status == PFVF_STATUS_SUCCESS) {
+ /* set resource numbers; if status equals NO_RESOURCE these
+ * are the maximum possible numbers
+ */
+ resc->num_rxqs = vf_rxq_count(vf) ? :
+ bnx2x_vf_max_queue_cnt(bp, vf);
+ resc->num_txqs = vf_txq_count(vf) ? :
+ bnx2x_vf_max_queue_cnt(bp, vf);
+ resc->num_sbs = vf_sb_count(vf);
+ resc->num_mac_filters = vf_mac_rules_cnt(vf);
+ resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
+ resc->num_mc_filters = 0;
+
+ if (status == PFVF_STATUS_SUCCESS) {
+ /* fill in the allocated resources */
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
+
+ for_each_vfq(vf, i)
+ resc->hw_qid[i] =
+ vfq_qzone_id(vf, vfq_get(vf, i));
+
+ for_each_vf_sb(vf, i) {
+ resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
+ resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
+ }
+
+ /* if a mac has been set for this vf, supply it */
+ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+ memcpy(resc->current_mac_addr, bulletin->mac,
+ ETH_ALEN);
+ }
+ }
+ }
+
+ DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
+ vf->abs_vfid,
+ resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size,
+ resp->pfdev_info.indices_per_sb,
+ resp->pfdev_info.pf_cap,
+ resc->num_rxqs,
+ resc->num_txqs,
+ resc->num_sbs,
+ resc->num_mac_filters,
+ resc->num_vlan_filters,
+ resc->num_mc_filters,
+ resp->pfdev_info.fw_ver);
+
+ DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
+ for (i = 0; i < vf_rxq_count(vf); i++)
+ DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
+ DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
+ for (i = 0; i < vf_sb_count(vf); i++)
+ DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
+ resc->hw_sbs[i].hw_sb_id,
+ resc->hw_sbs[i].sb_qid);
+ DP_CONT(BNX2X_MSG_IOV, "]\n");
+
+ /* send the response */
+ vf->op_rc = vfop_status;
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int rc;
+ struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
+
+ /* log vfdev info */
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
+ vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
+ acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
+ acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
+ acquire->resc_request.num_vlan_filters,
+ acquire->resc_request.num_mc_filters);
+
+ /* acquire the resources */
+ rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
+
+ /* store address of vf's bulletin board */
+ vf->bulletin_map = acquire->bulletin_addr;
+
+ /* response */
+ bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
+}
+
+static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_init_tlv *init = &mbx->msg->req.init;
+
+ /* record ghost addresses from vf message */
+ vf->spq_map = init->spq_addr;
+ vf->fw_stat_map = init->stats_addr;
+ vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+
+ /* response */
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+/* convert MBX queue-flags to standard SP queue-flags */
+static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
+ unsigned long *sp_q_flags)
+{
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
+ __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
+ __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
+ __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
+ __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
+ __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
+ __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
+ __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
+ __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
+ __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+}
+
+static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ /* verify vf_qid */
+ if (setup_q->vf_qid >= vf_rxq_count(vf)) {
+ BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
+ setup_q->vf_qid, vf_rxq_count(vf));
+ vf->op_rc = -EINVAL;
+ goto response;
+ }
+
+ /* tx queues must be set up alongside rx queues, so if the rx queue
+ * is not marked as valid there's nothing to do.
+ */
+ if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
+ struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
+ unsigned long q_type = 0;
+
+ struct bnx2x_queue_init_params *init_p;
+ struct bnx2x_queue_setup_params *setup_p;
+
+ /* reinit the VF operation context */
+ memset(&vf->op_params.qctor, 0, sizeof(vf->op_params.qctor));
+ setup_p = &vf->op_params.qctor.prep_qsetup;
+ init_p = &vf->op_params.qctor.qstate.params.init;
+
+ /* activate immediately */
+ __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
+
+ if (setup_q->param_valid & VFPF_TXQ_VALID) {
+ struct bnx2x_txq_setup_params *txq_params =
+ &setup_p->txq_params;
+
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* save sb resource index */
+ q->sb_idx = setup_q->txq.vf_sb;
+
+ /* tx init */
+ init_p->tx.hc_rate = setup_q->txq.hc_rate;
+ init_p->tx.sb_cq_index = setup_q->txq.sb_index;
+
+ bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ &init_p->tx.flags);
+
+ /* tx setup - flags */
+ bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ &setup_p->flags);
+
+ /* tx setup - general, nothing */
+
+ /* tx setup - tx */
+ txq_params->dscr_map = setup_q->txq.txq_addr;
+ txq_params->sb_cq_index = setup_q->txq.sb_index;
+ txq_params->traffic_type = setup_q->txq.traffic_type;
+
+ bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
+ q->index, q->sb_idx);
+ }
+
+ if (setup_q->param_valid & VFPF_RXQ_VALID) {
+ struct bnx2x_rxq_setup_params *rxq_params =
+ &setup_p->rxq_params;
+
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+ /* Note: there is no support for different SBs
+ * for TX and RX
+ */
+ q->sb_idx = setup_q->rxq.vf_sb;
+
+ /* rx init */
+ init_p->rx.hc_rate = setup_q->rxq.hc_rate;
+ init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
+ bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ &init_p->rx.flags);
+
+ /* rx setup - flags */
+ bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ &setup_p->flags);
+
+ /* rx setup - general */
+ setup_p->gen_params.mtu = setup_q->rxq.mtu;
+
+ /* rx setup - rx */
+ rxq_params->drop_flags = setup_q->rxq.drop_flags;
+ rxq_params->dscr_map = setup_q->rxq.rxq_addr;
+ rxq_params->sge_map = setup_q->rxq.sge_addr;
+ rxq_params->rcq_map = setup_q->rxq.rcq_addr;
+ rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
+ rxq_params->buf_sz = setup_q->rxq.buf_sz;
+ rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
+ rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
+ rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
+ rxq_params->cache_line_log =
+ setup_q->rxq.cache_line_log;
+ rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+
+ bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
+ q->index, q->sb_idx);
+ }
+ /* complete the preparations */
+ bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
+
+ vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
+ if (vf->op_rc)
+ goto response;
+ return;
+ }
+response:
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+enum bnx2x_vfop_filters_state {
+ BNX2X_VFOP_MBX_Q_FILTERS_MACS,
+ BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
+ BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
+ BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
+ BNX2X_VFOP_MBX_Q_FILTERS_DONE
+};
+
+static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *tlv,
+ struct bnx2x_vfop_filters **pfl,
+ u32 type_flag)
+{
+ int i, j;
+ struct bnx2x_vfop_filters *fl = NULL;
+ size_t fsz;
+
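+ /* allocate the list header plus one entry per mac/vlan filter
+ * carried in the request tlv
+ */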
+ fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
+ sizeof(struct bnx2x_vfop_filters);
+
+ fl = kzalloc(fsz, GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fl->head);
+
+ for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
+ struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
+
+ if ((msg_filter->flags & type_flag) != type_flag)
+ continue;
+ if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+ fl->filters[j].mac = msg_filter->mac;
+ fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
+ } else {
+ fl->filters[j].vid = msg_filter->vlan_tag;
+ fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
+ }
+ fl->filters[j].add =
+ (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
+ true : false;
+ list_add_tail(&fl->filters[j++].link, &fl->head);
+ }
+ if (list_empty(&fl->head))
+ kfree(fl);
+ else
+ *pfl = fl;
+
+ return 0;
+}
+
+static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
+ struct vfpf_q_mac_vlan_filter *filter)
+{
+ DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
+ if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
+ DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
+ if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
+ DP_CONT(msglvl, ", MAC=%pM", filter->mac);
+ DP_CONT(msglvl, "\n");
+}
+
+static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ int i;
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
+ for (i = 0; i < filters->n_mac_vlan_filters; i++)
+ bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
+ &filters->filters[i]);
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
+ DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
+ for (i = 0; i < filters->n_multicast; i++)
+ DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
+}
+
+#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
+#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
+
+static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc;
+
+ struct vfpf_set_q_filters_tlv *msg =
+ &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
+
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ enum bnx2x_vfop_filters_state state = vfop->state;
+
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_mbx_qfilters,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ switch (state) {
+ case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;
+
+ /* check for any vlan/mac changes */
+ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+ /* build mac list */
+ struct bnx2x_vfop_filters *fl = NULL;
+
+ vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_MAC_FILTER);
+ if (vfop->rc)
+ goto op_err;
+
+ if (fl) {
+ /* set mac list */
+ rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
+ msg->vf_qid,
+ false);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ }
+ /* fall through */
+
+ case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
+
+ /* check for any vlan/mac changes */
+ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+ /* build vlan list */
+ struct bnx2x_vfop_filters *fl = NULL;
+
+ vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_FILTER);
+ if (vfop->rc)
+ goto op_err;
+
+ if (fl) {
+ /* set vlan list */
+ rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
+ msg->vf_qid,
+ false);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ }
+ /* fall through */
+
+ case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
+
+ if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
+ unsigned long accept = 0;
+
+ /* convert the VF-PF rx mask to bnx2x accept flags */
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
+
+ if (msg->rx_mask &
+ VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
+
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
+
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
+
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+
+ /* A packet arriving at the vf's mac should be accepted
+ * with any vlan
+ */
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+
+ /* set rx-mode */
+ rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
+ msg->vf_qid, accept);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ /* fall through */
+
+ case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
+
+ if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
+ /* set mcasts */
+ rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
+ msg->n_multicast, false);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ /* fall through */
+op_done:
+ case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+op_err:
+ BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
+ vf->abs_vfid, msg->vf_qid, vfop->rc);
+ goto op_done;
+
+ default:
+ bnx2x_vfop_default(state);
+ }
+}
+
+static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
+ bnx2x_vfop_mbx_qfilters, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ /* if a mac was already set for this VF via the set vf mac ndo, we only
+ * accept requests to configure that same mac. Why accept them at all?
+ * Because the PF may have been unable to configure the mac at the time,
+ * since the queue was not yet set up.
+ */
+ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+ /* once a mac was set by ndo can only accept a single mac... */
+ if (filters->n_mac_vlan_filters > 1) {
+ BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
+ vf->abs_vfid);
+ vf->op_rc = -EPERM;
+ goto response;
+ }
+
+ /* ...and only the mac set by the ndo */
+ if (filters->n_mac_vlan_filters == 1 &&
+ memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
+ BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
+ vf->abs_vfid);
+
+ vf->op_rc = -EPERM;
+ goto response;
+ }
+ }
+
+ /* verify vf_qid */
+ if (filters->vf_qid > vf_rxq_count(vf))
+ goto response;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
+ vf->abs_vfid,
+ filters->vf_qid);
+
+ /* print q_filter message */
+ bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
+
+ vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
+ if (vf->op_rc)
+ goto response;
+ return;
+
+response:
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int qid = mbx->msg->req.q_op.vf_qid;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
+ vf->abs_vfid, qid);
+
+ vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
+
+ vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
+
+ vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+/* dispatch request */
+static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int i;
+
+ /* check if tlv type is known */
+ if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
+ /* Lock the per vf op mutex and note the locker's identity.
+ * The unlock will take place in mbx response.
+ */
+ bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+ /* switch on the opcode */
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ bnx2x_vf_mbx_acquire(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_INIT:
+ bnx2x_vf_mbx_init_vf(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_SETUP_Q:
+ bnx2x_vf_mbx_setup_q(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_SET_Q_FILTERS:
+ bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_TEARDOWN_Q:
+ bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_CLOSE:
+ bnx2x_vf_mbx_close_vf(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_RELEASE:
+ bnx2x_vf_mbx_release_vf(bp, vf, mbx);
+ break;
+ }
+
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver, which supports features
+ * we do not yet know about. Too bad we don't support them. Or this
+ * may be because someone wrote a crappy VF driver that is sending
+ * garbage over the channel.
+ */
+ BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
+ mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
+ for (i = 0; i < 20; i++)
+ DP_CONT(BNX2X_MSG_IOV, "%x ",
+ mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
+
+ /* test whether we can respond to the VF (do we have an address
+ * for it?)
+ */
+ if (vf->state == VF_ACQUIRED) {
+ /* mbx_resp uses the op_rc of the VF */
+ vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
+
+ /* notify the VF that we do not support this request */
+ bnx2x_vf_mbx_resp(bp, vf);
+ } else {
+ /* can't send a response since this VF is unknown to us -
+ * just unlock the channel and be done with it.
+ */
+ bnx2x_unlock_vf_pf_channel(bp, vf,
+ mbx->first_tlv.tl.type);
+ }
+ }
+}
+
+/* handle new vf-pf message */
+void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
+{
+ struct bnx2x_virtf *vf;
+ struct bnx2x_vf_mbx *mbx;
+ u8 vf_idx;
+ int rc;
+
+ DP(BNX2X_MSG_IOV,
+ "vf pf event received: vfid %d, address_hi %x, address_lo %x\n",
+ vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
+ /* Sanity checks - consider removing later */
+
+ /* check if the vf_id is valid */
+ if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
+ BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
+ vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
+ goto mbx_done;
+ }
+ vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
+ mbx = BP_VF_MBX(bp, vf_idx);
+
+ /* verify an event is not currently being processed -
+ * debug failsafe only
+ */
+ if (mbx->flags & VF_MSG_INPROCESS) {
+ BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
+ vfpf_event->vf_id);
+ goto mbx_done;
+ }
+ vf = BP_VF(bp, vf_idx);
+
+ /* save the VF message address */
+ mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
+ mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
+ DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
+ mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+
+ /* dmae to get the VF request */
+ rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
+ mbx->vf_addr_hi, mbx->vf_addr_lo,
+ sizeof(union vfpf_tlvs)/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
+ goto mbx_error;
+ }
+
+ /* process the VF message header */
+ mbx->first_tlv = mbx->msg->req.first_tlv;
+
+ /* dispatch the request (will prepare the response) */
+ bnx2x_vf_mbx_request(bp, vf, mbx);
+ goto mbx_done;
+
+mbx_error:
+ bnx2x_vf_release(bp, vf, false); /* non blocking */
+mbx_done:
+ return;
+}
+
+/* propagate local bulletin board to vf */
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
+ dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
+ vf * BULLETIN_CONTENT_SIZE;
+ dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
+ int rc;
+
+ /* can only update vf after init took place */
+ if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
+ bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
+ return 0;
+
+ /* increment bulletin board version and compute crc */
+ bulletin->version++;
+ bulletin->length = BULLETIN_CONTENT_SIZE;
+ bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);
+
+ /* propagate bulletin board via dmae to vm memory */
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
+ bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
+ U64_LO(vf_addr), bulletin->length / 4);
+ return rc;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
new file mode 100644
index 000000000000..bfc80baec00d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -0,0 +1,360 @@
+/* bnx2x_vfpf.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2011-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Ariel Elior <ariele@broadcom.com>
+ */
+#ifndef VF_PF_IF_H
+#define VF_PF_IF_H
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+/* Common definitions for all HVs */
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters; /* No limit so superfluous */
+};
+
+struct hw_sb_info {
+ u8 hw_sb_id; /* aka absolute igu id, used to ack the sb */
+ u8 sb_qid; /* used to update DHC for sb */
+};
+
+/* HW VF-PF channel definitions
+ * A.K.A VF-PF mailbox
+ */
+#define TLV_BUFFER_SIZE 1024
+#define PF_VF_BULLETIN_SIZE 512
+
+#define VFPF_QUEUE_FLG_TPA 0x0001
+#define VFPF_QUEUE_FLG_TPA_IPV6 0x0002
+#define VFPF_QUEUE_FLG_TPA_GRO 0x0004
+#define VFPF_QUEUE_FLG_CACHE_ALIGN 0x0008
+#define VFPF_QUEUE_FLG_STATS 0x0010
+#define VFPF_QUEUE_FLG_OV 0x0020
+#define VFPF_QUEUE_FLG_VLAN 0x0040
+#define VFPF_QUEUE_FLG_COS 0x0080
+#define VFPF_QUEUE_FLG_HC 0x0100
+#define VFPF_QUEUE_FLG_DHC 0x0200
+
+#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
+#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
+#define VFPF_QUEUE_DROP_TTL0 (1 << 2)
+#define VFPF_QUEUE_DROP_UDP_CS_ERR (1 << 3)
+
+#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000
+#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001
+#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002
+#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
+#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
+#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
+#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
+#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing in the towel */
+#define BULLETIN_CRC_SEED 0
+
+enum {
+ PFVF_STATUS_WAITING = 0,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 resp_msg_offset;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_general_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
+
+/* Acquire */
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+ /* the following fields are for debug purposes */
+ u8 vf_id; /* ME register value */
+ u8 vf_os; /* e.g. Linux, W2K8 */
+ u8 padding[2];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ aligned_u64 bulletin_addr;
+};
+
+/* simple operation request on queue */
+struct vfpf_q_op_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 vf_qid;
+ u8 padding[3];
+};
+
+/* acquire response tlv - carries the allocated resources */
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 pf_cap;
+#define PFVF_CAP_RSS 0x00000001
+#define PFVF_CAP_DHC 0x00000002
+#define PFVF_CAP_TPA 0x00000004
+ char fw_ver[32];
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 padding;
+ } pfdev_info;
+ struct pf_vf_resc {
+ /* in case of status NO_RESOURCE in message hdr, pf will fill
+ * this struct with suggested amount of resources for next
+ * acquire request
+ */
+#define PFVF_MAX_QUEUES_PER_VF 16
+#define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 permanent_mac_addr[ETH_ALEN];
+ u8 current_mac_addr[ETH_ALEN];
+ u8 padding[2];
+ } resc;
+};
+
+/* Init VF */
+struct vfpf_init_tlv {
+ struct vfpf_first_tlv first_tlv;
+ aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
+ aligned_u64 spq_addr;
+ aligned_u64 stats_addr;
+};
+
+/* Setup Queue */
+struct vfpf_setup_q_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_rxq_params {
+ /* physical addresses */
+ aligned_u64 rcq_addr;
+ aligned_u64 rcq_np_addr;
+ aligned_u64 rxq_addr;
+ aligned_u64 sge_addr;
+
+ /* sb + hc info */
+ u8 vf_sb; /* index in hw_sbs[] */
+ u8 sb_index; /* Index in the SB */
+ u16 hc_rate; /* desired interrupts per sec. */
+ /* valid iff VFPF_QUEUE_FLG_HC */
+ /* rx buffer info */
+ u16 mtu;
+ u16 buf_sz;
+ u16 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
+
+ /* valid iff VFPF_QUEUE_FLG_TPA */
+ u16 sge_buf_sz;
+ u16 tpa_agg_sz;
+ u8 max_sge_pkt;
+
+ u8 drop_flags; /* VFPF_QUEUE_DROP_X, for Linux VMs
+ * all the flags are turned off
+ */
+
+ u8 cache_line_log; /* VFPF_QUEUE_FLG_CACHE_ALIGN */
+ u8 padding;
+ } rxq;
+
+ struct vf_pf_txq_params {
+ /* physical addresses */
+ aligned_u64 txq_addr;
+
+ /* sb + hc info */
+ u8 vf_sb; /* index in hw_sbs[] */
+ u8 sb_index; /* Index in the SB */
+ u16 hc_rate; /* desired interrupts per sec. */
+ /* valid iff VFPF_QUEUE_FLG_HC */
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
+ u8 traffic_type; /* see in setup_context() */
+ u8 padding;
+ } txq;
+
+ u8 vf_qid; /* index in hw_qid[] */
+ u8 param_valid;
+#define VFPF_RXQ_VALID 0x01
+#define VFPF_TXQ_VALID 0x02
+ u8 padding[2];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+};
+
+/* configure queue filters */
+struct vfpf_set_q_filters_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u32 flags;
+#define VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED 0x01
+#define VFPF_SET_Q_FILTERS_MULTICAST_CHANGED 0x02
+#define VFPF_SET_Q_FILTERS_RX_MASK_CHANGED 0x04
+
+ u8 vf_qid; /* index in hw_qid[] */
+ u8 n_mac_vlan_filters;
+ u8 n_multicast;
+ u8 padding;
+
+#define PFVF_MAX_MAC_FILTERS 16
+#define PFVF_MAX_VLAN_FILTERS 16
+#define PFVF_MAX_FILTERS (PFVF_MAX_MAC_FILTERS +\
+ PFVF_MAX_VLAN_FILTERS)
+ struct vfpf_q_mac_vlan_filter filters[PFVF_MAX_FILTERS];
+
+#define PFVF_MAX_MULTICAST_PER_VF 32
+ u8 multicast[PFVF_MAX_MULTICAST_PER_VF][ETH_ALEN];
+
+ u32 rx_mask; /* see mask constants at the top of the file */
+};
+
+/* close VF (disable VF) */
+struct vfpf_close_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 vf_id; /* for debug */
+ u8 padding[2];
+};
+
+/* release the VF's acquired resources */
+struct vfpf_release_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 vf_id;
+ u8 padding[2];
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_init_tlv init;
+ struct vfpf_close_tlv close;
+ struct vfpf_q_op_tlv q_op;
+ struct vfpf_setup_q_tlv setup_q;
+ struct vfpf_set_q_filters_tlv set_q_filters;
+ struct vfpf_release_tlv release;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_general_resp_tlv general_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+/* This structure is allocated in the VF and may be updated by the PF
+ * whenever the PF deems it necessary. The bulletin board is sampled
+ * periodically by the VF. A copy per VF is maintained in the PF to prevent
+ * loss of data upon multiple updates (and the need for read-modify-write).
+ */
+struct pf_vf_bulletin_size {
+ u8 size[PF_VF_BULLETIN_SIZE];
+};
+
+struct pf_vf_bulletin_content {
+ u32 crc; /* crc of the structure, to ensure it is not
+ * read mid-update
+ */
+ u16 version;
+ u16 length;
+
+ aligned_u64 valid_bitmap; /* bitmap indicating which fields
+ * hold valid values
+ */
+
+#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
+ * is available for it
+ */
+
+ u8 mac[ETH_ALEN];
+ u8 padding[2];
+};
+
+union pf_vf_bulletin {
+ struct pf_vf_bulletin_content content;
+ struct pf_vf_bulletin_size size;
+};
+
+#define MAX_TLVS_IN_LIST 50
+
+enum channel_tlvs {
+ CHANNEL_TLV_NONE,
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_INIT,
+ CHANNEL_TLV_SETUP_Q,
+ CHANNEL_TLV_SET_Q_FILTERS,
+ CHANNEL_TLV_TEARDOWN_Q,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_PF_RELEASE_VF,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_FLR,
+ CHANNEL_TLV_PF_SET_MAC,
+ CHANNEL_TLV_MAX
+};
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* VF_PF_IF_H */
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index df8c30d1a52c..149a3a038491 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4816,6 +4816,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
return err;
}
+ ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
+
return 0;
}
@@ -5136,6 +5138,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
if (ret)
return ret;
+ ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
return 0;
}
@@ -5387,6 +5390,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
}
cnic_shutdown_rings(dev);
cp->stop_cm(dev);
+ cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -5421,11 +5425,9 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
- cdev = kzalloc(alloc_size , GFP_KERNEL);
- if (cdev == NULL) {
- netdev_err(dev, "allocate dev struct failure\n");
+ cdev = kzalloc(alloc_size, GFP_KERNEL);
+ if (cdev == NULL)
return NULL;
- }
cdev->netdev = dev;
cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 2a35436f9095..0c9367a0f57d 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -179,6 +179,7 @@ struct cnic_eth_dev {
#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
#define CNIC_DRV_STATE_NO_FCOE 0x00000010
+#define CNIC_DRV_STATE_HANDLES_IRQ 0x00000020
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 3a1c8a3cf7c9..e9b35da375cb 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2385,7 +2385,7 @@ static int sbmac_mii_probe(struct net_device *dev)
return -ENXIO;
}
- phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0,
+ phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll,
PHY_INTERFACE_MODE_GMII);
if (IS_ERR(phy_dev)) {
printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 78ea90c40e19..fdb9b5655414 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2012 Broadcom Corporation.
+ * Copyright (C) 2005-2013 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -44,6 +44,7 @@
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -93,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 128
+#define TG3_MIN_NUM 130
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "December 03, 2012"
+#define DRV_MODULE_RELDATE "February 14, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -263,6 +264,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
TG3_DRV_DATA_FLAG_5705_10_100},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
@@ -330,6 +332,10 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -570,7 +576,9 @@ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
tp->write32_mbox(tp, off, val);
- if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
+ (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
+ !tg3_flag(tp, ICH_WORKAROUND)))
tp->read32_mbox(tp, off);
}
@@ -580,7 +588,8 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
writel(val, mbox);
if (tg3_flag(tp, TXD_MBOX_HWBUG))
writel(val, mbox);
- if (tg3_flag(tp, MBOX_WRITE_REORDER))
+ if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
+ tg3_flag(tp, FLUSH_POSTED_WRITES))
readl(mbox);
}
@@ -609,7 +618,7 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
unsigned long flags;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
(off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
return;
@@ -634,7 +643,7 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
unsigned long flags;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
(off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
*val = 0;
return;
@@ -662,7 +671,7 @@ static void tg3_ape_lock_init(struct tg3 *tp)
int i;
u32 regbase, bit;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
regbase = TG3_APE_LOCK_GRANT;
else
regbase = TG3_APE_PER_LOCK_GRANT;
@@ -698,7 +707,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
switch (locknum) {
case TG3_APE_LOCK_GPIO:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
@@ -717,7 +726,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
return -EINVAL;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5761) {
req = TG3_APE_LOCK_REQ;
gnt = TG3_APE_LOCK_GRANT;
} else {
@@ -755,7 +764,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
switch (locknum) {
case TG3_APE_LOCK_GPIO:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
@@ -774,7 +783,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
return;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
gnt = TG3_APE_LOCK_GRANT;
else
gnt = TG3_APE_PER_LOCK_GRANT;
@@ -1088,7 +1097,8 @@ static void tg3_switch_clocks(struct tg3 *tp)
#define PHY_BUSY_LOOPS 5000
-static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
+ u32 *val)
{
u32 frame_val;
unsigned int loops;
@@ -1104,7 +1114,7 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
*val = 0x0;
- frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
@@ -1141,7 +1151,13 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
return ret;
}
-static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+{
+ return __tg3_readphy(tp, tp->phy_addr, reg, val);
+}
+
+static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
+ u32 val)
{
u32 frame_val;
unsigned int loops;
@@ -1159,7 +1175,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
tg3_ape_lock(tp, tp->phy_ape_lock);
- frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
@@ -1194,6 +1210,11 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
return ret;
}
+static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+{
+ return __tg3_writephy(tp, tp->phy_addr, reg, val);
+}
+
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
int err;
@@ -1283,14 +1304,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
- MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
- MII_TG3_AUXCTL_ACTL_TX_6DB)
+static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
+{
+ u32 val;
+ int err;
-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
- MII_TG3_AUXCTL_ACTL_TX_6DB);
+ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+
+ if (err)
+ return err;
+
+ if (enable)
+ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+ else
+ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+
+ err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+ val | MII_TG3_AUXCTL_ACTL_TX_6DB);
+
+ return err;
+}
static int tg3_bmcr_reset(struct tg3 *tp)
{
@@ -1446,7 +1479,7 @@ static void tg3_mdio_start(struct tg3 *tp)
udelay(80);
if (tg3_flag(tp, MDIOBUS_INITED) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
}
@@ -1461,7 +1494,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tp->phy_addr = tp->pci_fn + 1;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
else
is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
@@ -1549,7 +1582,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tg3_flag_set(tp, MDIOBUS_INITED);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
return 0;
@@ -1766,7 +1799,12 @@ static int tg3_poll_fw(struct tg3 *tp)
int i;
u32 val;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /* We don't use firmware. */
+ return 0;
+ }
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
/* Wait up to 20ms for init done. */
for (i = 0; i < 200; i++) {
if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
@@ -1795,7 +1833,7 @@ static int tg3_poll_fw(struct tg3 *tp)
netdev_info(tp->dev, "No firmware running\n");
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
/* The 57765 A0 needs a little more
* time to do some important work.
*/
@@ -1925,7 +1963,7 @@ static void tg3_adjust_link(struct net_device *dev)
if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
mac_mode |= MAC_MODE_PORT_MODE_MII;
else if (phydev->speed == SPEED_1000 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
+ tg3_asic_rev(tp) != ASIC_REV_5785)
mac_mode |= MAC_MODE_PORT_MODE_GMII;
else
mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -1952,7 +1990,7 @@ static void tg3_adjust_link(struct net_device *dev)
udelay(40);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5785) {
if (phydev->speed == SPEED_10)
tw32(MAC_MI_STAT,
MAC_MI_STAT_10MBPS_MODE |
@@ -2001,8 +2039,8 @@ static int tg3_phy_init(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
/* Attach the MAC to the PHY. */
- phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
- phydev->dev_flags, phydev->interface);
+ phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
+ tg3_adjust_link, phydev->interface);
if (IS_ERR(phydev)) {
dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
@@ -2144,7 +2182,7 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
MII_TG3_MISC_SHDW_SCR5_DLPTLM |
MII_TG3_MISC_SHDW_SCR5_SDTL |
MII_TG3_MISC_SHDW_SCR5_C125OE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
+ if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
@@ -2223,7 +2261,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
otp = tp->phy_otp;
- if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+ if (tg3_phy_toggle_auxctl_smdsp(tp, true))
return;
phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
@@ -2248,7 +2286,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
@@ -2284,9 +2322,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
if (!tp->setlpicnt) {
if (current_link_up == 1 &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
val = tr32(TG3_CPMU_EEE_MODE);
@@ -2299,14 +2337,14 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
u32 val;
if (tp->link_config.active_speed == SPEED_1000 &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_flag(tp, 57765_CLASS)) &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
val = MII_TG3_DSP_TAP26_ALNOKO |
MII_TG3_DSP_TAP26_RMRXSTO;
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
val = tr32(TG3_CPMU_EEE_MODE);
@@ -2450,7 +2488,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_CTRL1000,
CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
if (err)
return err;
@@ -2471,7 +2509,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
tg3_writephy(tp, MII_CTRL1000, phy9_orig);
@@ -2504,7 +2542,7 @@ static int tg3_phy_reset(struct tg3 *tp)
u32 val, cpmuctrl;
int err;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
udelay(40);
@@ -2519,9 +2557,9 @@ static int tg3_phy_reset(struct tg3 *tp)
tg3_link_report(tp);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
err = tg3_phy_reset_5703_4_5(tp);
if (err)
return err;
@@ -2529,8 +2567,8 @@ static int tg3_phy_reset(struct tg3 *tp)
}
cpmuctrl = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) {
cpmuctrl = tr32(TG3_CPMU_CTRL);
if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
tw32(TG3_CPMU_CTRL,
@@ -2548,8 +2586,8 @@ static int tg3_phy_reset(struct tg3 *tp)
tw32(TG3_CPMU_CTRL, cpmuctrl);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5761_AX) {
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
CPMU_LSPD_1000MB_MACCLK_12_5) {
@@ -2572,10 +2610,10 @@ static int tg3_phy_reset(struct tg3 *tp)
out:
if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, 0x201f, 0x2aaa);
tg3_phydsp_write(tp, 0x000a, 0x0323);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
@@ -2584,14 +2622,14 @@ out:
}
if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_phydsp_write(tp, 0x000a, 0x310b);
tg3_phydsp_write(tp, 0x201f, 0x9506);
tg3_phydsp_write(tp, 0x401f, 0x14e2);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
@@ -2600,7 +2638,7 @@ out:
} else
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ tg3_phy_toggle_auxctl_smdsp(tp, false);
}
}
@@ -2627,11 +2665,14 @@ out:
val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
/* adjust output voltage */
tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
}
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
+ tg3_phydsp_write(tp, 0xffb, 0x4000);
+
tg3_phy_toggle_automdix(tp, 1);
tg3_phy_set_wirespeed(tp);
return 0;
@@ -2657,8 +2698,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
u32 status, shift;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
else
status = tr32(TG3_CPMU_DRV_STATUS);
@@ -2667,8 +2708,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
status &= ~(TG3_GPIO_MSG_MASK << shift);
status |= (newstat << shift);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
else
tw32(TG3_CPMU_DRV_STATUS, status);
@@ -2681,9 +2722,9 @@ static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
if (!tg3_flag(tp, IS_NIC))
return 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
return -EIO;
@@ -2706,8 +2747,8 @@ static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
u32 grc_local_ctrl;
if (!tg3_flag(tp, IS_NIC) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
+ tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)
return;
grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
@@ -2730,8 +2771,8 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
if (!tg3_flag(tp, IS_NIC))
return;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
(GRC_LCLCTRL_GPIO_OE0 |
GRC_LCLCTRL_GPIO_OE1 |
@@ -2763,7 +2804,7 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
u32 grc_local_ctrl = 0;
/* Workaround to prevent overdrawing Amps. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
grc_local_ctrl,
@@ -2835,9 +2876,9 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
return;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
tg3_frob_aux_power_5717(tp, include_wol ?
tg3_flag(tp, WOL_ENABLE) != 0 : 0);
return;
@@ -2889,7 +2930,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
u32 val;
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5704) {
u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
u32 serdes_cfg = tr32(MAC_SERDES_CFG);
@@ -2901,7 +2942,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
return;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_bmcr_reset(tp);
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
@@ -2940,16 +2981,16 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
/* The PHY should not be powered down on some chips because
* of bugs.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5780 &&
(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tg3_asic_rev(tp) == ASIC_REV_5717 &&
!tp->pci_fn))
return;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5761_AX) {
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
val |= CPMU_LSPD_1000MB_MACCLK_12_5;
@@ -3332,7 +3373,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
!tg3_flag(tp, 57765_PLUS))
tw32(NVRAM_ADDR, phy_addr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
!tg3_flag(tp, 5755_PLUS) &&
(tp->nvram_jedecnum == JEDEC_ST) &&
(nvram_cmd & NVRAM_CMD_FIRST)) {
@@ -3417,7 +3458,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val = tr32(GRC_VCPU_EXT_CTRL);
tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
@@ -3435,6 +3476,13 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
udelay(10);
} else {
+ /*
+ * There is only an Rx CPU for the 5750 derivative in the
+ * BCM4785.
+ */
+ if (tg3_flag(tp, IS_SSB_CORE))
+ return 0;
+
for (i = 0; i < 10000; i++) {
tw32(offset + CPU_STATE, 0xffffffff);
tw32(offset + CPU_MODE, CPU_MODE_HALT);
@@ -3588,7 +3636,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
info.fw_len = tp->fw->size - 12;
info.fw_data = &fw_data[3];
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705) {
cpu_base = RX_CPU_BASE;
cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
} else {
@@ -3646,8 +3694,8 @@ static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704) {
for (i = 0; i < 12; i++) {
tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
@@ -3766,7 +3814,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_setup_phy(tp, 0);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val;
val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3808,8 +3856,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
mac_mode = MAC_MODE_PORT_MODE_MII;
mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
SPEED_100 : SPEED_10;
if (tg3_5700_link_polarity(tp, speed))
@@ -3842,8 +3889,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
}
if (!tg3_flag(tp, WOL_SPEED_100MB) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)) {
u32 base_val;
base_val = tp->pci_clock_ctrl;
@@ -3854,13 +3901,13 @@ static int tg3_power_down_prepare(struct tg3 *tp)
CLOCK_CTRL_PWRDOWN_PLL133, 40);
} else if (tg3_flag(tp, 5780_CLASS) ||
tg3_flag(tp, CPMU_PRESENT) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_asic_rev(tp) == ASIC_REV_5906) {
/* do nothing */
} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
u32 newbits1, newbits2;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
CLOCK_CTRL_TXCLK_DISABLE |
CLOCK_CTRL_ALTCLK);
@@ -3882,8 +3929,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
if (!tg3_flag(tp, 5705_PLUS)) {
u32 newbits3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
CLOCK_CTRL_TXCLK_DISABLE |
CLOCK_CTRL_44MHZ_CORE);
@@ -3902,8 +3949,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_frob_aux_power(tp, true);
/* Workaround for unstable PLL clock */
- if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
- (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
+ if ((!tg3_flag(tp, IS_SSB_CORE)) &&
+ ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
+ (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
u32 val = tr32(0x7d00);
val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
@@ -3994,8 +4042,8 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
err = tg3_writephy(tp, MII_CTRL1000, new_adv);
@@ -4009,7 +4057,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
tw32(TG3_CPMU_EEE_MODE,
tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
if (!err) {
u32 err2;
@@ -4024,7 +4072,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (err)
val = 0;
- switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ switch (tg3_asic_rev(tp)) {
case ASIC_REV_5717:
case ASIC_REV_57765:
case ASIC_REV_57766:
@@ -4037,12 +4085,13 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
/* Fall through */
case ASIC_REV_5720:
+ case ASIC_REV_5762:
if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
MII_TG3_DSP_CH34TP2_HIBW01);
}
- err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
if (!err)
err = err2;
}
@@ -4171,8 +4220,8 @@ static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
return false;
if (tgtadv &&
- (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
@@ -4256,9 +4305,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
/* Some third-party PHYs need to be reset on link going
* down.
*/
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
+ if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5705) &&
tp->link_up) {
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -4300,8 +4349,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
return err;
}
}
- } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
/* 5701 {A0,B0} CRC bug workaround */
tg3_writephy(tp, 0x15, 0x0a75);
tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
@@ -4318,8 +4367,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
tg3_writephy(tp, MII_TG3_IMASK, ~0);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
@@ -4423,6 +4472,15 @@ relink:
if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
tg3_phy_copper_begin(tp);
+ if (tg3_flag(tp, ROBOSWITCH)) {
+ current_link_up = 1;
+ /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
+ current_speed = SPEED_1000;
+ current_duplex = DUPLEX_FULL;
+ tp->link_config.active_speed = current_speed;
+ tp->link_config.active_duplex = current_duplex;
+ }
+
tg3_readphy(tp, MII_BMSR, &bmsr);
if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
@@ -4441,11 +4499,31 @@ relink:
else
tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ /* In order for the 5750 core in the BCM4785 chip to work properly
+ * in RGMII mode, the LED Control Register must be set up.
+ */
+ if (tg3_flag(tp, RGMII_MODE)) {
+ u32 led_ctrl = tr32(MAC_LED_CTRL);
+ led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
+
+ if (tp->link_config.active_speed == SPEED_10)
+ led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
+ else if (tp->link_config.active_speed == SPEED_100)
+ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_100MBPS_ON);
+ else if (tp->link_config.active_speed == SPEED_1000)
+ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_1000MBPS_ON);
+
+ tw32(MAC_LED_CTRL, led_ctrl);
+ udelay(40);
+ }
+
tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
if (tp->link_config.active_duplex == DUPLEX_HALF)
tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
if (current_link_up == 1 &&
tg3_5700_link_polarity(tp, tp->link_config.active_speed))
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
@@ -4457,7 +4535,7 @@ relink:
* ??? send/receive packets...
*/
if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
- tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
tw32_f(MAC_MI_MODE, tp->mi_mode);
udelay(80);
@@ -4476,7 +4554,7 @@ relink:
}
udelay(40);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
current_link_up == 1 &&
tp->link_config.active_speed == SPEED_1000 &&
(tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
@@ -4931,8 +5009,8 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
port_a = 1;
current_link_up = 0;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
workaround = 1;
if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
port_a = 0;
@@ -5261,7 +5339,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
bmsr |= BMSR_LSTATUS;
else
@@ -5330,8 +5408,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
bmcr = new_bmcr;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
bmsr |= BMSR_LSTATUS;
else
@@ -5466,7 +5543,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
else
err = tg3_setup_copper_phy(tp, force_reset);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
u32 scale;
val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
@@ -5484,7 +5561,8 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
(6 << TX_LENGTHS_IPG_SHIFT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val |= tr32(MAC_TX_LENGTHS) &
(TX_LENGTHS_JMB_FRM_LEN_MSK |
TX_LENGTHS_CNT_DWN_VAL_MSK);
@@ -5773,10 +5851,8 @@ static void tg3_dump_state(struct tg3 *tp)
u32 *regs;
regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
- if (!regs) {
- netdev_err(tp->dev, "Failed allocating register dump buffer\n");
+ if (!regs)
return;
- }
if (tg3_flag(tp, PCI_EXPRESS)) {
/* Read up to but not including private PCI registers */
@@ -6950,6 +7026,9 @@ static void tg3_poll_controller(struct net_device *dev)
int i;
struct tg3 *tp = netdev_priv(dev);
+ if (tg3_irq_sync(tp))
+ return;
+
for (i = 0; i < tp->irq_cnt; i++)
tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
@@ -7107,7 +7186,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
dma_addr_t new_addr = 0;
int ret = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ if (tg3_asic_rev(tp) != ASIC_REV_5701)
new_skb = skb_copy(skb, GFP_ATOMIC);
else {
int more_headroom = 4 - ((unsigned long)skb->data & 3);
@@ -7281,7 +7360,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else if (tg3_flag(tp, HW_TSO_2))
mss |= hdr_len << 9;
else if (tg3_flag(tp, HW_TSO_1) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
if (tcp_opt_len || iph->ihl > 5) {
int tsflags;
@@ -7437,7 +7516,7 @@ static void tg3_mac_loopback(struct tg3 *tp, bool enable)
if (tg3_flag(tp, 5705_PLUS) ||
(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tg3_asic_rev(tp) == ASIC_REV_5700)
tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
}
@@ -7496,7 +7575,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
udelay(40);
if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ tg3_asic_rev(tp) == ASIC_REV_5785) {
tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
MII_TG3_FET_PTEST_FRC_TX_LINK |
MII_TG3_FET_PTEST_FRC_TX_LOCK);
@@ -7520,7 +7599,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
else
mac_mode |= MAC_MODE_PORT_MODE_MII;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
if (masked_phy_id == TG3_PHY_ID_BCM5401)
@@ -8198,7 +8277,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
/* Set MAX PCI retry to zero. */
val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
tg3_flag(tp, PCIX_MODE))
val |= PCISTATE_RETRY_SAME_DMA;
/* Allow reads and writes to the APE register and memory space. */
@@ -8270,7 +8349,7 @@ static int tg3_chip_reset(struct tg3 *tp)
*/
tg3_save_pci_state(tp);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
tg3_flag(tp, 5755_PLUS))
tw32(GRC_FASTBOOT_PC, 0);
@@ -8305,7 +8384,7 @@ static int tg3_chip_reset(struct tg3 *tp)
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
}
@@ -8315,19 +8394,19 @@ static int tg3_chip_reset(struct tg3 *tp)
if (tg3_flag(tp, PCI_EXPRESS)) {
/* Force PCIe 1.0a mode */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS) &&
tr32(TG3_PCIE_PHY_TSTCTL) ==
(TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
tw32(GRC_MISC_CFG, (1 << 29));
val |= (1 << 29);
}
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
tw32(GRC_VCPU_EXT_CTRL,
tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
@@ -8370,7 +8449,7 @@ static int tg3_chip_reset(struct tg3 *tp)
if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
u16 val16;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
int j;
u32 cfg_val;
@@ -8411,23 +8490,33 @@ static int tg3_chip_reset(struct tg3 *tp)
val = tr32(MEMARB_MODE);
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
tg3_stop_fw(tp);
tw32(0x5000, 0x400);
}
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /*
+ * BCM4785: In order to avoid repercussions from using
+ * potentially defective internal ROM, stop the Rx RISC CPU,
+ * which is not required.
+ */
+ tg3_stop_fw(tp);
+ tg3_halt_cpu(tp, RX_CPU_BASE);
+ }
+
tw32(GRC_MODE, tp->grc_mode);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
val = tr32(0xc4);
tw32(0xc4, val | (1 << 15));
}
if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
@@ -8453,15 +8542,15 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_mdio_start(tp);
if (tg3_flag(tp, PCI_EXPRESS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS)) {
val = tr32(0x7c00);
tw32(0x7c00, val | (1 << 25));
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5720) {
val = tr32(TG3_CPMU_CLCK_ORIDE);
tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
}
@@ -8672,7 +8761,8 @@ static void tg3_rings_reset(struct tg3 *tp)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
else if (tg3_flag(tp, 5717_PLUS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
- else if (tg3_flag(tp, 57765_CLASS))
+ else if (tg3_flag(tp, 57765_CLASS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
else
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8688,7 +8778,8 @@ static void tg3_rings_reset(struct tg3 *tp)
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
else if (!tg3_flag(tp, 5705_PLUS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
else
@@ -8794,12 +8885,12 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, 5750_PLUS) ||
tg3_flag(tp, 5780_CLASS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
tg3_flag(tp, 57765_PLUS))
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787)
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
else
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
@@ -8979,9 +9070,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Enable MAC control of LPI */
if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
- tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
- TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
- TG3_CPMU_EEE_LNKIDL_UART_IDL);
+ val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+ TG3_CPMU_EEE_LNKIDL_UART_IDL;
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
+ val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
+
+ tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
tw32_f(TG3_CPMU_EEE_CTRL,
TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
@@ -8991,7 +9085,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_CPMU_EEEMD_LPI_IN_RX |
TG3_CPMU_EEEMD_EEE_ENABLE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ if (tg3_asic_rev(tp) != ASIC_REV_5717)
val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
if (tg3_flag(tp, ENABLE_APE))
@@ -9017,7 +9111,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_write_sig_legacy(tp, RESET_KIND_INIT);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
val = tr32(TG3_CPMU_CTRL);
val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
tw32(TG3_CPMU_CTRL, val);
@@ -9038,7 +9132,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_CPMU_HST_ACC, val);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
PCIE_PWR_MGMT_L1_THRESH_4MS;
@@ -9068,7 +9162,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (tg3_flag(tp, 57765_CLASS)) {
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
u32 grc_mode = tr32(GRC_MODE);
/* Access the lower 1K of PL PCIE block registers. */
@@ -9083,8 +9177,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
- u32 grc_mode = tr32(GRC_MODE);
+ if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
+ u32 grc_mode;
+
+ /* Fix transmit hangs */
+ val = tr32(TG3_CPMU_PADRNG_CTL);
+ val |= TG3_CPMU_PADRNG_CTL_RDIV2;
+ tw32(TG3_CPMU_PADRNG_CTL, val);
+
+ grc_mode = tr32(GRC_MODE);
/* Access the lower 1K of DL PCIE block registers. */
val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
@@ -9116,7 +9217,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
tg3_flag(tp, PCIX_MODE)) {
val = tr32(TG3PCI_PCISTATE);
val |= PCISTATE_RETRY_SAME_DMA;
@@ -9134,7 +9235,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3PCI_PCISTATE, val);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
/* Enable some hw fixes. */
val = tr32(TG3PCI_MSI_DATA);
val |= (1 << 26) | (1 << 28) | (1 << 29);
@@ -9153,14 +9254,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, 57765_PLUS)) {
val = tr32(TG3PCI_DMA_RW_CTRL) &
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
if (!tg3_flag(tp, 57765_CLASS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5762)
val |= DMA_RWCTRL_TAGGED_STAT_WA;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
+ tg3_asic_rev(tp) != ASIC_REV_5761) {
/* This value is determined during the probe time DMA
* engine test, tg3_test_dma.
*/
@@ -9200,9 +9302,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Initialize MBUF/DESC pool. */
if (tg3_flag(tp, 5750_PLUS)) {
/* Do nothing. */
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ if (tg3_asic_rev(tp) == ASIC_REV_5704)
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
else
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
@@ -9240,11 +9342,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->bufmgr_config.dma_high_water);
val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719)
val |= BUFMGR_MODE_NO_TX_UNDERRUN;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
tw32(BUFMGR_MODE, val);
for (i = 0; i < 2000; i++) {
@@ -9257,7 +9359,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return -ENODEV;
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
tg3_setup_rxbd_thresholds(tp);
@@ -9295,7 +9397,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Program the jumbo buffer descriptor ring control
* blocks on those devices that have them.
*/
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
(tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
@@ -9308,7 +9410,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
val | BDINFO_FLAGS_USE_EXT_RECV);
if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
- tg3_flag(tp, 57765_CLASS))
+ tg3_flag(tp, 57765_CLASS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -9350,7 +9453,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
(6 << TX_LENGTHS_IPG_SHIFT) |
(32 << TX_LENGTHS_SLOT_TIME_SHIFT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val |= tr32(MAC_TX_LENGTHS) &
(TX_LENGTHS_JMB_FRM_LEN_MSK |
TX_LENGTHS_CNT_DWN_VAL_MSK);
@@ -9370,20 +9474,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717)
rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
if (tg3_flag(tp, TSO_CAPABLE) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -9394,26 +9498,43 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, PCI_EXPRESS))
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
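+ /* On 57766, limit TX BD DMA segments to 2K (and set JMB_2K_MMRR)
+ * when running a standard-sized MTU.
+ */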
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ tp->dma_limit = 0;
+ if (tp->dev->mtu <= ETH_DATA_LEN) {
+ rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
+ }
+ }
+
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3))
rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
if (tg3_flag(tp, 57765_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_PLUS)) {
- val = tr32(TG3_RDMA_RSRVCTRL_REG);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+ u32 tgtreg;
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tgtreg = TG3_RDMA_RSRVCTRL_REG2;
+ else
+ tgtreg = TG3_RDMA_RSRVCTRL_REG;
+
+ val = tr32(tgtreg);
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
@@ -9421,14 +9542,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
}
- tw32(TG3_RDMA_RSRVCTRL_REG,
- val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
+ tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
- val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
- tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
+ u32 tgtreg;
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
+ else
+ tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
+
+ val = tr32(tgtreg);
+ tw32(tgtreg, val |
TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
}
@@ -9505,7 +9633,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
if (!tg3_flag(tp, 5705_PLUS) &&
!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tg3_asic_rev(tp) != ASIC_REV_5700)
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
udelay(40);
@@ -9523,11 +9651,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ if (tg3_asic_rev(tp) == ASIC_REV_5752)
gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
GRC_LCLCTRL_GPIO_OUTPUT3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ if (tg3_asic_rev(tp) == ASIC_REV_5755)
gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
tp->grc_local_ctrl &= ~gpio_mask;
@@ -9562,11 +9690,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
WDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
if (tg3_flag(tp, TSO_CAPABLE) &&
- (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
/* nothing */
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -9578,7 +9706,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, 5755_PLUS))
val |= WDMAC_MODE_STATUS_TAG_FIX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
val |= WDMAC_MODE_BURST_ALL_DATA;
tw32_f(WDMAC_MODE, val);
@@ -9589,10 +9717,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
&pcix_cmd);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703) {
pcix_cmd &= ~PCI_X_CMD_MAX_READ;
pcix_cmd |= PCI_X_CMD_READ_2K;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
pcix_cmd |= PCI_X_CMD_READ_2K;
}
@@ -9603,7 +9731,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(RDMAC_MODE, rdmac_mode);
udelay(40);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5719) {
for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
break;
@@ -9620,7 +9748,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (!tg3_flag(tp, 5705_PLUS))
tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
tw32(SNDDATAC_MODE,
SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
else
@@ -9643,7 +9771,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(SNDBDI_MODE, val);
tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
err = tg3_load_5701_a0_firmware_fix(tp);
if (err)
return err;
@@ -9658,10 +9786,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->tx_mode = TX_MODE_ENABLE;
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
tp->tx_mode &= ~val;
tp->tx_mode |= tr32(MAC_TX_MODE) & val;
@@ -9712,8 +9841,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(10);
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
- !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
+ if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
+ !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
/* Set drive transmission level to 1.2V */
/* only if the signal pre-emphasis bit is not set */
val = tr32(MAC_SERDES_CFG);
@@ -9721,7 +9850,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val |= 0x880;
tw32(MAC_SERDES_CFG, val);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
tw32(MAC_SERDES_CFG, 0x616000);
}
@@ -9734,14 +9863,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val = 2;
tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
/* Use hardware link auto-negotiation */
tg3_flag_set(tp, HW_AUTONEG);
}
if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ tg3_asic_rev(tp) == ASIC_REV_5714) {
u32 tmp;
tmp = tr32(SERDES_RX_CTRL);
@@ -9995,9 +10124,9 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
+ if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
} else {
u32 val = tr32(HOSTCC_FLOW_ATTN);
@@ -10045,10 +10174,15 @@ static void tg3_timer(unsigned long __opaque)
spin_lock(&tp->lock);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+ /* BCM4785: Flush posted writes from GbE to host memory. */
+ tr32(HOSTCC_MODE);
+ }
+
if (!tg3_flag(tp, TAGGED_STATUS)) {
/* All of this garbage is because when using non-tagged
* IRQ status the mailbox/status_block protocol the chip
@@ -10166,7 +10300,7 @@ restart_timer:
static void tg3_timer_init(struct tg3 *tp)
{
if (tg3_flag(tp, TAGGED_STATUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5717 &&
!tg3_flag(tp, 57765_CLASS))
tp->timer_offset = HZ;
else
@@ -10747,7 +10881,7 @@ static int tg3_open(struct net_device *dev)
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
if (err)
return err;
} else if (err) {
@@ -10817,8 +10951,8 @@ static u64 tg3_calc_crc_errors(struct tg3 *tp)
struct tg3_hw_stats *hw_stats = tp->hw_stats;
if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)) {
u32 val;
if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
@@ -12342,11 +12476,12 @@ static int tg3_test_memory(struct tg3 *tp)
if (tg3_flag(tp, 5717_PLUS))
mem_tbl = mem_tbl_5717;
- else if (tg3_flag(tp, 57765_CLASS))
+ else if (tg3_flag(tp, 57765_CLASS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
mem_tbl = mem_tbl_57765;
else if (tg3_flag(tp, 5755_PLUS))
mem_tbl = mem_tbl_5755;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5906)
mem_tbl = mem_tbl_5906;
else if (tg3_flag(tp, 5705_PLUS))
mem_tbl = mem_tbl_5705;
@@ -12458,7 +12593,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
} else if (tg3_flag(tp, HW_TSO_2))
mss |= hdr_len << 9;
else if (tg3_flag(tp, HW_TSO_1) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
mss |= (TG3_TSO_TCP_OPT_LEN << 9);
} else {
base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
@@ -12644,7 +12779,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
* errata. Also, the MAC loopback test is deprecated for
* all newer ASIC revisions.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
!tg3_flag(tp, CPMU_PRESENT)) {
tg3_mac_loopback(tp, true);
@@ -12922,7 +13057,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
spin_lock_bh(&tp->lock);
- err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
+ err = __tg3_readphy(tp, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, &mii_regval);
spin_unlock_bh(&tp->lock);
data->val_out = mii_regval;
@@ -12938,7 +13074,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
spin_lock_bh(&tp->lock);
- err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
+ err = __tg3_writephy(tp, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
spin_unlock_bh(&tp->lock);
return err;
@@ -13129,7 +13266,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
reset_phy = 1;
err = tg3_restart_hw(tp, reset_phy);
@@ -13242,7 +13379,7 @@ static void tg3_get_nvram_info(struct tg3 *tp)
tw32(NVRAM_CFG1, nvcfg1);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
tg3_flag(tp, 5780_CLASS)) {
switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
@@ -13683,6 +13820,22 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
nvcfg1 = tr32(NVRAM_CFG1);
nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
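+ /* 5762: flag NO_NVRAM when no vendor strap is present and map its
+ * EEPROM strap encodings onto the 5720 equivalents handled below.
+ */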
+ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+ if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
+ switch (nvmpinstrp) {
+ case FLASH_5762_EEPROM_HD:
+ nvmpinstrp = FLASH_5720_EEPROM_HD;
+ break;
+ case FLASH_5762_EEPROM_LD:
+ nvmpinstrp = FLASH_5720_EEPROM_LD;
+ break;
+ }
+ }
+
switch (nvmpinstrp) {
case FLASH_5720_EEPROM_HD:
case FLASH_5720_EEPROM_LD:
@@ -13728,7 +13881,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
tp->nvram_size = TG3_NVRAM_SIZE_1MB;
break;
default:
- tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
break;
}
break;
@@ -13774,7 +13928,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
tp->nvram_size = TG3_NVRAM_SIZE_1MB;
break;
default:
- tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
break;
}
break;
@@ -13786,11 +13941,30 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
tg3_nvram_get_pagesize(tp, nvcfg1);
if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+ u32 val;
+
+ if (tg3_nvram_read(tp, 0, &val))
+ return;
+
+ if (val != TG3_EEPROM_MAGIC &&
+ (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
+ tg3_flag_set(tp, NO_NVRAM);
+ }
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
+ tg3_flag_clear(tp, NVRAM);
+ tg3_flag_clear(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
tw32_f(GRC_EEPROM_ADDR,
(EEPROM_ADDR_FSM_RESET |
(EEPROM_DEFAULT_CLOCK_PERIOD <<
@@ -13803,8 +13977,8 @@ static void tg3_nvram_init(struct tg3 *tp)
tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
udelay(100);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701) {
tg3_flag_set(tp, NVRAM);
if (tg3_nvram_lock(tp)) {
@@ -13817,25 +13991,26 @@ static void tg3_nvram_init(struct tg3 *tp)
tp->nvram_size = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ if (tg3_asic_rev(tp) == ASIC_REV_5752)
tg3_get_5752_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755)
tg3_get_5755_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_get_5787_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5761)
tg3_get_5761_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_get_5906_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_CLASS))
tg3_get_57780_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
tg3_get_5717_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_get_5720_nvram_info(tp);
else
tg3_get_nvram_info(tp);
@@ -13948,7 +14123,7 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_flag_set(tp, EEPROM_WRITE_PROT);
tg3_flag_set(tp, WOL_CAP);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
tg3_flag_clear(tp, EEPROM_WRITE_PROT);
tg3_flag_set(tp, IS_NIC);
@@ -13975,13 +14150,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
ver >>= NIC_SRAM_DATA_VER_SHIFT;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
+ tg3_asic_rev(tp) != ASIC_REV_5703 &&
(ver > 0) && (ver < 0x100))
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
@@ -14029,18 +14204,16 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
/* Default to PHY_1_MODE if 0 (MAC_MODE) is
* read on some older 5700/5701 bootcode.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5701)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
break;
case SHASTA_EXT_LED_SHARED:
tp->led_ctrl = LED_CTRL_MODE_SHARED;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
LED_CTRL_MODE_PHY_2);
break;
@@ -14051,19 +14224,19 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
case SHASTA_EXT_LED_COMBO:
tp->led_ctrl = LED_CTRL_MODE_COMBO;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
LED_CTRL_MODE_PHY_2);
break;
}
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
+ if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) &&
tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
tp->led_ctrl = LED_CTRL_MODE_PHY_2;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
@@ -14107,13 +14280,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
if ((tg3_flag(tp, 57765_PLUS) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+ (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
(cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
if (tg3_flag(tp, PCI_EXPRESS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS)) {
u32 cfg3;
@@ -14137,6 +14310,39 @@ done:
device_set_wakeup_capable(&tp->pdev->dev, false);
}
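+/* Read a 32-bit word from the chip's OTP through the APE OTP registers,
+ * polling for command completion while holding the NVRAM lock.
+ */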
+static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
+{
+ int i, err;
+ u32 val2, off = offset * 8;
+
+ err = tg3_nvram_lock(tp);
+ if (err)
+ return err;
+
+ tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
+ tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
+ APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
+ tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
+ udelay(10);
+
+ for (i = 0; i < 100; i++) {
+ val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
+ if (val2 & APE_OTP_STATUS_CMD_DONE) {
+ *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
+ break;
+ }
+ udelay(10);
+ }
+
+ tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
+
+ tg3_nvram_unlock(tp);
+ if (val2 & APE_OTP_STATUS_CMD_DONE)
+ return 0;
+
+ return -EBUSY;
+}
+
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
int i;
@@ -14283,10 +14489,19 @@ static int tg3_phy_probe(struct tg3 *tp)
* subsys device table.
*/
p = tg3_lookup_by_subsys(tp);
- if (!p)
+ if (p) {
+ tp->phy_id = p->phy_id;
+ } else if (!tg3_flag(tp, IS_SSB_CORE)) {
+			/* So far we have seen the IDs 0xbc050cd0,
+			 * 0xbc050f80 and 0xbc050c30 on devices
+			 * connected to a BCM4785, and there are
+			 * probably more.  For now, just assume that
+			 * the phy is supported when it is connected
+			 * to an SSB core.
+			 */
return -ENODEV;
+ }
- tp->phy_id = p->phy_id;
if (!tp->phy_id ||
tp->phy_id == TG3_PHY_ID_BCM8002)
tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
@@ -14294,12 +14509,13 @@ static int tg3_phy_probe(struct tg3 *tp)
}
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
+ (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5717 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
+ (tg3_asic_rev(tp) == ASIC_REV_57765 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
tg3_phy_init_link_config(tp);
@@ -14409,7 +14625,7 @@ out_not_found:
return;
out_no_vpd:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
strcpy(tp->board_part_number, "BCM5717");
@@ -14417,7 +14633,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM5718");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
strcpy(tp->board_part_number, "BCM57780");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
@@ -14428,7 +14644,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57788");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
strcpy(tp->board_part_number, "BCM57761");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
@@ -14443,7 +14659,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57795");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
strcpy(tp->board_part_number, "BCM57762");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
@@ -14454,7 +14670,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57786");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
strcpy(tp->board_part_number, "BCM95906");
} else {
nomatch:
@@ -14676,6 +14892,8 @@ static void tg3_read_dash_ver(struct tg3 *tp)
if (tg3_flag(tp, APE_HAS_NCSI))
fwtype = "NCSI";
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
+ fwtype = "SMASH";
else
fwtype = "DASH";
@@ -14689,6 +14907,31 @@ static void tg3_read_dash_ver(struct tg3 *tp)
(apedata & APE_FW_VERSION_BLDMSK));
}
+static void tg3_read_otp_ver(struct tg3 *tp)
+{
+ u32 val, val2;
+
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
+ return;
+
+ if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
+ !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
+ TG3_OTP_MAGIC0_VALID(val)) {
+ u64 val64 = (u64) val << 32 | val2;
+ u32 ver = 0;
+ int i, vlen;
+
+ for (i = 0; i < 7; i++) {
+ if ((val64 & 0xff) == 0)
+ break;
+ ver = val64 & 0xff;
+ val64 >>= 8;
+ }
+ vlen = strlen(tp->fw_ver);
+ snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
+ }
+}
+
static void tg3_read_fw_ver(struct tg3 *tp)
{
u32 val;
@@ -14699,6 +14942,7 @@ static void tg3_read_fw_ver(struct tg3 *tp)
if (tg3_flag(tp, NO_NVRAM)) {
strcat(tp->fw_ver, "sb");
+ tg3_read_otp_ver(tp);
return;
}
@@ -14773,7 +15017,7 @@ static struct pci_dev *tg3_find_peer(struct tg3 *tp)
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
+ if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
u32 reg;
/* All devices that use the alternate
@@ -14785,7 +15029,10 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
reg = TG3PCI_GEN2_PRODID_ASICREV;
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
@@ -14807,46 +15054,47 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
/* Wrong chip ID in 5752 A0. This code can be removed later
* as A0 is not in production.
*/
- if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
tg3_flag_set(tp, 5717_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
+ tg3_asic_rev(tp) == ASIC_REV_57766)
tg3_flag_set(tp, 57765_CLASS);
- if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
+ if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, 57765_PLUS);
/* Intentionally exclude ASIC_REV_5906 */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, 5755_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+ if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
+ tg3_asic_rev(tp) == ASIC_REV_5714)
tg3_flag_set(tp, 5780_CLASS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5906 ||
tg3_flag(tp, 5755_PLUS) ||
tg3_flag(tp, 5780_CLASS))
tg3_flag_set(tp, 5750_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
tg3_flag(tp, 5750_PLUS))
tg3_flag_set(tp, 5705_PLUS);
}
@@ -14856,13 +15104,13 @@ static bool tg3_10_100_only_device(struct tg3 *tp,
{
u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
- (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+ if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
+ (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
(tp->phy_flags & TG3_PHYFLG_IS_FET))
return true;
if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705) {
if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
return true;
} else {
@@ -14923,8 +15171,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* enable this workaround if the 5703 is on the secondary
* bus of these ICH bridges.
*/
- if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
- (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
+ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
static struct tg3_dev_id {
u32 vendor;
u32 device;
@@ -14964,7 +15212,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
}
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5701) {
static struct tg3_dev_id {
u32 vendor;
u32 device;
@@ -15024,29 +15272,29 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
} while (bridge);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5714)
tp->pdev_peer = tg3_find_peer(tp);
/* Determine TSO capabilities */
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
; /* Do nothing. HW bug. */
else if (tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, HW_TSO_3);
else if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_flag_set(tp, HW_TSO_2);
else if (tg3_flag(tp, 5750_PLUS)) {
tg3_flag_set(tp, HW_TSO_1);
tg3_flag_set(tp, TSO_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
- tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
+ tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
tg3_flag_clear(tp, TSO_BUG);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
tg3_flag_set(tp, TSO_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+ if (tg3_asic_rev(tp) == ASIC_REV_5705)
tp->fw_needed = FIRMWARE_TG3TSO5;
else
tp->fw_needed = FIRMWARE_TG3TSO;
@@ -15068,22 +15316,22 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->fw_needed = NULL;
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
tp->fw_needed = FIRMWARE_TG3;
tp->irq_max = 1;
if (tg3_flag(tp, 5750_PLUS)) {
tg3_flag_set(tp, SUPPORT_MSI);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
- tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
+ if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5750_BX ||
+ (tg3_asic_rev(tp) == ASIC_REV_5714 &&
+ tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
tp->pdev_peer == tp->pdev))
tg3_flag_clear(tp, SUPPORT_MSI);
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_flag_set(tp, 1SHOT_MSI);
}
@@ -15099,25 +15347,26 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->rxq_max = TG3_RSS_MAX_NUM_QS;
tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
tp->txq_max = tp->irq_max - 1;
}
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_flag_set(tp, SHORT_DMA_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719)
tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
tg3_flag_set(tp, USE_JUMBO_BDFLAG);
if (!tg3_flag(tp, 5705_PLUS) ||
@@ -15135,20 +15384,19 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_flag_clear(tp, HW_TSO_2);
tg3_flag_clear(tp, TSO_CAPABLE);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
tg3_flag_set(tp, CLKREQ_BUG);
- } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
tg3_flag_set(tp, L1PLLPD_EN);
}
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
/* BCM5785 devices are effectively PCIe devices, and should
* follow PCIe codepaths, but do not have a PCIe capabilities
* section.
@@ -15181,7 +15429,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&tp->pci_cacheline_sz);
pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
&tp->pci_lat_timer);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
tp->pci_lat_timer < 64) {
tp->pci_lat_timer = 64;
pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
@@ -15191,7 +15439,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Important! -- It is critical that the PCI-X hw workaround
* situation is decided before the first MMIO register access.
*/
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
/* 5700 BX chips need to have their TX producer index
* mailboxes written twice to workaround a bug.
*/
@@ -15233,7 +15481,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, PCI_32BIT);
/* Chip-specific fixup from Broadcom driver */
- if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
+ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
(!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
@@ -15250,9 +15498,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Various workaround register access methods */
if (tg3_flag(tp, PCIX_TARGET_HWBUG))
tp->write32 = tg3_write_indirect_reg32;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
(tg3_flag(tp, PCI_EXPRESS) &&
- tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
/*
* Back to back register writes can cause problems on these
* chips, the workaround is to read back all reg writes
@@ -15284,7 +15532,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
pci_cmd &= ~PCI_COMMAND_MEMORY;
pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tp->read32_mbox = tg3_read32_mbox_5906;
tp->write32_mbox = tg3_write32_mbox_5906;
tp->write32_tx_mbox = tg3_write32_mbox_5906;
@@ -15293,8 +15541,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tp->write32 == tg3_write_indirect_reg32 ||
(tg3_flag(tp, PCIX_MODE) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)))
tg3_flag_set(tp, SRAM_USE_CONFIG);
/* The memory arbiter has to be enabled in order for SRAM accesses
@@ -15306,7 +15554,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
tg3_flag(tp, 5780_CLASS)) {
if (tg3_flag(tp, PCIX_MODE)) {
pci_read_config_dword(tp->pdev,
@@ -15314,21 +15562,23 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&val);
tp->pci_fn = val & 0x7;
}
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
- tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
- if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
- NIC_SRAM_CPMUSTAT_SIG) {
- tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
- tp->pci_fn = tp->pci_fn ? 1 : 0;
- }
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
- if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
- NIC_SRAM_CPMUSTAT_SIG) {
+ if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
+ val = tr32(TG3_CPMU_STATUS);
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5717)
+ tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
+ else
tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
TG3_CPMU_STATUS_FSHFT_5719;
- }
+ }
+
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+ tp->write32_tx_mbox = tg3_write_flush_reg32;
+ tp->write32_rx_mbox = tg3_write_flush_reg32;
}
/* Get eeprom hw config before calling tg3_set_power_state().
@@ -15366,18 +15616,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* It is also used as eeprom write protect on LOMs.
*/
tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
tg3_flag(tp, EEPROM_WRITE_PROT))
tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
GRC_LCLCTRL_GPIO_OUTPUT1);
/* Unused GPIO3 must be driven as output on 5752 because there
* are no pull-up resistors on unused GPIO pins.
*/
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5752)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_CLASS))
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
@@ -15391,6 +15641,10 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
GRC_LCLCTRL_GPIO_OUTPUT0;
}
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tp->grc_local_ctrl |=
+ tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
+
/* Switch out of Vaux if it is a NIC */
tg3_pwrsrc_switch_to_vmain(tp);
@@ -15401,42 +15655,42 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, JUMBO_RING_ENABLE);
/* Determine WakeOnLan speed to use. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
tg3_flag_clear(tp, WOL_SPEED_100MB);
} else {
tg3_flag_set(tp, WOL_SPEED_100MB);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ if (tg3_asic_rev(tp) == ASIC_REV_5906)
tp->phy_flags |= TG3_PHYFLG_IS_FET;
/* A few boards don't want Ethernet@WireSpeed phy feature */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
- (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
+ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
(tp->phy_flags & TG3_PHYFLG_IS_FET) ||
(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
+ if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5704_AX)
tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
if (tg3_flag(tp, 5705_PLUS) &&
!(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
+ tg3_asic_rev(tp) != ASIC_REV_57780 &&
!tg3_flag(tp, 57765_PLUS)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761) {
if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
@@ -15446,8 +15700,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->phy_flags |= TG3_PHYFLG_BER_BUG;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) {
tp->phy_otp = tg3_read_otp_phycfg(tp);
if (tp->phy_otp == 0)
tp->phy_otp = TG3_OTP_DEFAULT;
@@ -15459,20 +15713,20 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->mi_mode = MAC_MI_MODE_BASE;
tp->coalesce_mode = 0;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
+ if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
+ tg3_chip_rev(tp) != CHIPREV_5700_BX)
tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
/* Set these bits to enable statistics workaround. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
tp->coalesce_mode |= HOSTCC_MODE_ATTN;
tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
tg3_flag_set(tp, USE_PHYLIB);
err = tg3_mdio_init(tp);
@@ -15481,7 +15735,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
GRC_MODE_WORD_SWAP_B2HRX_DATA |
GRC_MODE_B2HRX_ENABLE |
@@ -15501,12 +15756,10 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&pci_state_reg);
if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
!tg3_flag(tp, PCIX_TARGET_HWBUG)) {
- u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
-
- if (chiprevid == CHIPREV_ID_5701_A0 ||
- chiprevid == CHIPREV_ID_5701_B0 ||
- chiprevid == CHIPREV_ID_5701_B2 ||
- chiprevid == CHIPREV_ID_5701_B5) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
void __iomem *sram_base;
/* Write some dummy words into the SRAM status block
@@ -15529,13 +15782,13 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
(grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
tg3_flag_set(tp, IS_5788);
if (!tg3_flag(tp, IS_5788) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tg3_asic_rev(tp) != ASIC_REV_5700)
tg3_flag_set(tp, TAGGED_STATUS);
if (tg3_flag(tp, TAGGED_STATUS)) {
tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
@@ -15568,7 +15821,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
} else {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700)
tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
else
tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
@@ -15578,7 +15831,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* change bit implementation, so we must use the
* status register in those cases.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700)
tg3_flag_set(tp, USE_LINKCHG_REG);
else
tg3_flag_clear(tp, USE_LINKCHG_REG);
@@ -15588,7 +15841,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* upon subsystem IDs.
*/
if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ tg3_asic_rev(tp) == ASIC_REV_5701 &&
!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
tg3_flag_set(tp, USE_LINKCHG_REG);
@@ -15602,7 +15855,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
tg3_flag(tp, PCIX_MODE)) {
tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -15619,9 +15872,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Increment the rx prod index on the rx std ring by at most
* 8 for these chips to workaround hw errata.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5755)
tp->rx_std_max_post = 8;
if (tg3_flag(tp, ASPM_WORKAROUND))
@@ -15643,7 +15896,6 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp)
addr = of_get_property(dp, "local-mac-address", &len);
if (addr && len == 6) {
memcpy(dev->dev_addr, addr, 6);
- memcpy(dev->perm_addr, dev->dev_addr, 6);
return 0;
}
return -ENODEV;
@@ -15654,7 +15906,6 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
struct net_device *dev = tp->dev;
memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
- memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
return 0;
}
#endif
@@ -15664,14 +15915,21 @@ static int tg3_get_device_address(struct tg3 *tp)
struct net_device *dev = tp->dev;
u32 hi, lo, mac_offset;
int addr_ok = 0;
+ int err;
#ifdef CONFIG_SPARC
if (!tg3_get_macaddr_sparc(tp))
return 0;
#endif
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
+ if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
+ return 0;
+ }
+
mac_offset = 0x7c;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
tg3_flag(tp, 5780_CLASS)) {
if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
mac_offset = 0xcc;
@@ -15684,7 +15942,7 @@ static int tg3_get_device_address(struct tg3 *tp)
mac_offset = 0xcc;
if (tp->pci_fn > 1)
mac_offset += 0x18c;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
mac_offset = 0x10;
/* First try to get it from MAC address mailbox. */
@@ -15731,7 +15989,6 @@ static int tg3_get_device_address(struct tg3 *tp)
#endif
return -EINVAL;
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return 0;
}
@@ -15753,8 +16010,8 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
/* On 5703 and later chips, the boundary bits have no
* effect.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
!tg3_flag(tp, PCI_EXPRESS))
goto out;
@@ -15992,14 +16249,14 @@ static int tg3_test_dma(struct tg3 *tp)
/* DMA read watermark not used on PCIE */
tp->dma_rwctrl |= 0x00180000;
} else if (!tg3_flag(tp, PCIX_MODE)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
+ tg3_asic_rev(tp) == ASIC_REV_5750)
tp->dma_rwctrl |= 0x003f0000;
else
tp->dma_rwctrl |= 0x003f000f;
} else {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704) {
u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
u32 read_water = 0x7;
@@ -16008,35 +16265,37 @@ static int tg3_test_dma(struct tg3 *tp)
* better performance.
*/
if (tg3_flag(tp, 40BIT_DMA_BUG) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tg3_asic_rev(tp) == ASIC_REV_5704)
tp->dma_rwctrl |= 0x8000;
else if (ccval == 0x6 || ccval == 0x7)
tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
+ if (tg3_asic_rev(tp) == ASIC_REV_5703)
read_water = 4;
/* Set bit 23 to enable PCIX hw bug fix */
tp->dma_rwctrl |=
(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
(1 << 23);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
/* 5780 always in PCIX mode */
tp->dma_rwctrl |= 0x00144000;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
/* 5714 always in PCIX mode */
tp->dma_rwctrl |= 0x00148000;
} else {
tp->dma_rwctrl |= 0x001b000f;
}
}
+ if (tg3_flag(tp, ONE_DMA_AT_ONCE))
+ tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704)
tp->dma_rwctrl &= 0xfffffff0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
/* Remove this if it causes problems for some boards. */
tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
@@ -16060,8 +16319,8 @@ static int tg3_test_dma(struct tg3 *tp)
tg3_switch_clocks(tp);
#endif
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701)
goto out;
/* It is best to perform DMA test with maximum write burst size
@@ -16180,7 +16439,7 @@ static void tg3_init_bufmgr_config(struct tg3 *tp)
DEFAULT_MB_MACRX_LOW_WATER_5705;
tp->bufmgr_config.mbuf_high_water =
DEFAULT_MB_HIGH_WATER_5705;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tp->bufmgr_config.mbuf_mac_rx_low_water =
DEFAULT_MB_MACRX_LOW_WATER_5906;
tp->bufmgr_config.mbuf_high_water =
@@ -16238,6 +16497,7 @@ static char *tg3_phy_string(struct tg3 *tp)
case TG3_PHY_ID_BCM57765: return "57765";
case TG3_PHY_ID_BCM5719C: return "5719C";
case TG3_PHY_ID_BCM5720C: return "5720C";
+ case TG3_PHY_ID_BCM5762: return "5762C";
case TG3_PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
@@ -16367,12 +16627,25 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->pm_cap = pm_cap;
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
+ tp->irq_sync = 1;
if (tg3_debug > 0)
tp->msg_enable = tg3_debug;
else
tp->msg_enable = TG3_DEF_MSG_ENABLE;
+ if (pdev_is_ssb_gige_core(pdev)) {
+ tg3_flag_set(tp, IS_SSB_CORE);
+ if (ssb_gige_must_flush_posted_writes(pdev))
+ tg3_flag_set(tp, FLUSH_POSTED_WRITES);
+ if (ssb_gige_one_dma_at_once(pdev))
+ tg3_flag_set(tp, ONE_DMA_AT_ONCE);
+ if (ssb_gige_have_roboswitch(pdev))
+ tg3_flag_set(tp, ROBOSWITCH);
+ if (ssb_gige_is_rgmii(pdev))
+ tg3_flag_set(tp, RGMII_MODE);
+ }
+
/* The word/byte swap controls here control register access byte
* swapping. DMA data byte swapping is controlled in the GRC_MODE
* setting below.
@@ -16413,7 +16686,10 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
tg3_flag_set(tp, ENABLE_APE);
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
@@ -16485,7 +16761,7 @@ static int tg3_init_one(struct pci_dev *pdev,
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
- if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
if (tg3_flag(tp, 5755_PLUS))
@@ -16505,11 +16781,11 @@ static int tg3_init_one(struct pci_dev *pdev,
if (features & NETIF_F_IPV6_CSUM)
features |= NETIF_F_TSO6;
if (tg3_flag(tp, HW_TSO_3) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
features |= NETIF_F_TSO_ECN;
}
@@ -16521,14 +16797,14 @@ static int tg3_init_one(struct pci_dev *pdev,
* MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
* loopback for the remaining devices.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
!tg3_flag(tp, CPMU_PRESENT))
/* Add the loopback capability */
features |= NETIF_F_LOOPBACK;
dev->hw_features |= features;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
!tg3_flag(tp, TSO_CAPABLE) &&
!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
tg3_flag_set(tp, MAX_RXPEND_64);
@@ -16607,8 +16883,9 @@ static int tg3_init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, PTP_CAPABLE);
if (tg3_flag(tp, 5717_PLUS)) {
@@ -16618,6 +16895,8 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_timer_init(tp);
+ tg3_carrier_off(tp);
+
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting\n");
@@ -16626,7 +16905,7 @@ static int tg3_init_one(struct pci_dev *pdev,
netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
tp->board_part_number,
- tp->pci_chip_rev_id,
+ tg3_chip_rev_id(tp),
tg3_bus_string(tp, str),
dev->dev_addr);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index d330e81f5793..8d7d4c2ab5d6 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2012 Broadcom Corporation.
+ * Copyright (C) 2007-2013 Broadcom Corporation.
*/
#ifndef _T3_H
@@ -65,6 +65,9 @@
#define TG3PCI_DEVICE_TIGON3_57766 0x1686
#define TG3PCI_DEVICE_TIGON3_57786 0x16b3
#define TG3PCI_DEVICE_TIGON3_57782 0x16b7
+#define TG3PCI_DEVICE_TIGON3_5762 0x1687
+#define TG3PCI_DEVICE_TIGON3_5725 0x1643
+#define TG3PCI_DEVICE_TIGON3_5727 0x16f3
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -117,9 +120,7 @@
#define MISC_HOST_CTRL_TAGGED_STATUS 0x00000200
#define MISC_HOST_CTRL_CHIPREV 0xffff0000
#define MISC_HOST_CTRL_CHIPREV_SHIFT 16
-#define GET_CHIP_REV_ID(MISC_HOST_CTRL) \
- (((MISC_HOST_CTRL) & MISC_HOST_CTRL_CHIPREV) >> \
- MISC_HOST_CTRL_CHIPREV_SHIFT)
+
#define CHIPREV_ID_5700_A0 0x7000
#define CHIPREV_ID_5700_A1 0x7001
#define CHIPREV_ID_5700_B0 0x7100
@@ -159,7 +160,8 @@
#define CHIPREV_ID_57765_A0 0x57785000
#define CHIPREV_ID_5719_A0 0x05719000
#define CHIPREV_ID_5720_A0 0x05720000
-#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
+#define CHIPREV_ID_5762_A0 0x05762000
+
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
#define ASIC_REV_5703 0x01
@@ -182,7 +184,7 @@
#define ASIC_REV_5719 0x5719
#define ASIC_REV_5720 0x5720
#define ASIC_REV_57766 0x57766
-#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
+#define ASIC_REV_5762 0x5762
#define CHIPREV_5700_AX 0x70
#define CHIPREV_5700_BX 0x71
#define CHIPREV_5700_CX 0x72
@@ -195,7 +197,6 @@
#define CHIPREV_5784_AX 0x57840
#define CHIPREV_5761_AX 0x57610
#define CHIPREV_57765_AX 0x577650
-#define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff)
#define METAL_REV_A0 0x00
#define METAL_REV_A1 0x01
#define METAL_REV_B0 0x00
@@ -774,7 +775,7 @@
#define SG_DIG_AUTONEG_ERROR 0x00000001
#define TG3_TX_TSTAMP_LSB 0x000005c0
#define TG3_TX_TSTAMP_MSB 0x000005c4
-#define TG3_TSTAMP_MASK 0x7fffffffffffffff
+#define TG3_TSTAMP_MASK 0x7fffffffffffffffLL
/* 0x5c8 --> 0x600 unused */
#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */
#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */
@@ -1159,6 +1160,8 @@
#define CPMU_MUTEX_GNT_DRIVER 0x00001000
#define TG3_CPMU_PHY_STRAP 0x00003664
#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
+#define TG3_CPMU_PADRNG_CTL 0x00003668
+#define TG3_CPMU_PADRNG_CTL_RDIV2 0x00040000
/* 0x3664 --> 0x36b0 unused */
#define TG3_CPMU_EEE_MODE 0x000036b0
@@ -1178,6 +1181,7 @@
#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
+#define TG3_CPMU_EEE_LNKIDL_APE_TX_MT 0x00000002
/* 0x36c0 --> 0x36d0 unused */
#define TG3_CPMU_EEE_CTRL 0x000036d0
@@ -1400,7 +1404,10 @@
#define RDMAC_STATUS_FIFOURUN 0x00000080
#define RDMAC_STATUS_FIFOOREAD 0x00000100
#define RDMAC_STATUS_LNGREAD 0x00000200
-/* 0x4808 --> 0x4900 unused */
+/* 0x4808 --> 0x4890 unused */
+
+#define TG3_RDMA_RSRVCTRL_REG2 0x00004890
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL2 0x000048a0
#define TG3_RDMA_RSRVCTRL_REG 0x00004900
#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
@@ -1850,6 +1857,7 @@
#define FLASH_VENDOR_SST_SMALL 0x00000001
#define FLASH_VENDOR_SST_LARGE 0x02000001
#define NVRAM_CFG1_5752VENDOR_MASK 0x03c00003
+#define NVRAM_CFG1_5762VENDOR_MASK 0x03e00003
#define FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ 0x00000000
#define FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ 0x02000000
#define FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED 0x02000003
@@ -1910,6 +1918,8 @@
#define FLASH_5717VENDOR_ST_45USPT 0x03400001
#define FLASH_5720_EEPROM_HD 0x00000001
#define FLASH_5720_EEPROM_LD 0x00000003
+#define FLASH_5762_EEPROM_HD 0x02000001
+#define FLASH_5762_EEPROM_LD 0x02000003
#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
@@ -2365,6 +2375,20 @@
#define APE_LOCK_REQ_DRIVER 0x00001000
#define TG3_APE_LOCK_GRANT 0x004c
#define APE_LOCK_GRANT_DRIVER 0x00001000
+#define TG3_APE_OTP_CTRL 0x00e8
+#define APE_OTP_CTRL_PROG_EN 0x200000
+#define APE_OTP_CTRL_CMD_RD 0x000000
+#define APE_OTP_CTRL_START 0x000001
+#define TG3_APE_OTP_STATUS 0x00ec
+#define APE_OTP_STATUS_CMD_DONE 0x000001
+#define TG3_APE_OTP_ADDR 0x00f0
+#define APE_OTP_ADDR_CPU_ENABLE 0x80000000
+#define TG3_APE_OTP_RD_DATA 0x00f8
+
+#define OTP_ADDRESS_MAGIC0 0x00000050
+#define TG3_OTP_MAGIC0_VALID(val) \
+ ((((val) & 0xf0000000) == 0xa0000000) ||\
+ (((val) & 0x0f000000) == 0x0a000000))
/* APE shared memory. Accessible through BAR1 */
#define TG3_APE_SHMEM_BASE 0x4000
@@ -3030,6 +3054,11 @@ enum TG3_FLAGS {
TG3_FLAG_57765_PLUS,
TG3_FLAG_57765_CLASS,
TG3_FLAG_5717_PLUS,
+ TG3_FLAG_IS_SSB_CORE,
+ TG3_FLAG_FLUSH_POSTED_WRITES,
+ TG3_FLAG_ROBOSWITCH,
+ TG3_FLAG_ONE_DMA_AT_ONCE,
+ TG3_FLAG_RGMII_MODE,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
@@ -3206,6 +3235,7 @@ struct tg3 {
#define TG3_PHY_ID_BCM57765 0x5c0d8a40
#define TG3_PHY_ID_BCM5719C 0x5c0d8a20
#define TG3_PHY_ID_BCM5720C 0x5c0d8b60
+#define TG3_PHY_ID_BCM5762 0x85803780
#define TG3_PHY_ID_BCM5906 0xdc00ac40
#define TG3_PHY_ID_BCM8002 0x60010140
#define TG3_PHY_ID_INVALID 0xffffffff
@@ -3230,6 +3260,7 @@ struct tg3 {
(X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
(X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
(X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
+ (X) == TG3_PHY_ID_BCM5720C || (X) == TG3_PHY_ID_BCM5762 || \
(X) == TG3_PHY_ID_BCM8002)
u32 phy_flags;
@@ -3320,10 +3351,22 @@ struct tg3 {
const struct firmware *fw;
u32 fw_len; /* includes BSS */
-#if IS_ENABLED(CONFIG_HWMON)
struct device *hwmon_dev;
-#endif
bool link_up;
};
+/* Accessor macros for chip and asic attributes
+ *
+ * nb: Using static inlines equivalent to the accessor macros generates
+ * larger object code with gcc 4.7.
+ * Using statement expression macros to check tp with
+ * typecheck(struct tg3 *, tp) also creates larger objects.
+ */
+#define tg3_chip_rev_id(tp) \
+ ((tp)->pci_chip_rev_id)
+#define tg3_asic_rev(tp) \
+ ((tp)->pci_chip_rev_id >> 12)
+#define tg3_chip_rev(tp) \
+ ((tp)->pci_chip_rev_id >> 8)
+
#endif /* !(_T3_H) */
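The three accessor macros above just shift and mask tp->pci_chip_rev_id: the ASIC revision is the value shifted down by 12 bits, the chip revision is the value shifted down by 8, and the metal revision is the low byte (the old GET_METAL_REV mask removed above). A minimal standalone sketch of that decomposition is shown below, using the CHIPREV_ID_57765_A0 and CHIPREV_ID_5719_A0 values defined earlier in this header; the decode helper and the program itself are illustrative only and not part of the driver.

/* Illustrative only: decode a tg3 chip revision id the same way the
 * tg3_asic_rev()/tg3_chip_rev()/tg3_chip_rev_id() macros do.
 */
#include <stdio.h>

static void demo_decode(unsigned int chip_rev_id)
{
	unsigned int asic_rev = chip_rev_id >> 12;	/* tg3_asic_rev() */
	unsigned int chip_rev = chip_rev_id >> 8;	/* tg3_chip_rev() */
	unsigned int metal    = chip_rev_id & 0xff;	/* metal revision */

	printf("id=0x%08x asic=0x%05x rev=0x%06x metal=0x%02x\n",
	       chip_rev_id, asic_rev, chip_rev, metal);
}

int main(void)
{
	demo_decode(0x57785000);	/* CHIPREV_ID_57765_A0: asic 0x57785 */
	demo_decode(0x05719000);	/* CHIPREV_ID_5719_A0:  asic 0x5719  */
	return 0;
}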
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index ceb0de0cf62c..1194446f859a 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,6 +22,7 @@ if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
+ depends on GENERIC_HARDIRQS
select NET_CORE
select MACB
---help---
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index a9b0830fb39d..79039439bfdc 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -287,7 +287,7 @@ static int macb_mii_probe(struct net_device *dev)
}
/* attach the mac to the phy */
- ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
bp->phy_interface);
if (ret) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -693,6 +693,11 @@ static int macb_poll(struct napi_struct *napi, int budget)
* get notified when new packets arrive.
*/
macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+
+ /* Packets received while interrupts were disabled */
+ status = macb_readl(bp, RSR);
+ if (unlikely(status))
+ napi_reschedule(napi);
}
/* TODO: Handle errors */
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index b407043ce9b0..a170065b5973 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -548,6 +548,10 @@ static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
return -1;
}
+ /* All frames should fit into a single buffer */
+ if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
+ return -1;
+
/* Check if packet has checksum already */
if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
!(ext_status & RXDESC_IP_PAYLOAD_MASK))
@@ -1459,7 +1463,6 @@ static int xgmac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index c8fdeaae56c0..20d2085f61c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -131,7 +131,7 @@ static void t1_set_rxmode(struct net_device *dev)
static void link_report(struct port_info *p)
{
if (!netif_carrier_ok(p->dev))
- printk(KERN_INFO "%s: link down\n", p->dev->name);
+ netdev_info(p->dev, "link down\n");
else {
const char *s = "10Mbps";
@@ -141,9 +141,9 @@ static void link_report(struct port_info *p)
case SPEED_100: s = "100Mbps"; break;
}
- printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
- p->dev->name, s,
- p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+ netdev_info(p->dev, "link up, %s, %s-duplex\n",
+ s, p->link_config.duplex == DUPLEX_FULL
+ ? "full" : "half");
}
}
@@ -976,19 +976,13 @@ static const struct net_device_ops cxgb_netdev_ops = {
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed;
-
int i, err, pci_using_dac = 0;
unsigned long mmio_start, mmio_len;
const struct board_info *bi;
struct adapter *adapter = NULL;
struct port_info *pi;
- if (!version_printed) {
- printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
- DRV_VERSION);
- ++version_printed;
- }
+ pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);
err = pci_enable_device(pdev);
if (err)
@@ -1124,8 +1118,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < bi->port_number; ++i) {
err = register_netdev(adapter->port[i].dev);
if (err)
- pr_warning("%s: cannot register net device %s, skipping\n",
- pci_name(pdev), adapter->port[i].dev->name);
+ pr_warn("%s: cannot register net device %s, skipping\n",
+ pci_name(pdev), adapter->port[i].dev->name);
else {
/*
* Change the name we use for messages to the name of
@@ -1143,10 +1137,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_release_adapter_res;
}
- printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
- bi->desc, adapter->params.chip_revision,
- adapter->params.pci.is_pcix ? "PCIX" : "PCI",
- adapter->params.pci.speed, adapter->params.pci.width);
+ pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
+ adapter->name, bi->desc, adapter->params.chip_revision,
+ adapter->params.pci.is_pcix ? "PCIX" : "PCI",
+ adapter->params.pci.speed, adapter->params.pci.width);
/*
* Set the T1B ASIC and memory clocks.
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index d84872e88171..482976925154 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1822,8 +1822,8 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (unlikely(skb->len < ETH_HLEN ||
skb->len > dev->mtu + eth_hdr_len(skb->data))) {
- pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
- skb->len, eth_hdr_len(skb->data), dev->mtu);
+ netdev_dbg(dev, "packet size %d hdr %d mtu%d\n",
+ skb->len, eth_hdr_len(skb->data), dev->mtu);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1831,7 +1831,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->ip_summed == CHECKSUM_PARTIAL &&
ip_hdr(skb)->protocol == IPPROTO_UDP) {
if (unlikely(skb_checksum_help(skb))) {
- pr_debug("%s: unable to do udp checksum\n", dev->name);
+ netdev_dbg(dev, "unable to do udp checksum\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index f15ee326d5c1..2b5e62193cea 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -29,6 +29,9 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -153,7 +156,7 @@ struct workqueue_struct *cxgb3_wq;
static void link_report(struct net_device *dev)
{
if (!netif_carrier_ok(dev))
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
else {
const char *s = "10Mbps";
const struct port_info *p = netdev_priv(dev);
@@ -170,8 +173,9 @@ static void link_report(struct net_device *dev)
break;
}
- printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
- p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+ netdev_info(dev, "link up, %s, %s-duplex\n",
+ s, p->link_config.duplex == DUPLEX_FULL
+ ? "full" : "half");
}
}
@@ -318,10 +322,10 @@ void t3_os_phymod_changed(struct adapter *adap, int port_id)
const struct port_info *pi = netdev_priv(dev);
if (pi->phy.modtype == phy_modtype_none)
- printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
+ netdev_info(dev, "PHY module unplugged\n");
else
- printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
- mod_str[pi->phy.modtype]);
+ netdev_info(dev, "%s PHY module inserted\n",
+ mod_str[pi->phy.modtype]);
}
static void cxgb_set_rxmode(struct net_device *dev)
@@ -1422,8 +1426,7 @@ static int cxgb_open(struct net_device *dev)
if (is_offload(adapter) && !ofld_disable) {
err = offload_open(dev);
if (err)
- printk(KERN_WARNING
- "Could not initialize offload capabilities\n");
+ pr_warn("Could not initialize offload capabilities\n");
}
netif_set_real_num_tx_queues(dev, pi->nqsets);
@@ -3132,14 +3135,13 @@ static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
if (!test_bit(i, &adap->registered_device_map))
continue;
- printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
- dev->name, ai->desc, pi->phy.desc,
- is_offload(adap) ? "R" : "", adap->params.rev, buf,
- (adap->flags & USING_MSIX) ? " MSI-X" :
- (adap->flags & USING_MSI) ? " MSI" : "");
+ netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
+ ai->desc, pi->phy.desc,
+ is_offload(adap) ? "R" : "", adap->params.rev, buf,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
if (adap->name == dev->name && adap->params.vpd.mclk)
- printk(KERN_INFO
- "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
+ pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
adap->name, t3_mc7_size(&adap->cm) >> 20,
t3_mc7_size(&adap->pmtx) >> 20,
t3_mc7_size(&adap->pmrx) >> 20,
@@ -3177,24 +3179,18 @@ static void cxgb3_init_iscsi_mac(struct net_device *dev)
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed;
-
int i, err, pci_using_dac = 0;
resource_size_t mmio_start, mmio_len;
const struct adapter_info *ai;
struct adapter *adapter = NULL;
struct port_info *pi;
- if (!version_printed) {
- printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
- ++version_printed;
- }
+ pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
if (!cxgb3_wq) {
cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
if (!cxgb3_wq) {
- printk(KERN_ERR DRV_NAME
- ": cannot initialize work queue\n");
+ pr_err("cannot initialize work queue\n");
return -ENOMEM;
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 942dace361d2..4232767862b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -30,6 +30,8 @@
* SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
@@ -62,9 +64,8 @@ static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;
static void cxgb_neigh_update(struct neighbour *neigh);
-static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
- struct dst_entry *new, struct neighbour *new_neigh,
- const void *daddr);
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
+ struct neighbour *neigh, const void *daddr);
static inline int offload_activated(struct t3cdev *tdev)
{
@@ -182,14 +183,17 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
struct net_device *dev = adapter->port[i];
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+ rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) {
- rcu_read_lock();
dev = __vlan_find_dev_deep(dev, vlan);
- rcu_read_unlock();
} else if (netif_is_bond_slave(dev)) {
- while (dev->master)
- dev = dev->master;
+ struct net_device *upper_dev;
+
+ while ((upper_dev =
+ netdev_master_upper_dev_get_rcu(dev)))
+ dev = upper_dev;
}
+ rcu_read_unlock();
return dev;
}
}
@@ -232,8 +236,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
if ((val >> S_MAXRXDATA) != 0x3f60) {
val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
val |= V_MAXRXDATA(0x3f60);
- printk(KERN_INFO
- "%s, iscsi set MaxRxData to 16224 (0x%x).\n",
+ pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
adapter->name, val);
t3_write_reg(adapter, A_TP_PARA_REG2, val);
}
@@ -253,8 +256,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
for (i = 0; i < 4; i++)
val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
- printk(KERN_INFO
- "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
+ pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
adapter->name, val, uiip->pgsz_factor[0],
uiip->pgsz_factor[1], uiip->pgsz_factor[2],
uiip->pgsz_factor[3]);
@@ -706,8 +708,7 @@ static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct cpl_smt_write_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
- printk(KERN_ERR
- "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
+ pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
@@ -718,8 +719,7 @@ static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
- printk(KERN_ERR
- "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+ pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
@@ -730,8 +730,7 @@ static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct cpl_rte_write_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
- printk(KERN_ERR
- "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
+ pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
@@ -751,7 +750,7 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
t3c_tid->
ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, CPL_ACT_OPEN_RPL);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -769,7 +768,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[p->opcode] (dev, skb,
t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, p->opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -787,7 +786,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[p->opcode]
(dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, p->opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -814,7 +813,7 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
(dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, CPL_PASS_ACCEPT_REQ);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -908,7 +907,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
(dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, CPL_ACT_ESTABLISH);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -954,7 +953,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[opcode] (dev, skb,
t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -970,10 +969,9 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
}
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
- cxgb_redirect(nr->old, nr->old_neigh,
- nr->new, nr->new_neigh,
+ cxgb_redirect(nr->old, nr->new, nr->neigh,
nr->daddr);
- cxgb_neigh_update(nr->new_neigh);
+ cxgb_neigh_update(nr->neigh);
break;
}
default:
@@ -991,8 +989,7 @@ static struct notifier_block nb = {
*/
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
- printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
- *skb->data);
+ pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -1010,8 +1007,8 @@ void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
if (opcode < NUM_CPL_CMDS)
cpl_handlers[opcode] = h ? h : do_bad_cpl;
else
- printk(KERN_ERR "T3C: handler registration for "
- "opcode %x failed\n", opcode);
+ pr_err("T3C: handler registration for opcode %x failed\n",
+ opcode);
}
EXPORT_SYMBOL(t3_register_cpl_handler);
@@ -1030,9 +1027,8 @@ static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
if (ret & CPL_RET_UNKNOWN_TID) {
union opcode_tid *p = cplhdr(skb);
- printk(KERN_ERR "%s: CPL message (opcode %u) had "
- "unknown TID %u\n", dev->name, opcode,
- G_TID(ntohl(p->opcode_tid)));
+ pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
+ dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
}
#endif
if (ret & CPL_RET_BUF_DONE)
@@ -1096,7 +1092,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
if (!skb) {
- printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
+ pr_err("%s: cannot allocate skb!\n", __func__);
return;
}
skb->priority = CPL_PRIORITY_CONTROL;
@@ -1111,11 +1107,11 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
tdev->send(tdev, skb);
}
-static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
- struct dst_entry *new, struct neighbour *new_neigh,
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
+ struct neighbour *neigh,
const void *daddr)
{
- struct net_device *olddev, *newdev;
+ struct net_device *dev;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
@@ -1123,29 +1119,17 @@ static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
struct l2t_entry *e;
struct t3c_tid_entry *te;
- olddev = old_neigh->dev;
- newdev = new_neigh->dev;
+ dev = neigh->dev;
- if (!is_offloading(olddev))
- return;
- if (!is_offloading(newdev)) {
- printk(KERN_WARNING "%s: Redirect to non-offload "
- "device ignored.\n", __func__);
+ if (!is_offloading(dev))
return;
- }
- tdev = dev2t3cdev(olddev);
+ tdev = dev2t3cdev(dev);
BUG_ON(!tdev);
- if (tdev != dev2t3cdev(newdev)) {
- printk(KERN_WARNING "%s: Redirect to different "
- "offload device ignored.\n", __func__);
- return;
- }
/* Add new L2T entry */
- e = t3_l2t_get(tdev, new, newdev, daddr);
+ e = t3_l2t_get(tdev, new, dev, daddr);
if (!e) {
- printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
- __func__);
+ pr_err("%s: couldn't allocate new l2t entry!\n", __func__);
return;
}
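A minimal sketch of the bond-master walk the cxgb3_offload.c hunks above switch to (illustrative only, not part of the patch); it assumes the caller already holds rcu_read_lock() and relies on netdev_master_upper_dev_get_rcu() returning NULL once no further master exists:

/* Walk from a slave to the topmost bonding master; caller holds rcu_read_lock(). */
static struct net_device *topmost_master_rcu(struct net_device *dev)
{
	struct net_device *upper;

	while ((upper = netdev_master_upper_dev_get_rcu(dev)))
		dev = upper;	/* stop once no master remains above */
	return dev;
}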
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index dd901c5061b9..9d67eb794c4b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1278,7 +1278,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* update port statistics */
- if (skb->ip_summed == CHECKSUM_COMPLETE)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
qs->port_stats[SGE_PSTAT_TX_CSUM]++;
if (skb_shinfo(skb)->gso_size)
qs->port_stats[SGE_PSTAT_TSO]++;
@@ -2130,8 +2130,10 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
- if (cpl->vlan_valid)
+ if (cpl->vlan_valid) {
+ qs->port_stats[SGE_PSTAT_VLANEX]++;
__vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
+ }
napi_gro_frags(&qs->napi);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 3dee68612c9e..c74a898fcd4f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3725,8 +3725,6 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
memcpy(adapter->port[i]->dev_addr, hw_addr,
ETH_ALEN);
- memcpy(adapter->port[i]->perm_addr, hw_addr,
- ETH_ALEN);
init_link_config(&p->link_config, p->phy.caps);
p->phy.ops->power_down(&p->phy, 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f0718e1a8369..c6c05bfef0e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1994,9 +1994,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
-
- return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
- c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
+ struct sge_rspq *q;
+ int i;
+ int r = 0;
+
+ for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
+ q = &adap->sge.ethrxq[i].rspq;
+ r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
+ c->rx_max_coalesced_frames);
+ if (r) {
+ dev_err(&dev->dev, "failed to set coalesce %d\n", r);
+ break;
+ }
+ }
+ return r;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@ -4016,8 +4027,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
VFRES_NEQ, VFRES_NETHCTRL,
VFRES_NIQFLINT, VFRES_NIQ,
VFRES_TC, VFRES_NVI,
- FW_PFVF_CMD_CMASK_GET(
- FW_PFVF_CMD_CMASK_MASK),
+ FW_PFVF_CMD_CMASK_MASK,
pfvfres_pmask(
adapter, pf, vf),
VFRES_NEXACTF,
@@ -5131,7 +5141,7 @@ static int __init cxgb4_init_module(void)
/* Debugfs support is optional, just warn if this fails */
cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (!cxgb4_debugfs_root)
- pr_warning("could not create debugfs entry, continuing\n");
+ pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 22f3af5166bf..4ce62031f62f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3603,7 +3603,6 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
p->lport = j;
p->rss_size = rss_size;
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
- memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
adap->port[i]->dev_id = j;
ret = ntohl(c.u.info.lstatus_to_modtype);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 611396c4b381..68eaa9c88c7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -466,7 +466,6 @@ static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
u8 hw_addr[])
{
memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
- memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0188df705719..56b46ab2d4c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -33,6 +33,8 @@
* SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -196,11 +198,10 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
break;
}
- printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n",
- dev->name, s, fc);
+ netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
} else {
netif_carrier_off(dev);
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
}
}
@@ -2465,8 +2466,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
static int cxgb4vf_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int version_printed;
-
int pci_using_dac;
int err, pidx;
unsigned int pmask;
@@ -2478,10 +2477,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Print our driver banner the first time we're called to initialize a
* device.
*/
- if (version_printed == 0) {
- printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
- version_printed = 1;
- }
+ pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
/*
* Initialize generic PCI device state.
@@ -2920,18 +2916,15 @@ static int __init cxgb4vf_module_init(void)
* Vet our module parameters.
*/
if (msi != MSI_MSIX && msi != MSI_MSI) {
- printk(KERN_WARNING KBUILD_MODNAME
- ": bad module parameter msi=%d; must be %d"
- " (MSI-X or MSI) or %d (MSI)\n",
- msi, MSI_MSIX, MSI_MSI);
+ pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
+ msi, MSI_MSIX, MSI_MSI);
return -EINVAL;
}
/* Debugfs support is optional, just warn if this fails */
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
- printk(KERN_WARNING KBUILD_MODNAME ": could not create"
- " debugfs entry, continuing\n");
+ pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4vf_driver);
if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
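For reference, a minimal sketch of the logging convention the printk()-to-pr_*() conversions above rely on (illustrative, not part of the patch): defining pr_fmt() before the includes makes every pr_*() call prefix its message with the module name, so the explicit KBUILD_MODNAME and KERN_* strings can be dropped:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static void report_bad_msi(int msi)
{
	/* expands to printk(KERN_WARNING KBUILD_MODNAME ": bad module parameter msi=%d\n", msi) */
	pr_warn("bad module parameter msi=%d\n", msi);
}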
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 92170d50d9d8..9488032d6d2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1477,8 +1477,10 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
- if (pkt->vlan_ex)
+ if (pkt->vlan_ex) {
__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+ rxq->stats.vlan_ex++;
+ }
ret = napi_gro_frags(&rxq->rspq.napi);
if (ret == GRO_HELD)
@@ -1501,7 +1503,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct sk_buff *skb;
- const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
+ const struct cpl_rx_pkt *pkt = (void *)rsp;
bool csum_ok = pkt->csum_calc && !pkt->err_vec;
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 78c55213eaf7..354cbb78ed50 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -710,8 +710,8 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 64866ff1aea0..ec1a233622c6 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -865,7 +865,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
}
memcpy(netdev->dev_addr, addr, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
@@ -1491,7 +1490,8 @@ static int enic_request_intr(struct enic *enic)
for (i = 0; i < enic->rq_count; i++) {
intr = enic_msix_rq_intr(enic, i);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-rx-%d", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix_rq;
enic->msix[intr].devid = &enic->napi[i];
@@ -1499,20 +1499,23 @@ static int enic_request_intr(struct enic *enic)
for (i = 0; i < enic->wq_count; i++) {
intr = enic_msix_wq_intr(enic, i);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-tx-%d", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix_wq;
enic->msix[intr].devid = enic;
}
intr = enic_msix_err_intr(enic);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-err", netdev->name);
enic->msix[intr].isr = enic_isr_msix_err;
enic->msix[intr].devid = enic;
intr = enic_msix_notify_intr(enic);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-notify", netdev->name);
enic->msix[intr].isr = enic_isr_msix_notify;
enic->msix[intr].devid = enic;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index c73472c369cd..8cdf02503d13 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -434,9 +434,10 @@ static void dm9000_get_drvinfo(struct net_device *dev,
{
board_info_t *dm = to_dm9000_board(dev);
- strcpy(info->driver, CARDNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, to_platform_device(dm->dev)->name);
+ strlcpy(info->driver, CARDNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
+ sizeof(info->bus_info));
}
static u32 dm9000_get_msglevel(struct net_device *dev)
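A short sketch of the bounded-copy get_drvinfo pattern the ep93xx and dm9000 hunks adopt (names here are illustrative): strlcpy() truncates to the destination size and always NUL-terminates, which the removed strcpy() calls did not guarantee:

static void example_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	/* copy at most sizeof(field) - 1 bytes and NUL-terminate */
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}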
diff --git a/drivers/net/ethernet/dec/Kconfig b/drivers/net/ethernet/dec/Kconfig
index 37940279ded8..68262aa57d01 100644
--- a/drivers/net/ethernet/dec/Kconfig
+++ b/drivers/net/ethernet/dec/Kconfig
@@ -17,21 +17,5 @@ config NET_VENDOR_DEC
your specific card in the following questions.
if NET_VENDOR_DEC
-
-config EWRK3
- tristate "EtherWORKS 3 (DE203, DE204, DE205) support"
- depends on ISA
- select CRC32
- ---help---
- This driver supports the DE203, DE204 and DE205 network (Ethernet)
- cards. If this is for you, say Y and read
- <file:Documentation/networking/ewrk3.txt> in the kernel source as
- well as the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ewrk3.
-
source "drivers/net/ethernet/dec/tulip/Kconfig"
-
endif # NET_VENDOR_DEC
diff --git a/drivers/net/ethernet/dec/Makefile b/drivers/net/ethernet/dec/Makefile
index 1b01ed8d42c8..32993fccbbfd 100644
--- a/drivers/net/ethernet/dec/Makefile
+++ b/drivers/net/ethernet/dec/Makefile
@@ -2,5 +2,4 @@
# Makefile for the Digital Equipment Inc. network device drivers.
#
-obj-$(CONFIG_EWRK3) += ewrk3.o
obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
deleted file mode 100644
index 9f992b95eddc..000000000000
--- a/drivers/net/ethernet/dec/ewrk3.c
+++ /dev/null
@@ -1,1961 +0,0 @@
-/* ewrk3.c: A DIGITAL EtherWORKS 3 ethernet driver for Linux.
-
- Written 1994 by David C. Davies.
-
- Copyright 1994 Digital Equipment Corporation.
-
- This software may be used and distributed according to the terms of
- the GNU General Public License, incorporated herein by reference.
-
- This driver is written for the Digital Equipment Corporation series
- of EtherWORKS ethernet cards:
-
- DE203 Turbo (BNC)
- DE204 Turbo (TP)
- DE205 Turbo (TP BNC)
-
- The driver has been tested on a relatively busy network using the DE205
- card and benchmarked with 'ttcp': it transferred 16M of data at 975kB/s
- (7.8Mb/s) to a DECstation 5000/200.
-
- The author may be reached at davies@maniac.ultranet.com.
-
- =========================================================================
- This driver has been written substantially from scratch, although its
- inheritance of style and stack interface from 'depca.c' and in turn from
- Donald Becker's 'lance.c' should be obvious.
-
- The DE203/4/5 boards all use a new proprietary chip in place of the
- LANCE chip used in prior cards (DEPCA, DE100, DE200/1/2, DE210, DE422).
- Use the depca.c driver in the standard distribution for the LANCE based
- cards from DIGITAL; this driver will not work with them.
-
- The DE203/4/5 cards have 2 main modes: shared memory and I/O only. I/O
- only makes all the card accesses through I/O transactions and no high
- (shared) memory is used. This mode provides a >48% performance penalty
- and is deprecated in this driver, although allowed to provide initial
- setup when hardstrapped.
-
- The shared memory mode comes in 3 flavours: 2kB, 32kB and 64kB. There is
- no point in using any mode other than the 2kB mode - their performances
- are virtually identical, although the driver has been tested in the 2kB
- and 32kB modes. I would suggest you uncomment the line:
-
- FORCE_2K_MODE;
-
- to allow the driver to configure the card as a 2kB card at your current
- base address, thus leaving more room to clutter your system box with
- other memory hungry boards.
-
- As many ISA and EISA cards can be supported under this driver as you
- wish, limited primarily by the available IRQ lines, rather than by the
- available I/O addresses (24 ISA, 16 EISA). I have checked different
- configurations of multiple depca cards and ewrk3 cards and have not
- found a problem yet (provided you have at least depca.c v0.38) ...
-
- The board IRQ setting must be at an unused IRQ which is auto-probed
- using Donald Becker's autoprobe routines. All these cards are at
- {5,10,11,15}.
-
- No 16MB memory limitation should exist with this driver as DMA is not
- used and the common memory area is in low memory on the network card (my
- current system has 20MB and I've not had problems yet).
-
- The ability to load this driver as a loadable module has been included
- and used extensively during the driver development (to save those long
- reboot sequences). To utilise this ability, you have to do 8 things:
-
- 0) have a copy of the loadable modules code installed on your system.
- 1) copy ewrk3.c from the /linux/drivers/net directory to your favourite
- temporary directory.
- 2) edit the source code near line 1898 to reflect the I/O address and
- IRQ you're using.
- 3) compile ewrk3.c, but include -DMODULE in the command line to ensure
- that the correct bits are compiled (see end of source code).
- 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
- kernel with the ewrk3 configuration turned off and reboot.
- 5) insmod ewrk3.o
- [Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y]
- [Adam Kropelin: now accepts irq=x1,x2 io=y1,y2 for multiple cards]
- 6) run the net startup bits for your new eth?? interface manually
- (usually /etc/rc.inet[12] at boot time).
- 7) enjoy!
-
- Note that autoprobing is not allowed in loadable modules - the system is
- already up and running and you're messing with interrupts.
-
- To unload a module, turn off the associated interface
- 'ifconfig eth?? down' then 'rmmod ewrk3'.
-
- Promiscuous mode has been turned off in this driver, but all the
- multicast address bits have been turned on. This improved the send
- performance on a busy network by about 13%.
-
- Ioctl's have now been provided (primarily because I wanted to grab some
- packet size statistics). They are patterned after 'plipconfig.c' from a
- suggestion by Alan Cox. Using these ioctls, you can enable promiscuous
- mode, add/delete multicast addresses, change the hardware address, get
- packet size distribution statistics and muck around with the control and
- status register. I'll add others if and when the need arises.
-
- TO DO:
- ------
-
-
- Revision History
- ----------------
-
- Version Date Description
-
- 0.1 26-aug-94 Initial writing. ALPHA code release.
- 0.11 31-aug-94 Fixed: 2k mode memory base calc.,
- LeMAC version calc.,
- IRQ vector assignments during autoprobe.
- 0.12 31-aug-94 Tested working on LeMAC2 (DE20[345]-AC) card.
- Fixed up MCA hash table algorithm.
- 0.20 4-sep-94 Added IOCTL functionality.
- 0.21 14-sep-94 Added I/O mode.
- 0.21axp 15-sep-94 Special version for ALPHA AXP Linux V1.0.
- 0.22 16-sep-94 Added more IOCTLs & tidied up.
- 0.23 21-sep-94 Added transmit cut through.
- 0.24 31-oct-94 Added uid checks in some ioctls.
- 0.30 1-nov-94 BETA code release.
- 0.31 5-dec-94 Added check/allocate region code.
- 0.32 16-jan-95 Broadcast packet fix.
- 0.33 10-Feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
- 0.40 27-Dec-95 Rationalise MODULE and autoprobe code.
- Rewrite for portability & updated.
- ALPHA support from <jestabro@amt.tay1.dec.com>
- Added verify_area() calls in ewrk3_ioctl() from
- suggestion by <heiko@colossus.escape.de>.
- Add new multicasting code.
- 0.41 20-Jan-96 Fix IRQ set up problem reported by
- <kenneth@bbs.sas.ntu.ac.sg>.
- 0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
- 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
- 0.44 08-Nov-01 use library crc32 functions <Matt_Domsch@dell.com>
- 0.45 19-Jul-02 fix unaligned access on alpha <martin@bruli.net>
- 0.46 10-Oct-02 Multiple NIC support when module <akropel1@rochester.rr.com>
- 0.47 18-Oct-02 ethtool support <akropel1@rochester.rr.com>
- 0.48 18-Oct-02 cli/sti removal for 2.5 <vda@port.imtp.ilyichevsk.odessa.ua>
- ioctl locking, signature search cleanup <akropel1@rochester.rr.com>
-
- =========================================================================
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/ethtool.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/ctype.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/uaccess.h>
-
-#include "ewrk3.h"
-
-#define DRV_NAME "ewrk3"
-#define DRV_VERSION "0.48"
-
-static char version[] __initdata =
-DRV_NAME ":v" DRV_VERSION " 2002/10/18 davies@maniac.ultranet.com\n";
-
-#ifdef EWRK3_DEBUG
-static int ewrk3_debug = EWRK3_DEBUG;
-#else
-static int ewrk3_debug = 1;
-#endif
-
-#define EWRK3_NDA 0xffe0 /* No Device Address */
-
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-#ifndef EWRK3_SIGNATURE
-#define EWRK3_SIGNATURE {"DE203","DE204","DE205",""}
-#define EWRK3_STRLEN 8
-#endif
-
-#ifndef EWRK3_RAM_BASE_ADDRESSES
-#define EWRK3_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0x00000}
-#endif
-
-/*
- ** Sets up the I/O area for the autoprobe.
- */
-#define EWRK3_IO_BASE 0x100 /* Start address for probe search */
-#define EWRK3_IOP_INC 0x20 /* I/O address increment */
-#define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */
-
-#ifndef MAX_NUM_EWRK3S
-#define MAX_NUM_EWRK3S 21
-#endif
-
-#ifndef EWRK3_EISA_IO_PORTS
-#define EWRK3_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
-#endif
-
-#ifndef MAX_EISA_SLOTS
-#define MAX_EISA_SLOTS 16
-#define EISA_SLOT_INC 0x1000
-#endif
-
-#define QUEUE_PKT_TIMEOUT (1*HZ) /* Jiffies */
-
-/*
- ** EtherWORKS 3 shared memory window sizes
- */
-#define IO_ONLY 0x00
-#define SHMEM_2K 0x800
-#define SHMEM_32K 0x8000
-#define SHMEM_64K 0x10000
-
-/*
- ** EtherWORKS 3 IRQ ENABLE/DISABLE
- */
-#define ENABLE_IRQs { \
- icr |= lp->irq_mask;\
- outb(icr, EWRK3_ICR); /* Enable the IRQs */\
-}
-
-#define DISABLE_IRQs { \
- icr = inb(EWRK3_ICR);\
- icr &= ~lp->irq_mask;\
- outb(icr, EWRK3_ICR); /* Disable the IRQs */\
-}
-
-/*
- ** EtherWORKS 3 START/STOP
- */
-#define START_EWRK3 { \
- csr = inb(EWRK3_CSR);\
- csr &= ~(CSR_TXD|CSR_RXD);\
- outb(csr, EWRK3_CSR); /* Enable the TX and/or RX */\
-}
-
-#define STOP_EWRK3 { \
- csr = (CSR_TXD|CSR_RXD);\
- outb(csr, EWRK3_CSR); /* Disable the TX and/or RX */\
-}
-
-/*
- ** The EtherWORKS 3 private structure
- */
-#define EWRK3_PKT_STAT_SZ 16
-#define EWRK3_PKT_BIN_SZ 128 /* Should be >=100 unless you
- increase EWRK3_PKT_STAT_SZ */
-
-struct ewrk3_stats {
- u32 bins[EWRK3_PKT_STAT_SZ];
- u32 unicast;
- u32 multicast;
- u32 broadcast;
- u32 excessive_collisions;
- u32 tx_underruns;
- u32 excessive_underruns;
-};
-
-struct ewrk3_private {
- char adapter_name[80]; /* Name exported to /proc/ioports */
- u_long shmem_base; /* Shared memory start address */
- void __iomem *shmem;
- u_long shmem_length; /* Shared memory window length */
- struct ewrk3_stats pktStats; /* Private stats counters */
- u_char irq_mask; /* Adapter IRQ mask bits */
- u_char mPage; /* Maximum 2kB Page number */
- u_char lemac; /* Chip rev. level */
- u_char hard_strapped; /* Don't allow a full open */
- u_char txc; /* Transmit cut through */
- void __iomem *mctbl; /* Pointer to the multicast table */
- u_char led_mask; /* Used to reserve LED access for ethtool */
- spinlock_t hw_lock;
-};
-
-/*
- ** Force the EtherWORKS 3 card to be in 2kB MODE
- */
-#define FORCE_2K_MODE { \
- shmem_length = SHMEM_2K;\
- outb(((mem_start - 0x80000) >> 11), EWRK3_MBR);\
-}
-
-/*
- ** Public Functions
- */
-static int ewrk3_open(struct net_device *dev);
-static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t ewrk3_interrupt(int irq, void *dev_id);
-static int ewrk3_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static const struct ethtool_ops ethtool_ops_203;
-static const struct ethtool_ops ethtool_ops;
-
-/*
- ** Private functions
- */
-static int ewrk3_hw_init(struct net_device *dev, u_long iobase);
-static void ewrk3_init(struct net_device *dev);
-static int ewrk3_rx(struct net_device *dev);
-static int ewrk3_tx(struct net_device *dev);
-static void ewrk3_timeout(struct net_device *dev);
-
-static void EthwrkSignature(char *name, char *eeprom_image);
-static int DevicePresent(u_long iobase);
-static void SetMulticastFilter(struct net_device *dev);
-static int EISA_signature(char *name, s32 eisa_id);
-
-static int Read_EEPROM(u_long iobase, u_char eaddr);
-static int Write_EEPROM(short data, u_long iobase, u_char eaddr);
-static u_char get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType);
-
-static int ewrk3_probe1(struct net_device *dev, u_long iobase, int irq);
-static int isa_probe(struct net_device *dev, u_long iobase);
-static int eisa_probe(struct net_device *dev, u_long iobase);
-
-static u_char irq[MAX_NUM_EWRK3S+1] = {5, 0, 10, 3, 11, 9, 15, 12};
-
-static char name[EWRK3_STRLEN + 1];
-static int num_ewrks3s;
-
-/*
- ** Miscellaneous defines...
- */
-#define INIT_EWRK3 {\
- outb(EEPROM_INIT, EWRK3_IOPR);\
- mdelay(1);\
-}
-
-#ifndef MODULE
-struct net_device * __init ewrk3_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct ewrk3_private));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- err = ewrk3_probe1(dev, dev->base_addr, dev->irq);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-
-}
-#endif
-
-static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq)
-{
- int err;
-
- dev->base_addr = iobase;
- dev->irq = irq;
-
- /* Address PROM pattern */
- err = isa_probe(dev, iobase);
- if (err != 0)
- err = eisa_probe(dev, iobase);
-
- if (err)
- return err;
-
- err = register_netdev(dev);
- if (err)
- release_region(dev->base_addr, EWRK3_TOTAL_SIZE);
-
- return err;
-}
-
-static const struct net_device_ops ewrk3_netdev_ops = {
- .ndo_open = ewrk3_open,
- .ndo_start_xmit = ewrk3_queue_pkt,
- .ndo_stop = ewrk3_close,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_do_ioctl = ewrk3_ioctl,
- .ndo_tx_timeout = ewrk3_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init
-ewrk3_hw_init(struct net_device *dev, u_long iobase)
-{
- struct ewrk3_private *lp;
- int i, status = 0;
- u_long mem_start, shmem_length;
- u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0;
- u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0;
-
- /*
- ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot.
- ** This also disables the EISA_ENABLE bit in the EISA Control Register.
- */
- if (iobase > 0x400)
- eisa_cr = inb(EISA_CR);
- INIT_EWRK3;
-
- nicsr = inb(EWRK3_CSR);
-
- icr = inb(EWRK3_ICR);
- icr &= 0x70;
- outb(icr, EWRK3_ICR); /* Disable all the IRQs */
-
- if (nicsr != (CSR_TXD | CSR_RXD))
- return -ENXIO;
-
- /* Check that the EEPROM is alive and well and not living on Pluto... */
- for (chksum = 0, i = 0; i < EEPROM_MAX; i += 2) {
- union {
- short val;
- char c[2];
- } tmp;
-
- tmp.val = (short) Read_EEPROM(iobase, (i >> 1));
- eeprom_image[i] = tmp.c[0];
- eeprom_image[i + 1] = tmp.c[1];
- chksum += eeprom_image[i] + eeprom_image[i + 1];
- }
-
- if (chksum != 0) { /* Bad EEPROM Data! */
- printk("%s: Device has a bad on-board EEPROM.\n", dev->name);
- return -ENXIO;
- }
-
- EthwrkSignature(name, eeprom_image);
- if (*name == '\0')
- return -ENXIO;
-
- dev->base_addr = iobase;
-
- if (iobase > 0x400) {
- outb(eisa_cr, EISA_CR); /* Rewrite the EISA CR */
- }
- lemac = eeprom_image[EEPROM_CHIPVER];
- cmr = inb(EWRK3_CMR);
-
- if (((lemac == LeMAC) && ((cmr & CMR_NO_EEPROM) != CMR_NO_EEPROM)) ||
- ((lemac == LeMAC2) && !(cmr & CMR_HS))) {
- printk("%s: %s at %#4lx", dev->name, name, iobase);
- hard_strapped = 1;
- } else if ((iobase & 0x0fff) == EWRK3_EISA_IO_PORTS) {
- /* EISA slot address */
- printk("%s: %s at %#4lx (EISA slot %ld)",
- dev->name, name, iobase, ((iobase >> 12) & 0x0f));
- } else { /* ISA port address */
- printk("%s: %s at %#4lx", dev->name, name, iobase);
- }
-
- printk(", h/w address ");
- if (lemac != LeMAC2)
- DevicePresent(iobase); /* need after EWRK3_INIT */
- status = get_hw_addr(dev, eeprom_image, lemac);
- printk("%pM\n", dev->dev_addr);
-
- if (status) {
- printk(" which has an EEPROM CRC error.\n");
- return -ENXIO;
- }
-
- if (lemac == LeMAC2) { /* Special LeMAC2 CMR things */
- cmr &= ~(CMR_RA | CMR_WB | CMR_LINK | CMR_POLARITY | CMR_0WS);
- if (eeprom_image[EEPROM_MISC0] & READ_AHEAD)
- cmr |= CMR_RA;
- if (eeprom_image[EEPROM_MISC0] & WRITE_BEHIND)
- cmr |= CMR_WB;
- if (eeprom_image[EEPROM_NETMAN0] & NETMAN_POL)
- cmr |= CMR_POLARITY;
- if (eeprom_image[EEPROM_NETMAN0] & NETMAN_LINK)
- cmr |= CMR_LINK;
- if (eeprom_image[EEPROM_MISC0] & _0WS_ENA)
- cmr |= CMR_0WS;
- }
- if (eeprom_image[EEPROM_SETUP] & SETUP_DRAM)
- cmr |= CMR_DRAM;
- outb(cmr, EWRK3_CMR);
-
- cr = inb(EWRK3_CR); /* Set up the Control Register */
- cr |= eeprom_image[EEPROM_SETUP] & SETUP_APD;
- if (cr & SETUP_APD)
- cr |= eeprom_image[EEPROM_SETUP] & SETUP_PS;
- cr |= eeprom_image[EEPROM_MISC0] & FAST_BUS;
- cr |= eeprom_image[EEPROM_MISC0] & ENA_16;
- outb(cr, EWRK3_CR);
-
- /*
- ** Determine the base address and window length for the EWRK3
- ** RAM from the memory base register.
- */
- mem_start = inb(EWRK3_MBR);
- shmem_length = 0;
- if (mem_start != 0) {
- if ((mem_start >= 0x0a) && (mem_start <= 0x0f)) {
- mem_start *= SHMEM_64K;
- shmem_length = SHMEM_64K;
- } else if ((mem_start >= 0x14) && (mem_start <= 0x1f)) {
- mem_start *= SHMEM_32K;
- shmem_length = SHMEM_32K;
- } else if ((mem_start >= 0x40) && (mem_start <= 0xff)) {
- mem_start = mem_start * SHMEM_2K + 0x80000;
- shmem_length = SHMEM_2K;
- } else {
- return -ENXIO;
- }
- }
- /*
- ** See the top of this source code for comments about
- ** uncommenting this line.
- */
-/* FORCE_2K_MODE; */
-
- if (hard_strapped) {
- printk(" is hard strapped.\n");
- } else if (mem_start) {
- printk(" has a %dk RAM window", (int) (shmem_length >> 10));
- printk(" at 0x%.5lx", mem_start);
- } else {
- printk(" is in I/O only mode");
- }
-
- lp = netdev_priv(dev);
- lp->shmem_base = mem_start;
- lp->shmem = ioremap(mem_start, shmem_length);
- if (!lp->shmem)
- return -ENOMEM;
- lp->shmem_length = shmem_length;
- lp->lemac = lemac;
- lp->hard_strapped = hard_strapped;
- lp->led_mask = CR_LED;
- spin_lock_init(&lp->hw_lock);
-
- lp->mPage = 64;
- if (cmr & CMR_DRAM)
- lp->mPage <<= 1; /* 2 DRAMS on module */
-
- sprintf(lp->adapter_name, "%s (%s)", name, dev->name);
-
- lp->irq_mask = ICR_TNEM | ICR_TXDM | ICR_RNEM | ICR_RXDM;
-
- if (!hard_strapped) {
- /*
- ** Enable EWRK3 board interrupts for autoprobing
- */
- icr |= ICR_IE; /* Enable interrupts */
- outb(icr, EWRK3_ICR);
-
- /* The DMA channel may be passed in on this parameter. */
- dev->dma = 0;
-
- /* To auto-IRQ we enable the initialization-done and DMA err,
- interrupts. For now we will always get a DMA error. */
- if (dev->irq < 2) {
-#ifndef MODULE
- u_char irqnum;
- unsigned long irq_mask;
-
-
- irq_mask = probe_irq_on();
-
- /*
- ** Trigger a TNE interrupt.
- */
- icr |= ICR_TNEM;
- outb(1, EWRK3_TDQ); /* Write to the TX done queue */
- outb(icr, EWRK3_ICR); /* Unmask the TXD interrupt */
-
- irqnum = irq[((icr & IRQ_SEL) >> 4)];
-
- mdelay(20);
- dev->irq = probe_irq_off(irq_mask);
- if ((dev->irq) && (irqnum == dev->irq)) {
- printk(" and uses IRQ%d.\n", dev->irq);
- } else {
- if (!dev->irq) {
- printk(" and failed to detect IRQ line.\n");
- } else if ((irqnum == 1) && (lemac == LeMAC2)) {
- printk(" and an illegal IRQ line detected.\n");
- } else {
- printk(", but incorrect IRQ line detected.\n");
- }
- iounmap(lp->shmem);
- return -ENXIO;
- }
-
- DISABLE_IRQs; /* Mask all interrupts */
-
-#endif /* MODULE */
- } else {
- printk(" and requires IRQ%d.\n", dev->irq);
- }
- }
-
- if (ewrk3_debug > 1) {
- printk(version);
- }
- /* The EWRK3-specific entries in the device structure. */
- dev->netdev_ops = &ewrk3_netdev_ops;
- if (lp->adapter_name[4] == '3')
- SET_ETHTOOL_OPS(dev, &ethtool_ops_203);
- else
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
- dev->watchdog_timeo = QUEUE_PKT_TIMEOUT;
-
- dev->mem_start = 0;
-
- return 0;
-}
-
-
-static int ewrk3_open(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int status = 0;
- u_char icr, csr;
-
- /*
- ** Stop the TX and RX...
- */
- STOP_EWRK3;
-
- if (!lp->hard_strapped) {
- if (request_irq(dev->irq, (void *) ewrk3_interrupt, 0, "ewrk3", dev)) {
- printk("ewrk3_open(): Requested IRQ%d is busy\n", dev->irq);
- status = -EAGAIN;
- } else {
-
- /*
- ** Re-initialize the EWRK3...
- */
- ewrk3_init(dev);
-
- if (ewrk3_debug > 1) {
- printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq);
- printk(" physical address: %pM\n", dev->dev_addr);
- if (lp->shmem_length == 0) {
- printk(" no shared memory, I/O only mode\n");
- } else {
- printk(" start of shared memory: 0x%08lx\n", lp->shmem_base);
- printk(" window length: 0x%04lx\n", lp->shmem_length);
- }
- printk(" # of DRAMS: %d\n", ((inb(EWRK3_CMR) & 0x02) ? 2 : 1));
- printk(" csr: 0x%02x\n", inb(EWRK3_CSR));
- printk(" cr: 0x%02x\n", inb(EWRK3_CR));
- printk(" icr: 0x%02x\n", inb(EWRK3_ICR));
- printk(" cmr: 0x%02x\n", inb(EWRK3_CMR));
- printk(" fmqc: 0x%02x\n", inb(EWRK3_FMQC));
- }
- netif_start_queue(dev);
- /*
- ** Unmask EWRK3 board interrupts
- */
- icr = inb(EWRK3_ICR);
- ENABLE_IRQs;
-
- }
- } else {
- printk(KERN_ERR "%s: ewrk3 available for hard strapped set up only.\n", dev->name);
- printk(KERN_ERR " Run the 'ewrk3setup' utility or remove the hard straps.\n");
- return -EINVAL;
- }
-
- return status;
-}
-
-/*
- ** Initialize the EtherWORKS 3 operating conditions
- */
-static void ewrk3_init(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_char csr, page;
- u_long iobase = dev->base_addr;
- int i;
-
- /*
- ** Enable any multicasts
- */
- set_multicast_list(dev);
-
- /*
- ** Set hardware MAC address. Address is initialized from the EEPROM
- ** during startup but may have since been changed by the user.
- */
- for (i=0; i<ETH_ALEN; i++)
- outb(dev->dev_addr[i], EWRK3_PAR0 + i);
-
- /*
- ** Clean out any remaining entries in all the queues here
- */
- while (inb(EWRK3_TQ));
- while (inb(EWRK3_TDQ));
- while (inb(EWRK3_RQ));
- while (inb(EWRK3_FMQ));
-
- /*
- ** Write a clean free memory queue
- */
- for (page = 1; page < lp->mPage; page++) { /* Write the free page numbers */
- outb(page, EWRK3_FMQ); /* to the Free Memory Queue */
- }
-
- START_EWRK3; /* Enable the TX and/or RX */
-}
-
-/*
- * Transmit timeout
- */
-
-static void ewrk3_timeout(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_char icr, csr;
- u_long iobase = dev->base_addr;
-
- if (!lp->hard_strapped)
- {
- printk(KERN_WARNING"%s: transmit timed/locked out, status %04x, resetting.\n",
- dev->name, inb(EWRK3_CSR));
-
- /*
- ** Mask all board interrupts
- */
- DISABLE_IRQs;
-
- /*
- ** Stop the TX and RX...
- */
- STOP_EWRK3;
-
- ewrk3_init(dev);
-
- /*
- ** Unmask EWRK3 board interrupts
- */
- ENABLE_IRQs;
-
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
- }
-}
-
-/*
- ** Writes a socket buffer to the free page queue
- */
-static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- void __iomem *buf = NULL;
- u_char icr;
- u_char page;
-
- spin_lock_irq (&lp->hw_lock);
- DISABLE_IRQs;
-
- /* if no resources available, exit, request packet be queued */
- if (inb (EWRK3_FMQC) == 0) {
- printk (KERN_WARNING "%s: ewrk3_queue_pkt(): No free resources...\n",
- dev->name);
- printk (KERN_WARNING "%s: ewrk3_queue_pkt(): CSR: %02x ICR: %02x FMQC: %02x\n",
- dev->name, inb (EWRK3_CSR), inb (EWRK3_ICR),
- inb (EWRK3_FMQC));
- goto err_out;
- }
-
- /*
- ** Get a free page from the FMQ
- */
- if ((page = inb (EWRK3_FMQ)) >= lp->mPage) {
- printk ("ewrk3_queue_pkt(): Invalid free memory page (%d).\n",
- (u_char) page);
- goto err_out;
- }
-
-
- /*
- ** Set up shared memory window and pointer into the window
- */
- if (lp->shmem_length == IO_ONLY) {
- outb (page, EWRK3_IOPR);
- } else if (lp->shmem_length == SHMEM_2K) {
- buf = lp->shmem;
- outb (page, EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_32K) {
- buf = (((short) page << 11) & 0x7800) + lp->shmem;
- outb ((page >> 4), EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_64K) {
- buf = (((short) page << 11) & 0xf800) + lp->shmem;
- outb ((page >> 5), EWRK3_MPR);
- } else {
- printk (KERN_ERR "%s: Oops - your private data area is hosed!\n",
- dev->name);
- BUG ();
- }
-
- /*
- ** Set up the buffer control structures and copy the data from
- ** the socket buffer to the shared memory .
- */
- if (lp->shmem_length == IO_ONLY) {
- int i;
- u_char *p = skb->data;
- outb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), EWRK3_DATA);
- outb ((char) (skb->len & 0xff), EWRK3_DATA);
- outb ((char) ((skb->len >> 8) & 0xff), EWRK3_DATA);
- outb ((char) 0x04, EWRK3_DATA);
- for (i = 0; i < skb->len; i++) {
- outb (*p++, EWRK3_DATA);
- }
- outb (page, EWRK3_TQ); /* Start sending pkt */
- } else {
- writeb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), buf); /* ctrl byte */
- buf += 1;
- writeb ((char) (skb->len & 0xff), buf); /* length (16 bit xfer) */
- buf += 1;
- if (lp->txc) {
- writeb(((skb->len >> 8) & 0xff) | XCT, buf);
- buf += 1;
- writeb (0x04, buf); /* index byte */
- buf += 1;
- writeb (0x00, (buf + skb->len)); /* Write the XCT flag */
- memcpy_toio (buf, skb->data, PRELOAD); /* Write PRELOAD bytes */
- outb (page, EWRK3_TQ); /* Start sending pkt */
- memcpy_toio (buf + PRELOAD,
- skb->data + PRELOAD,
- skb->len - PRELOAD);
- writeb (0xff, (buf + skb->len)); /* Write the XCT flag */
- } else {
- writeb ((skb->len >> 8) & 0xff, buf);
- buf += 1;
- writeb (0x04, buf); /* index byte */
- buf += 1;
- memcpy_toio (buf, skb->data, skb->len); /* Write data bytes */
- outb (page, EWRK3_TQ); /* Start sending pkt */
- }
- }
-
- ENABLE_IRQs;
- spin_unlock_irq (&lp->hw_lock);
-
- dev->stats.tx_bytes += skb->len;
- dev_kfree_skb (skb);
-
- /* Check for free resources: stop Tx queue if there are none */
- if (inb (EWRK3_FMQC) == 0)
- netif_stop_queue (dev);
-
- return NETDEV_TX_OK;
-
-err_out:
- ENABLE_IRQs;
- spin_unlock_irq (&lp->hw_lock);
- return NETDEV_TX_BUSY;
-}
-
-/*
- ** The EWRK3 interrupt handler.
- */
-static irqreturn_t ewrk3_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct ewrk3_private *lp;
- u_long iobase;
- u_char icr, cr, csr;
-
- lp = netdev_priv(dev);
- iobase = dev->base_addr;
-
- /* get the interrupt information */
- csr = inb(EWRK3_CSR);
-
- /*
- ** Mask the EWRK3 board interrupts and turn on the LED
- */
- spin_lock(&lp->hw_lock);
- DISABLE_IRQs;
-
- cr = inb(EWRK3_CR);
- cr |= lp->led_mask;
- outb(cr, EWRK3_CR);
-
- if (csr & CSR_RNE) /* Rx interrupt (packet[s] arrived) */
- ewrk3_rx(dev);
-
- if (csr & CSR_TNE) /* Tx interrupt (packet sent) */
- ewrk3_tx(dev);
-
- /*
- ** Now deal with the TX/RX disable flags. These are set when there
- ** are no more resources. If resources free up then enable these
- ** interrupts, otherwise mask them - failure to do this will result
- ** in the system hanging in an interrupt loop.
- */
- if (inb(EWRK3_FMQC)) { /* any resources available? */
- lp->irq_mask |= ICR_TXDM | ICR_RXDM; /* enable the interrupt source */
- csr &= ~(CSR_TXD | CSR_RXD); /* ensure restart of a stalled TX or RX */
- outb(csr, EWRK3_CSR);
- netif_wake_queue(dev);
- } else {
- lp->irq_mask &= ~(ICR_TXDM | ICR_RXDM); /* disable the interrupt source */
- }
-
- /* Unmask the EWRK3 board interrupts and turn off the LED */
- cr &= ~(lp->led_mask);
- outb(cr, EWRK3_CR);
- ENABLE_IRQs;
- spin_unlock(&lp->hw_lock);
- return IRQ_HANDLED;
-}
-
-/* Called with lp->hw_lock held */
-static int ewrk3_rx(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, status = 0;
- u_char page;
- void __iomem *buf = NULL;
-
- while (inb(EWRK3_RQC) && !status) { /* Whilst there's incoming data */
- if ((page = inb(EWRK3_RQ)) < lp->mPage) { /* Get next entry's buffer page */
- /*
- ** Set up shared memory window and pointer into the window
- */
- if (lp->shmem_length == IO_ONLY) {
- outb(page, EWRK3_IOPR);
- } else if (lp->shmem_length == SHMEM_2K) {
- buf = lp->shmem;
- outb(page, EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_32K) {
- buf = (((short) page << 11) & 0x7800) + lp->shmem;
- outb((page >> 4), EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_64K) {
- buf = (((short) page << 11) & 0xf800) + lp->shmem;
- outb((page >> 5), EWRK3_MPR);
- } else {
- status = -1;
- printk("%s: Oops - your private data area is hosed!\n", dev->name);
- }
-
- if (!status) {
- char rx_status;
- int pkt_len;
-
- if (lp->shmem_length == IO_ONLY) {
- rx_status = inb(EWRK3_DATA);
- pkt_len = inb(EWRK3_DATA);
- pkt_len |= ((u_short) inb(EWRK3_DATA) << 8);
- } else {
- rx_status = readb(buf);
- buf += 1;
- pkt_len = readw(buf);
- buf += 3;
- }
-
- if (!(rx_status & R_ROK)) { /* There was an error. */
- dev->stats.rx_errors++; /* Update the error stats. */
- if (rx_status & R_DBE)
- dev->stats.rx_frame_errors++;
- if (rx_status & R_CRC)
- dev->stats.rx_crc_errors++;
- if (rx_status & R_PLL)
- dev->stats.rx_fifo_errors++;
- } else {
- struct sk_buff *skb;
- skb = netdev_alloc_skb(dev,
- pkt_len + 2);
-
- if (skb != NULL) {
- unsigned char *p;
- skb_reserve(skb, 2); /* Align to 16 bytes */
- p = skb_put(skb, pkt_len);
-
- if (lp->shmem_length == IO_ONLY) {
- *p = inb(EWRK3_DATA); /* dummy read */
- for (i = 0; i < pkt_len; i++) {
- *p++ = inb(EWRK3_DATA);
- }
- } else {
- memcpy_fromio(p, buf, pkt_len);
- }
-
- for (i = 1; i < EWRK3_PKT_STAT_SZ - 1; i++) {
- if (pkt_len < i * EWRK3_PKT_BIN_SZ) {
- lp->pktStats.bins[i]++;
- i = EWRK3_PKT_STAT_SZ;
- }
- }
- p = skb->data; /* Look at the dest addr */
- if (is_multicast_ether_addr(p)) {
- if (is_broadcast_ether_addr(p)) {
- lp->pktStats.broadcast++;
- } else {
- lp->pktStats.multicast++;
- }
- } else if (ether_addr_equal(p,
- dev->dev_addr)) {
- lp->pktStats.unicast++;
- }
- lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
- if (lp->pktStats.bins[0] == 0) { /* Reset counters */
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- }
- /*
- ** Notify the upper protocol layers that there is another
- ** packet to handle
- */
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
-
- /*
- ** Update stats
- */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- } else {
- printk("%s: Insufficient memory; nuking packet.\n", dev->name);
- dev->stats.rx_dropped++; /* Really, deferred. */
- break;
- }
- }
- }
- /*
- ** Return the received buffer to the free memory queue
- */
- outb(page, EWRK3_FMQ);
- } else {
- printk("ewrk3_rx(): Illegal page number, page %d\n", page);
- printk("ewrk3_rx(): CSR: %02x ICR: %02x FMQC: %02x\n", inb(EWRK3_CSR), inb(EWRK3_ICR), inb(EWRK3_FMQC));
- }
- }
- return status;
-}
-
-/*
-** Buffer sent - check for TX buffer errors.
-** Called with lp->hw_lock held
-*/
-static int ewrk3_tx(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char tx_status;
-
- while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */
- if (tx_status & T_VSTS) { /* The status is valid */
- if (tx_status & T_TXE) {
- dev->stats.tx_errors++;
- if (tx_status & T_NCL)
- dev->stats.tx_carrier_errors++;
- if (tx_status & T_LCL)
- dev->stats.tx_window_errors++;
- if (tx_status & T_CTU) {
- if ((tx_status & T_COLL) ^ T_XUR) {
- lp->pktStats.tx_underruns++;
- } else {
- lp->pktStats.excessive_underruns++;
- }
- } else if (tx_status & T_COLL) {
- if ((tx_status & T_COLL) ^ T_XCOLL) {
- dev->stats.collisions++;
- } else {
- lp->pktStats.excessive_collisions++;
- }
- }
- } else {
- dev->stats.tx_packets++;
- }
- }
- }
-
- return 0;
-}
-
-static int ewrk3_close(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char icr, csr;
-
- netif_stop_queue(dev);
-
- if (ewrk3_debug > 1) {
- printk("%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, inb(EWRK3_CSR));
- }
- /*
- ** We stop the EWRK3 here... mask interrupts and stop TX & RX
- */
- DISABLE_IRQs;
-
- STOP_EWRK3;
-
- /*
- ** Clean out the TX and RX queues here (note that one entry
- ** may get added to either the TXD or RX queues if the TX or RX
- ** just starts processing a packet before the STOP_EWRK3 command
- ** is received. This will be flushed in the ewrk3_open() call).
- */
- while (inb(EWRK3_TQ));
- while (inb(EWRK3_TDQ));
- while (inb(EWRK3_RQ));
-
- if (!lp->hard_strapped) {
- free_irq(dev->irq, dev);
- }
- return 0;
-}
-
-/*
- ** Set or clear the multicast filter for this adapter.
- */
-static void set_multicast_list(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char csr;
-
- csr = inb(EWRK3_CSR);
-
- if (lp->shmem_length == IO_ONLY) {
- lp->mctbl = NULL;
- } else {
- lp->mctbl = lp->shmem + PAGE0_HTE;
- }
-
- csr &= ~(CSR_PME | CSR_MCE);
- if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
- csr |= CSR_PME;
- outb(csr, EWRK3_CSR);
- } else {
- SetMulticastFilter(dev);
- csr |= CSR_MCE;
- outb(csr, EWRK3_CSR);
- }
-}
-
-/*
- ** Calculate the hash code and update the logical address filter
- ** from a list of ethernet multicast addresses.
- ** Little endian crc one liner from Matt Thomas, DEC.
- **
- ** Note that when clearing the table, the broadcast bit must remain asserted
- ** to receive broadcast messages.
- */
-static void SetMulticastFilter(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- u_long iobase = dev->base_addr;
- int i;
- char bit, byte;
- short __iomem *p = lp->mctbl;
- u16 hashcode;
- u32 crc;
-
- spin_lock_irq(&lp->hw_lock);
-
- if (lp->shmem_length == IO_ONLY) {
- outb(0, EWRK3_IOPR);
- outw(PAGE0_HTE, EWRK3_PIR1);
- } else {
- outb(0, EWRK3_MPR);
- }
-
- if (dev->flags & IFF_ALLMULTI) {
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
- if (lp->shmem_length == IO_ONLY) {
- outb(0xff, EWRK3_DATA);
- } else { /* memset didn't work here */
- writew(0xffff, p);
- p++;
- i++;
- }
- }
- } else {
- /* Clear table except for broadcast bit */
- if (lp->shmem_length == IO_ONLY) {
- for (i = 0; i < (HASH_TABLE_LEN >> 4) - 1; i++) {
- outb(0x00, EWRK3_DATA);
- }
- outb(0x80, EWRK3_DATA);
- i++; /* insert the broadcast bit */
- for (; i < (HASH_TABLE_LEN >> 3); i++) {
- outb(0x00, EWRK3_DATA);
- }
- } else {
- memset_io(lp->mctbl, 0, HASH_TABLE_LEN >> 3);
- writeb(0x80, lp->mctbl + (HASH_TABLE_LEN >> 4) - 1);
- }
-
- /* Update table */
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
-
- byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
- bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
-
- if (lp->shmem_length == IO_ONLY) {
- u_char tmp;
-
- outw(PAGE0_HTE + byte, EWRK3_PIR1);
- tmp = inb(EWRK3_DATA);
- tmp |= bit;
- outw(PAGE0_HTE + byte, EWRK3_PIR1);
- outb(tmp, EWRK3_DATA);
- } else {
- writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte);
- }
- }
- }
-
- spin_unlock_irq(&lp->hw_lock);
-}
-
-/*
- ** ISA bus I/O device probe
- */
-static int __init isa_probe(struct net_device *dev, u_long ioaddr)
-{
- int i = num_ewrks3s, maxSlots;
- int ret = -ENODEV;
-
- u_long iobase;
-
- if (ioaddr >= 0x400)
- goto out;
-
- if (ioaddr == 0) { /* Autoprobing */
- iobase = EWRK3_IO_BASE; /* Get the first slot address */
- maxSlots = 24;
- } else { /* Probe a specific location */
- iobase = ioaddr;
- maxSlots = i + 1;
- }
-
- for (; (i < maxSlots) && (dev != NULL);
- iobase += EWRK3_IOP_INC, i++)
- {
- if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME)) {
- if (DevicePresent(iobase) == 0) {
- int irq = dev->irq;
- ret = ewrk3_hw_init(dev, iobase);
- if (!ret)
- break;
- dev->irq = irq;
- }
- release_region(iobase, EWRK3_TOTAL_SIZE);
- }
- }
- out:
-
- return ret;
-}
-
-/*
- ** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
- ** the motherboard.
- */
-static int __init eisa_probe(struct net_device *dev, u_long ioaddr)
-{
- int i, maxSlots;
- u_long iobase;
- int ret = -ENODEV;
-
- if (ioaddr < 0x1000)
- goto out;
-
- iobase = ioaddr;
- i = (ioaddr >> 12);
- maxSlots = i + 1;
-
- for (i = 1; (i < maxSlots) && (dev != NULL); i++, iobase += EISA_SLOT_INC) {
- if (EISA_signature(name, EISA_ID) == 0) {
- if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME) &&
- DevicePresent(iobase) == 0) {
- int irq = dev->irq;
- ret = ewrk3_hw_init(dev, iobase);
- if (!ret)
- break;
- dev->irq = irq;
- }
- release_region(iobase, EWRK3_TOTAL_SIZE);
- }
- }
-
- out:
- return ret;
-}
-
-
-/*
- ** Read the EWRK3 EEPROM using this routine
- */
-static int Read_EEPROM(u_long iobase, u_char eaddr)
-{
- int i;
-
- outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
- outb(EEPROM_RD, EWRK3_IOPR); /* issue read command */
- for (i = 0; i < 5000; i++)
- inb(EWRK3_CSR); /* wait 1msec */
-
- return inw(EWRK3_EPROM1); /* 16 bits data return */
-}
-
-/*
- ** Write the EWRK3 EEPROM using this routine
- */
-static int Write_EEPROM(short data, u_long iobase, u_char eaddr)
-{
- int i;
-
- outb(EEPROM_WR_EN, EWRK3_IOPR); /* issue write enable command */
- for (i = 0; i < 5000; i++)
- inb(EWRK3_CSR); /* wait 1msec */
- outw(data, EWRK3_EPROM1); /* write data to register */
- outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
- outb(EEPROM_WR, EWRK3_IOPR); /* issue write command */
- for (i = 0; i < 75000; i++)
- inb(EWRK3_CSR); /* wait 15msec */
- outb(EEPROM_WR_DIS, EWRK3_IOPR); /* issue write disable command */
- for (i = 0; i < 5000; i++)
- inb(EWRK3_CSR); /* wait 1msec */
-
- return 0;
-}
-
-/*
- ** Look for a particular board name in the on-board EEPROM.
- */
-static void __init EthwrkSignature(char *name, char *eeprom_image)
-{
- int i;
- char *signatures[] = EWRK3_SIGNATURE;
-
- for (i=0; *signatures[i] != '\0'; i++)
- if( !strncmp(eeprom_image+EEPROM_PNAME7, signatures[i], strlen(signatures[i])) )
- break;
-
- if (*signatures[i] != '\0') {
- memcpy(name, eeprom_image+EEPROM_PNAME7, EWRK3_STRLEN);
- name[EWRK3_STRLEN] = '\0';
- } else
- name[0] = '\0';
-}
-
-/*
- ** Look for a special sequence in the Ethernet station address PROM that
- ** is common across all EWRK3 products.
- **
- ** Search the Ethernet address ROM for the signature. Since the ROM address
- ** counter can start at an arbitrary point, the search must include the entire
- ** probe sequence length plus the (length_of_the_signature - 1).
- ** Stop the search IMMEDIATELY after the signature is found so that the
- ** PROM address counter is correctly positioned at the start of the
- ** ethernet address for later read out.
- */
-
-static int __init DevicePresent(u_long iobase)
-{
- union {
- struct {
- u32 a;
- u32 b;
- } llsig;
- char Sig[sizeof(u32) << 1];
- }
- dev;
- short sigLength;
- char data;
- int i, j, status = 0;
-
- dev.llsig.a = ETH_PROM_SIG;
- dev.llsig.b = ETH_PROM_SIG;
- sigLength = sizeof(u32) << 1;
-
- for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
- data = inb(EWRK3_APROM);
- if (dev.Sig[j] == data) { /* track signature */
- j++;
- } else { /* lost signature; begin search again */
- if (data == dev.Sig[0]) {
- j = 1;
- } else {
- j = 0;
- }
- }
- }
-
- if (j != sigLength) {
- status = -ENODEV; /* search failed */
- }
- return status;
-}
-
-static u_char __init get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType)
-{
- int i, j, k;
- u_short chksum;
- u_char crc, lfsr, sd, status = 0;
- u_long iobase = dev->base_addr;
- u16 tmp;
-
- if (chipType == LeMAC2) {
- for (crc = 0x6a, j = 0; j < ETH_ALEN; j++) {
- sd = dev->dev_addr[j] = eeprom_image[EEPROM_PADDR0 + j];
- outb(dev->dev_addr[j], EWRK3_PAR0 + j);
- for (k = 0; k < 8; k++, sd >>= 1) {
- lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7;
- crc = (crc >> 1) + lfsr;
- }
- }
- if (crc != eeprom_image[EEPROM_PA_CRC])
- status = -1;
- } else {
- for (i = 0, k = 0; i < ETH_ALEN;) {
- k <<= 1;
- if (k > 0xffff)
- k -= 0xffff;
-
- k += (u_char) (tmp = inb(EWRK3_APROM));
- dev->dev_addr[i] = (u_char) tmp;
- outb(dev->dev_addr[i], EWRK3_PAR0 + i);
- i++;
- k += (u_short) ((tmp = inb(EWRK3_APROM)) << 8);
- dev->dev_addr[i] = (u_char) tmp;
- outb(dev->dev_addr[i], EWRK3_PAR0 + i);
- i++;
-
- if (k > 0xffff)
- k -= 0xffff;
- }
- if (k == 0xffff)
- k = 0;
- chksum = inb(EWRK3_APROM);
- chksum |= (inb(EWRK3_APROM) << 8);
- if (k != chksum)
- status = -1;
- }
-
- return status;
-}
-
-/*
- ** Look for a particular board name in the EISA configuration space
- */
-static int __init EISA_signature(char *name, s32 eisa_id)
-{
- u_long i;
- char *signatures[] = EWRK3_SIGNATURE;
- char ManCode[EWRK3_STRLEN];
- union {
- s32 ID;
- char Id[4];
- } Eisa;
- int status = 0;
-
- *name = '\0';
- for (i = 0; i < 4; i++) {
- Eisa.Id[i] = inb(eisa_id + i);
- }
-
- ManCode[0] = (((Eisa.Id[0] >> 2) & 0x1f) + 0x40);
- ManCode[1] = (((Eisa.Id[1] & 0xe0) >> 5) + ((Eisa.Id[0] & 0x03) << 3) + 0x40);
- ManCode[2] = (((Eisa.Id[2] >> 4) & 0x0f) + 0x30);
- ManCode[3] = ((Eisa.Id[2] & 0x0f) + 0x30);
- ManCode[4] = (((Eisa.Id[3] >> 4) & 0x0f) + 0x30);
- ManCode[5] = '\0';
-
- for (i = 0; (*signatures[i] != '\0') && (*name == '\0'); i++) {
- if (strstr(ManCode, signatures[i]) != NULL) {
- strcpy(name, ManCode);
- status = 1;
- }
- }
-
- return status; /* return the device name string */
-}
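
EISA_signature() above unpacks part of the four raw ID bytes into a printable five-character code: two compressed 5-bit letters (offset by 0x40) followed by three hex digits (offset by 0x30). A stand-alone sketch of that unpacking, fed with an arbitrary example ID instead of inb() reads, could be:

#include <stdio.h>

static void decode_eisa_id(const unsigned char id[4], char out[6])
{
        out[0] = ((id[0] >> 2) & 0x1f) + 0x40;                           /* letter 1  */
        out[1] = (((id[1] & 0xe0) >> 5) + ((id[0] & 0x03) << 3)) + 0x40; /* letter 2  */
        out[2] = ((id[2] >> 4) & 0x0f) + 0x30;                           /* hex digit */
        out[3] = (id[2] & 0x0f) + 0x30;
        out[4] = ((id[3] >> 4) & 0x0f) + 0x30;
        out[5] = '\0';
}

int main(void)
{
        unsigned char id[4] = { 0x11, 0x10, 0x20, 0x30 };       /* made-up ID bytes */
        char code[6];

        decode_eisa_id(id, code);
        printf("decoded ID: %s\n", code);
        return 0;
}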
-
-static void ewrk3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- int fwrev = Read_EEPROM(dev->base_addr, EEPROM_REVLVL);
-
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->fw_version, "%d", fwrev);
- strcpy(info->bus_info, "N/A");
- info->eedump_len = EEPROM_MAX;
-}
-
-static int ewrk3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- unsigned long iobase = dev->base_addr;
- u8 cr = inb(EWRK3_CR);
-
- switch (lp->adapter_name[4]) {
- case '3': /* DE203 */
- ecmd->supported = SUPPORTED_BNC;
- ecmd->port = PORT_BNC;
- break;
-
- case '4': /* DE204 */
- ecmd->supported = SUPPORTED_TP;
- ecmd->port = PORT_TP;
- break;
-
- case '5': /* DE205 */
- ecmd->supported = SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_AUI;
- ecmd->autoneg = !(cr & CR_APD);
- /*
- ** Port is only valid if autoneg is disabled
- ** and even then we don't know if AUI is jumpered.
- */
- if (!ecmd->autoneg)
- ecmd->port = (cr & CR_PSEL) ? PORT_BNC : PORT_TP;
- break;
- }
-
- ecmd->supported |= SUPPORTED_10baseT_Half;
- ethtool_cmd_speed_set(ecmd, SPEED_10);
- ecmd->duplex = DUPLEX_HALF;
- return 0;
-}
-
-static int ewrk3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- unsigned long iobase = dev->base_addr;
- unsigned long flags;
- u8 cr;
-
- /* DE205 is the only card with anything to set */
- if (lp->adapter_name[4] != '5')
- return -EOPNOTSUPP;
-
- /* Sanity-check parameters */
- if (ecmd->speed != SPEED_10)
- return -EINVAL;
- if (ecmd->port != PORT_TP && ecmd->port != PORT_BNC)
- return -EINVAL; /* AUI is not software-selectable */
- if (ecmd->transceiver != XCVR_INTERNAL)
- return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF)
- return -EINVAL;
- if (ecmd->phy_address != 0)
- return -EINVAL;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- cr = inb(EWRK3_CR);
-
- /* If Autoneg is set, change to Auto Port mode */
- /* Otherwise, disable Auto Port and set port explicitly */
- if (ecmd->autoneg) {
- cr &= ~CR_APD;
- } else {
- cr |= CR_APD;
- if (ecmd->port == PORT_TP)
- cr &= ~CR_PSEL; /* Force TP */
- else
- cr |= CR_PSEL; /* Force BNC */
- }
-
- /* Commit the changes */
- outb(cr, EWRK3_CR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- return 0;
-}
-
-static u32 ewrk3_get_link(struct net_device *dev)
-{
- unsigned long iobase = dev->base_addr;
- u8 cmr = inb(EWRK3_CMR);
- /* DE203 has BNC only and link status does not apply */
- /* On DE204 this is always valid since TP is the only port. */
- /* On DE205 this reflects TP status even if BNC or AUI is selected. */
- return !(cmr & CMR_LINK);
-}
-
-static int ewrk3_set_phys_id(struct net_device *dev,
- enum ethtool_phys_id_state state)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- unsigned long iobase = dev->base_addr;
- u8 cr;
-
- spin_lock_irq(&lp->hw_lock);
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- /* Prevent ISR from twiddling the LED */
- lp->led_mask = 0;
- spin_unlock_irq(&lp->hw_lock);
- return 2; /* cycle on/off twice per second */
-
- case ETHTOOL_ID_ON:
- cr = inb(EWRK3_CR);
- outb(cr | CR_LED, EWRK3_CR);
- break;
-
- case ETHTOOL_ID_OFF:
- cr = inb(EWRK3_CR);
- outb(cr & ~CR_LED, EWRK3_CR);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- lp->led_mask = CR_LED;
- cr = inb(EWRK3_CR);
- outb(cr & ~CR_LED, EWRK3_CR);
- }
- spin_unlock_irq(&lp->hw_lock);
-
- return 0;
-}
-
-static const struct ethtool_ops ethtool_ops_203 = {
- .get_drvinfo = ewrk3_get_drvinfo,
- .get_settings = ewrk3_get_settings,
- .set_settings = ewrk3_set_settings,
- .set_phys_id = ewrk3_set_phys_id,
-};
-
-static const struct ethtool_ops ethtool_ops = {
- .get_drvinfo = ewrk3_get_drvinfo,
- .get_settings = ewrk3_get_settings,
- .set_settings = ewrk3_set_settings,
- .get_link = ewrk3_get_link,
- .set_phys_id = ewrk3_set_phys_id,
-};
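
For context, these operations were reachable from userspace through the legacy ETHTOOL_GSET command. A hedged sketch of such a query follows; this is the generic ethtool ioctl path rather than anything specific to this driver, and the interface name is only an example:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        struct ethtool_cmd ecmd;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        memset(&ecmd, 0, sizeof(ecmd));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* example interface */
        ecmd.cmd = ETHTOOL_GSET;
        ifr.ifr_data = (char *)&ecmd;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("speed %u Mb/s, port %u, autoneg %u\n",
                       ethtool_cmd_speed(&ecmd), ecmd.port, ecmd.autoneg);
        close(fd);
        return 0;
}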
-
-/*
- ** Perform IOCTL call functions here. Some are privileged operations and the
- ** CAP_NET_ADMIN capability is checked in those cases.
- */
-static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *) &rq->ifr_ifru;
- u_long iobase = dev->base_addr;
- int i, j, status = 0;
- u_char csr;
- unsigned long flags;
- union ewrk3_addr {
- u_char addr[HASH_TABLE_LEN * ETH_ALEN];
- u_short val[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
- };
-
- union ewrk3_addr *tmp;
-
- /* All we handle are private IOCTLs */
- if (cmd != EWRK3IOCTL)
- return -EOPNOTSUPP;
-
- tmp = kmalloc(sizeof(union ewrk3_addr), GFP_KERNEL);
- if(tmp==NULL)
- return -ENOMEM;
-
- switch (ioc->cmd) {
- case EWRK3_GET_HWADDR: /* Get the hardware address */
- for (i = 0; i < ETH_ALEN; i++) {
- tmp->addr[i] = dev->dev_addr[i];
- }
- ioc->len = ETH_ALEN;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- break;
-
- case EWRK3_SET_HWADDR: /* Set the hardware address */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr |= (CSR_TXD | CSR_RXD);
- outb(csr, EWRK3_CSR); /* Disable the TX and RX */
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-
- if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN)) {
- status = -EFAULT;
- break;
- }
- spin_lock_irqsave(&lp->hw_lock, flags);
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = tmp->addr[i];
- outb(tmp->addr[i], EWRK3_PAR0 + i);
- }
-
- csr = inb(EWRK3_CSR);
- csr &= ~(CSR_TXD | CSR_RXD); /* Enable the TX and RX */
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_SET_PROM: /* Set Promiscuous Mode */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr |= CSR_PME;
- csr &= ~CSR_MCE;
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr &= ~CSR_PME;
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_MCA: /* Get the multicast address table */
- spin_lock_irqsave(&lp->hw_lock, flags);
- if (lp->shmem_length == IO_ONLY) {
- outb(0, EWRK3_IOPR);
- outw(PAGE0_HTE, EWRK3_PIR1);
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
- tmp->addr[i] = inb(EWRK3_DATA);
- }
- } else {
- outb(0, EWRK3_MPR);
- memcpy_fromio(tmp->addr, lp->shmem + PAGE0_HTE, (HASH_TABLE_LEN >> 3));
- }
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-
- ioc->len = (HASH_TABLE_LEN >> 3);
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
-
- break;
- case EWRK3_SET_MCA: /* Set a multicast address */
- if (capable(CAP_NET_ADMIN)) {
- if (ioc->len > HASH_TABLE_LEN) {
- status = -EINVAL;
- break;
- }
- if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN * ioc->len)) {
- status = -EFAULT;
- break;
- }
- set_multicast_list(dev);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_CLR_MCA: /* Clear all multicast addresses */
- if (capable(CAP_NET_ADMIN)) {
- set_multicast_list(dev);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_MCA_EN: /* Enable multicast addressing */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr |= CSR_MCE;
- csr &= ~CSR_PME;
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_STATS: { /* Get the driver statistics */
- struct ewrk3_stats *tmp_stats =
- kmalloc(sizeof(lp->pktStats), GFP_KERNEL);
- if (!tmp_stats) {
- status = -ENOMEM;
- break;
- }
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- memcpy(tmp_stats, &lp->pktStats, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-
- ioc->len = sizeof(lp->pktStats);
- if (copy_to_user(ioc->data, tmp_stats, sizeof(lp->pktStats)))
- status = -EFAULT;
- kfree(tmp_stats);
- break;
- }
- case EWRK3_CLR_STATS: /* Zero out the driver statistics */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->hw_lock,flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_CSR: /* Get the CSR Register contents */
- tmp->addr[0] = inb(EWRK3_CSR);
- ioc->len = 1;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- break;
- case EWRK3_SET_CSR: /* Set the CSR Register contents */
- if (capable(CAP_NET_ADMIN)) {
- if (copy_from_user(tmp->addr, ioc->data, 1)) {
- status = -EFAULT;
- break;
- }
- outb(tmp->addr[0], EWRK3_CSR);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_EEPROM: /* Get the EEPROM contents */
- if (capable(CAP_NET_ADMIN)) {
- for (i = 0; i < (EEPROM_MAX >> 1); i++) {
- tmp->val[i] = (short) Read_EEPROM(iobase, i);
- }
- i = EEPROM_MAX;
- tmp->addr[i++] = inb(EWRK3_CMR); /* Config/Management Reg. */
- for (j = 0; j < ETH_ALEN; j++) {
- tmp->addr[i++] = inb(EWRK3_PAR0 + j);
- }
- ioc->len = EEPROM_MAX + 1 + ETH_ALEN;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_SET_EEPROM: /* Set the EEPROM contents */
- if (capable(CAP_NET_ADMIN)) {
- if (copy_from_user(tmp->addr, ioc->data, EEPROM_MAX)) {
- status = -EFAULT;
- break;
- }
- for (i = 0; i < (EEPROM_MAX >> 1); i++) {
- Write_EEPROM(tmp->val[i], iobase, i);
- }
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_CMR: /* Get the CMR Register contents */
- tmp->addr[0] = inb(EWRK3_CMR);
- ioc->len = 1;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- break;
- case EWRK3_SET_TX_CUT_THRU: /* Set TX cut through mode */
- if (capable(CAP_NET_ADMIN)) {
- lp->txc = 1;
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_CLR_TX_CUT_THRU: /* Clear TX cut through mode */
- if (capable(CAP_NET_ADMIN)) {
- lp->txc = 0;
- } else {
- status = -EPERM;
- }
-
- break;
- default:
- status = -EOPNOTSUPP;
- }
- kfree(tmp);
- return status;
-}
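
As a rough illustration of how the private interface above was meant to be driven from userspace: the ewrk3_ioctl block is laid directly over ifr_ifru of struct ifreq, which is exactly where the handler reads it. A hypothetical sketch (the interface name is an example, error handling is minimal, and the struct and command value are re-declared here for self-containment since the real definitions live in ewrk3.h):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

struct ewrk3_ioctl {
        unsigned short cmd;             /* e.g. EWRK3_GET_HWADDR */
        unsigned short len;
        unsigned char *data;
};

#define EWRK3IOCTL       SIOCDEVPRIVATE
#define EWRK3_GET_HWADDR 0x01

int main(void)
{
        struct ifreq ifr;
        struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *)&ifr.ifr_ifru;
        unsigned char hwaddr[6];
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* example interface */
        ioc->cmd = EWRK3_GET_HWADDR;
        ioc->len = sizeof(hwaddr);
        ioc->data = hwaddr;

        if (ioctl(fd, EWRK3IOCTL, &ifr) == 0)
                printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                       hwaddr[0], hwaddr[1], hwaddr[2],
                       hwaddr[3], hwaddr[4], hwaddr[5]);
        close(fd);
        return 0;
}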
-
-#ifdef MODULE
-static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S];
-static int ndevs;
-static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, };
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, byte, NULL, 0);
-MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)");
-
-static __exit void ewrk3_exit_module(void)
-{
- int i;
-
- for( i=0; i<ndevs; i++ ) {
- struct net_device *dev = ewrk3_devs[i];
- struct ewrk3_private *lp = netdev_priv(dev);
- ewrk3_devs[i] = NULL;
- unregister_netdev(dev);
- release_region(dev->base_addr, EWRK3_TOTAL_SIZE);
- iounmap(lp->shmem);
- free_netdev(dev);
- }
-}
-
-static __init int ewrk3_init_module(void)
-{
- int i=0;
-
- while( io[i] && irq[i] ) {
- struct net_device *dev
- = alloc_etherdev(sizeof(struct ewrk3_private));
-
- if (!dev)
- break;
-
- if (ewrk3_probe1(dev, io[i], irq[i]) != 0) {
- free_netdev(dev);
- break;
- }
-
- ewrk3_devs[ndevs++] = dev;
- i++;
- }
-
- return ndevs ? 0 : -EIO;
-}
-
-
-/* Hack for breakage in new module stuff */
-module_exit(ewrk3_exit_module);
-module_init(ewrk3_init_module);
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dec/ewrk3.h b/drivers/net/ethernet/dec/ewrk3.h
deleted file mode 100644
index 8e0ee906567b..000000000000
--- a/drivers/net/ethernet/dec/ewrk3.h
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- Written 1994 by David C. Davies.
-
- Copyright 1994 Digital Equipment Corporation.
-
- This software may be used and distributed according to the terms of the
- GNU General Public License, incorporated herein by reference.
-
- The author may be reached as davies@wanton.lkg.dec.com or Digital
- Equipment Corporation, 550 King Street, Littleton MA 01460.
-
- =========================================================================
-*/
-
-/*
-** I/O Address Register Map
-*/
-#define EWRK3_CSR iobase+0x00 /* Control and Status Register */
-#define EWRK3_CR iobase+0x01 /* Control Register */
-#define EWRK3_ICR iobase+0x02 /* Interrupt Control Register */
-#define EWRK3_TSR iobase+0x03 /* Transmit Status Register */
-#define EWRK3_RSVD1 iobase+0x04 /* RESERVED */
-#define EWRK3_RSVD2 iobase+0x05 /* RESERVED */
-#define EWRK3_FMQ iobase+0x06 /* Free Memory Queue */
-#define EWRK3_FMQC iobase+0x07 /* Free Memory Queue Counter */
-#define EWRK3_RQ iobase+0x08 /* Receive Queue */
-#define EWRK3_RQC iobase+0x09 /* Receive Queue Counter */
-#define EWRK3_TQ iobase+0x0a /* Transmit Queue */
-#define EWRK3_TQC iobase+0x0b /* Transmit Queue Counter */
-#define EWRK3_TDQ iobase+0x0c /* Transmit Done Queue */
-#define EWRK3_TDQC iobase+0x0d /* Transmit Done Queue Counter */
-#define EWRK3_PIR1 iobase+0x0e /* Page Index Register 1 */
-#define EWRK3_PIR2 iobase+0x0f /* Page Index Register 2 */
-#define EWRK3_DATA iobase+0x10 /* Data Register */
-#define EWRK3_IOPR iobase+0x11 /* I/O Page Register */
-#define EWRK3_IOBR iobase+0x12 /* I/O Base Register */
-#define EWRK3_MPR iobase+0x13 /* Memory Page Register */
-#define EWRK3_MBR iobase+0x14 /* Memory Base Register */
-#define EWRK3_APROM iobase+0x15 /* Address PROM */
-#define EWRK3_EPROM1 iobase+0x16 /* EEPROM Data Register 1 */
-#define EWRK3_EPROM2 iobase+0x17 /* EEPROM Data Register 2 */
-#define EWRK3_PAR0 iobase+0x18 /* Physical Address Register 0 */
-#define EWRK3_PAR1 iobase+0x19 /* Physical Address Register 1 */
-#define EWRK3_PAR2 iobase+0x1a /* Physical Address Register 2 */
-#define EWRK3_PAR3 iobase+0x1b /* Physical Address Register 3 */
-#define EWRK3_PAR4 iobase+0x1c /* Physical Address Register 4 */
-#define EWRK3_PAR5 iobase+0x1d /* Physical Address Register 5 */
-#define EWRK3_CMR iobase+0x1e /* Configuration/Management Register */
-
-/*
-** Control Page Map
-*/
-#define PAGE0_FMQ 0x000 /* Free Memory Queue */
-#define PAGE0_RQ 0x080 /* Receive Queue */
-#define PAGE0_TQ 0x100 /* Transmit Queue */
-#define PAGE0_TDQ 0x180 /* Transmit Done Queue */
-#define PAGE0_HTE 0x200 /* Hash Table Entries */
-#define PAGE0_RSVD 0x240 /* RESERVED */
-#define PAGE0_USRD 0x600 /* User Data */
-
-/*
-** Control and Status Register bit definitions (EWRK3_CSR)
-*/
-#define CSR_RA 0x80 /* Runt Accept */
-#define CSR_PME 0x40 /* Promiscuous Mode Enable */
-#define CSR_MCE 0x20 /* Multicast Enable */
-#define CSR_TNE 0x08 /* TX Done Queue Not Empty */
-#define CSR_RNE 0x04 /* RX Queue Not Empty */
-#define CSR_TXD 0x02 /* TX Disable */
-#define CSR_RXD 0x01 /* RX Disable */
-
-/*
-** Control Register bit definitions (EWRK3_CR)
-*/
-#define CR_APD 0x80 /* Auto Port Disable */
-#define CR_PSEL 0x40 /* Port Select (0->TP port) */
-#define CR_LBCK 0x20 /* LoopBaCK enable */
-#define CR_FDUP 0x10 /* Full DUPlex enable */
-#define CR_FBUS 0x08 /* Fast BUS enable (ISA clk > 8.33MHz) */
-#define CR_EN_16 0x04 /* ENable 16 bit memory accesses */
-#define CR_LED 0x02 /* LED (1-> turn on) */
-
-/*
-** Interrupt Control Register bit definitions (EWRK3_ICR)
-*/
-#define ICR_IE 0x80 /* Interrupt Enable */
-#define ICR_IS 0x60 /* Interrupt Selected */
-#define ICR_TNEM 0x08 /* TNE Mask (0->mask) */
-#define ICR_RNEM 0x04 /* RNE Mask (0->mask) */
-#define ICR_TXDM 0x02 /* TXD Mask (0->mask) */
-#define ICR_RXDM 0x01 /* RXD Mask (0->mask) */
-
-/*
-** Transmit Status Register bit definitions (EWRK3_TSR)
-*/
-#define TSR_NCL 0x80 /* No Carrier Loopback */
-#define TSR_ID 0x40 /* Initially Deferred */
-#define TSR_LCL 0x20 /* Late CoLlision */
-#define TSR_ECL 0x10 /* Excessive CoLlisions */
-#define TSR_RCNTR 0x0f /* Retries CouNTeR */
-
-/*
-** I/O Page Register bit definitions (EWRK3_IOPR)
-*/
-#define EEPROM_INIT 0xc0 /* EEPROM INIT command */
-#define EEPROM_WR_EN 0xc8 /* EEPROM WRITE ENABLE command */
-#define EEPROM_WR 0xd0 /* EEPROM WRITE command */
-#define EEPROM_WR_DIS 0xd8 /* EEPROM WRITE DISABLE command */
-#define EEPROM_RD 0xe0 /* EEPROM READ command */
-
-/*
-** I/O Base Register bit definitions (EWRK3_IOBR)
-*/
-#define EISA_REGS_EN 0x20 /* Enable EISA ID and Control Registers */
-#define EISA_IOB 0x1f /* Compare bits for I/O Base Address */
-
-/*
-** I/O Configuration/Management Register bit definitions (EWRK3_CMR)
-*/
-#define CMR_RA 0x80 /* Read Ahead */
-#define CMR_WB 0x40 /* Write Behind */
-#define CMR_LINK 0x20 /* 0->TP */
-#define CMR_POLARITY 0x10 /* Informational */
-#define CMR_NO_EEPROM 0x0c /* NO_EEPROM<1:0> pin status */
-#define CMR_HS 0x08 /* Hard Strapped pin status (LeMAC2) */
-#define CMR_PNP 0x04 /* Plug 'n Play */
-#define CMR_DRAM 0x02 /* 0-> 1DRAM, 1-> 2 DRAM on board */
-#define CMR_0WS 0x01 /* Zero Wait State */
-
-/*
-** MAC Receive Status Register bit definitions
-*/
-
-#define R_ROK 0x80 /* Receive OK summary */
-#define R_IAM 0x10 /* Individual Address Match */
-#define R_MCM 0x08 /* MultiCast Match */
-#define R_DBE 0x04 /* Dribble Bit Error */
-#define R_CRC 0x02 /* CRC error */
-#define R_PLL 0x01 /* Phase Lock Lost */
-
-/*
-** MAC Transmit Control Register bit definitions
-*/
-
-#define TCR_SQEE 0x40 /* SQE Enable - look for heartbeat */
-#define TCR_SED 0x20 /* Stop when Error Detected */
-#define TCR_QMODE 0x10 /* Q_MODE */
-#define TCR_LAB 0x08 /* Less Aggressive Backoff */
-#define TCR_PAD 0x04 /* PAD Runt Packets */
-#define TCR_IFC 0x02 /* Insert Frame Check */
-#define TCR_ISA 0x01 /* Insert Source Address */
-
-/*
-** MAC Transmit Status Register bit definitions
-*/
-
-#define T_VSTS 0x80 /* Valid STatuS */
-#define T_CTU 0x40 /* Cut Through Used */
-#define T_SQE 0x20 /* Signal Quality Error */
-#define T_NCL 0x10 /* No Carrier Loopback */
-#define T_LCL 0x08 /* Late Collision */
-#define T_ID 0x04 /* Initially Deferred */
-#define T_COLL 0x03 /* COLLision status */
-#define T_XCOLL 0x03 /* Excessive Collisions */
-#define T_MCOLL 0x02 /* Multiple Collisions */
-#define T_OCOLL 0x01 /* One Collision */
-#define T_NOCOLL 0x00 /* No Collisions */
-#define T_XUR 0x03 /* Excessive Underruns */
-#define T_TXE 0x7f /* TX Errors */
-
-/*
-** EISA Configuration Register bit definitions
-*/
-
-#define EISA_ID iobase + 0x0c80 /* EISA ID Registers */
-#define EISA_ID0 iobase + 0x0c80 /* EISA ID Register 0 */
-#define EISA_ID1 iobase + 0x0c81 /* EISA ID Register 1 */
-#define EISA_ID2 iobase + 0x0c82 /* EISA ID Register 2 */
-#define EISA_ID3 iobase + 0x0c83 /* EISA ID Register 3 */
-#define EISA_CR iobase + 0x0c84 /* EISA Control Register */
-
-/*
-** EEPROM BYTES
-*/
-#define EEPROM_MEMB 0x00
-#define EEPROM_IOB 0x01
-#define EEPROM_EISA_ID0 0x02
-#define EEPROM_EISA_ID1 0x03
-#define EEPROM_EISA_ID2 0x04
-#define EEPROM_EISA_ID3 0x05
-#define EEPROM_MISC0 0x06
-#define EEPROM_MISC1 0x07
-#define EEPROM_PNAME7 0x08
-#define EEPROM_PNAME6 0x09
-#define EEPROM_PNAME5 0x0a
-#define EEPROM_PNAME4 0x0b
-#define EEPROM_PNAME3 0x0c
-#define EEPROM_PNAME2 0x0d
-#define EEPROM_PNAME1 0x0e
-#define EEPROM_PNAME0 0x0f
-#define EEPROM_SWFLAGS 0x10
-#define EEPROM_HWCAT 0x11
-#define EEPROM_NETMAN2 0x12
-#define EEPROM_REVLVL 0x13
-#define EEPROM_NETMAN0 0x14
-#define EEPROM_NETMAN1 0x15
-#define EEPROM_CHIPVER 0x16
-#define EEPROM_SETUP 0x17
-#define EEPROM_PADDR0 0x18
-#define EEPROM_PADDR1 0x19
-#define EEPROM_PADDR2 0x1a
-#define EEPROM_PADDR3 0x1b
-#define EEPROM_PADDR4 0x1c
-#define EEPROM_PADDR5 0x1d
-#define EEPROM_PA_CRC 0x1e
-#define EEPROM_CHKSUM 0x1f
-
-/*
-** EEPROM bytes for checksumming
-*/
-#define EEPROM_MAX 32 /* bytes */
-
-/*
-** EEPROM MISCELLANEOUS FLAGS
-*/
-#define RBE_SHADOW 0x0100 /* Remote Boot Enable Shadow */
-#define READ_AHEAD 0x0080 /* Read Ahead feature */
-#define IRQ_SEL2 0x0070 /* IRQ line selection (LeMAC2) */
-#define IRQ_SEL 0x0060 /* IRQ line selection */
-#define FAST_BUS 0x0008 /* ISA Bus speeds > 8.33MHz */
-#define ENA_16 0x0004 /* Enables 16 bit memory transfers */
-#define WRITE_BEHIND 0x0002 /* Write Behind feature */
-#define _0WS_ENA 0x0001 /* Zero Wait State Enable */
-
-/*
-** EEPROM NETWORK MANAGEMENT FLAGS
-*/
-#define NETMAN_POL 0x04 /* Polarity defeat */
-#define NETMAN_LINK 0x02 /* Link defeat */
-#define NETMAN_CCE 0x01 /* Custom Counters Enable */
-
-/*
-** EEPROM SW FLAGS
-*/
-#define SW_SQE 0x10 /* Signal Quality Error */
-#define SW_LAB 0x08 /* Less Aggressive Backoff */
-#define SW_INIT 0x04 /* Initialized */
-#define SW_TIMEOUT 0x02 /* 0:2.5 mins, 1: 30 secs */
-#define SW_REMOTE 0x01 /* Remote Boot Enable -> 1 */
-
-/*
-** EEPROM SETUP FLAGS
-*/
-#define SETUP_APD 0x80 /* AutoPort Disable */
-#define SETUP_PS 0x40 /* Port Select */
-#define SETUP_MP 0x20 /* MultiPort */
-#define SETUP_1TP 0x10 /* 1 port, TP */
-#define SETUP_1COAX 0x00 /* 1 port, Coax */
-#define SETUP_DRAM 0x02 /* Number of DRAMS on board */
-
-/*
-** EEPROM MANAGEMENT FLAGS
-*/
-#define MGMT_CCE 0x01 /* Custom Counters Enable */
-
-/*
-** EEPROM VERSIONS
-*/
-#define LeMAC 0x11
-#define LeMAC2 0x12
-
-/*
-** Miscellaneous
-*/
-
-#define EEPROM_WAIT_TIME 1000 /* Number of microseconds */
-#define EISA_EN 0x0001 /* Enable EISA bus buffers */
-
-#define HASH_TABLE_LEN 512 /* Bits */
-
-#define XCT 0x80 /* Transmit Cut Through */
-#define PRELOAD 16 /* 4 long words */
-
-#define MASK_INTERRUPTS 1
-#define UNMASK_INTERRUPTS 0
-
-#define EEPROM_OFFSET(a) ((u_short)((u_long)(a)))
-
-/*
-** Include the IOCTL stuff
-*/
-#include <linux/sockios.h>
-
-#define EWRK3IOCTL SIOCDEVPRIVATE
-
-struct ewrk3_ioctl {
- unsigned short cmd; /* Command to run */
- unsigned short len; /* Length of the data buffer */
- unsigned char __user *data; /* Pointer to the data buffer */
-};
-
-/*
-** Recognised commands for the driver
-*/
-#define EWRK3_GET_HWADDR 0x01 /* Get the hardware address */
-#define EWRK3_SET_HWADDR 0x02 /* Set the hardware address */
-#define EWRK3_SET_PROM 0x03 /* Set Promiscuous Mode */
-#define EWRK3_CLR_PROM 0x04 /* Clear Promiscuous Mode */
-#define EWRK3_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
-#define EWRK3_GET_MCA 0x06 /* Get the multicast address table */
-#define EWRK3_SET_MCA 0x07 /* Set a multicast address */
-#define EWRK3_CLR_MCA 0x08 /* Clear all multicast addresses */
-#define EWRK3_MCA_EN 0x09 /* Enable a multicast address group */
-#define EWRK3_GET_STATS 0x0a /* Get the driver statistics */
-#define EWRK3_CLR_STATS 0x0b /* Zero out the driver statistics */
-#define EWRK3_GET_CSR 0x0c /* Get the CSR Register contents */
-#define EWRK3_SET_CSR 0x0d /* Set the CSR Register contents */
-#define EWRK3_GET_EEPROM 0x0e /* Get the EEPROM contents */
-#define EWRK3_SET_EEPROM 0x0f /* Set the EEPROM contents */
-#define EWRK3_GET_CMR 0x10 /* Get the CMR Register contents */
-#define EWRK3_CLR_TX_CUT_THRU 0x11 /* Clear the TX cut through mode */
-#define EWRK3_SET_TX_CUT_THRU 0x12 /* Set the TX cut through mode */
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 1203be0436e2..0c37fb2cc867 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -57,8 +57,8 @@ config TULIP
be called tulip.
config TULIP_MWI
- bool "New bus configuration (EXPERIMENTAL)"
- depends on TULIP && EXPERIMENTAL
+ bool "New bus configuration"
+ depends on TULIP
---help---
This configures your Tulip card specifically for the card and
system cache line size type you are using.
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index b5afe218c31b..ee26ce78e270 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_DLINK
bool "D-Link devices"
default y
- depends on PCI || PARPORT
+ depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -18,36 +18,6 @@ config NET_VENDOR_DLINK
if NET_VENDOR_DLINK
-config DE600
- tristate "D-Link DE600 pocket adapter support"
- depends on PARPORT
- ---help---
- This is a network (Ethernet) device which attaches to your parallel
- port. Read <file:Documentation/networking/DLINK.txt> as well as the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, if you want to use
- this. It is possible to have several devices share a single parallel
- port and it is safe to compile the corresponding drivers into the
- kernel.
-
- To compile this driver as a module, choose M here: the module
- will be called de600.
-
-config DE620
- tristate "D-Link DE620 pocket adapter support"
- depends on PARPORT
- ---help---
- This is a network (Ethernet) device which attaches to your parallel
- port. Read <file:Documentation/networking/DLINK.txt> as well as the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, if you want to use
- this. It is possible to have several devices share a single parallel
- port and it is safe to compile the corresponding drivers into the
- kernel.
-
- To compile this driver as a module, choose M here: the module
- will be called de620.
-
config DL2K
tristate "DL2000/TC902x-based Gigabit Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile
index c705eaa4f5b2..40085f67157b 100644
--- a/drivers/net/ethernet/dlink/Makefile
+++ b/drivers/net/ethernet/dlink/Makefile
@@ -2,7 +2,5 @@
# Makefile for the D-Link network device drivers.
#
-obj-$(CONFIG_DE600) += de600.o
-obj-$(CONFIG_DE620) += de620.o
obj-$(CONFIG_DL2K) += dl2k.o
obj-$(CONFIG_SUNDANCE) += sundance.o
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c
deleted file mode 100644
index 414f0eea1049..000000000000
--- a/drivers/net/ethernet/dlink/de600.c
+++ /dev/null
@@ -1,529 +0,0 @@
-static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj0rn@blox.se)\n";
-/*
- * de600.c
- *
- * Linux driver for the D-Link DE-600 Ethernet pocket adapter.
- *
- * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall
- * The Author may be reached as bj0rn@blox.se
- *
- * Based on adapter information gathered from DE600.ASM by D-Link Inc.,
- * as included on disk C in the v.2.11 of PC/TCP from FTP Software.
- * For DE600.asm:
- * Portions (C) Copyright 1990 D-Link, Inc.
- * Copyright, 1988-1992, Russell Nelson, Crynwr Software
- *
- * Adapted to the sample network driver core for linux,
- * written by: Donald Becker <becker@super.org>
- * (Now at <becker@scyld.com>)
- *
- **************************************************************/
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- **************************************************************/
-
-/* Add more time here if your adapter won't work OK: */
-#define DE600_SLOW_DOWN udelay(delay_time)
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include <asm/io.h>
-
-#include "de600.h"
-
-static bool check_lost = true;
-module_param(check_lost, bool, 0);
-MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
-
-static unsigned int delay_time = 10;
-module_param(delay_time, int, 0);
-MODULE_PARM_DESC(delay_time, "DE-600 delay on I/O in microseconds");
-
-
-/*
- * D-Link driver variables:
- */
-
-static volatile int rx_page;
-
-#define TX_PAGES 2
-static volatile int tx_fifo[TX_PAGES];
-static volatile int tx_fifo_in;
-static volatile int tx_fifo_out;
-static volatile int free_tx_pages = TX_PAGES;
-static int was_down;
-static DEFINE_SPINLOCK(de600_lock);
-
-static inline u8 de600_read_status(struct net_device *dev)
-{
- u8 status;
-
- outb_p(STATUS, DATA_PORT);
- status = inb(STATUS_PORT);
- outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT);
-
- return status;
-}
-
-static inline u8 de600_read_byte(unsigned char type, struct net_device *dev)
-{
- /* dev used by macros */
- u8 lo;
- outb_p((type), DATA_PORT);
- lo = ((unsigned char)inb(STATUS_PORT)) >> 4;
- outb_p((type) | HI_NIBBLE, DATA_PORT);
- return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo;
-}
-
-/*
- * Open/initialize the board. This is called (in the current kernel)
- * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1).
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is a non-reboot way to recover if something goes wrong.
- */
-
-static int de600_open(struct net_device *dev)
-{
- unsigned long flags;
- int ret = request_irq(DE600_IRQ, de600_interrupt, 0, dev->name, dev);
- if (ret) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, DE600_IRQ);
- return ret;
- }
- spin_lock_irqsave(&de600_lock, flags);
- ret = adapter_init(dev);
- spin_unlock_irqrestore(&de600_lock, flags);
- return ret;
-}
-
-/*
- * The inverse routine to de600_open().
- */
-
-static int de600_close(struct net_device *dev)
-{
- select_nic();
- rx_page = 0;
- de600_put_command(RESET);
- de600_put_command(STOP_RESET);
- de600_put_command(0);
- select_prn();
- free_irq(DE600_IRQ, dev);
- return 0;
-}
-
-static inline void trigger_interrupt(struct net_device *dev)
-{
- de600_put_command(FLIP_IRQ);
- select_prn();
- DE600_SLOW_DOWN;
- select_nic();
- de600_put_command(0);
-}
-
-/*
- * Copy a buffer to the adapter transmit page memory.
- * Start sending.
- */
-
-static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned long flags;
- int transmit_from;
- int len;
- int tickssofar;
- u8 *buffer = skb->data;
- int i;
-
- if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
- tickssofar = jiffies - dev_trans_start(dev);
- if (tickssofar < HZ/20)
- return NETDEV_TX_BUSY;
- /* else */
- printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
- /* Restart the adapter. */
- spin_lock_irqsave(&de600_lock, flags);
- if (adapter_init(dev)) {
- spin_unlock_irqrestore(&de600_lock, flags);
- return NETDEV_TX_BUSY;
- }
- spin_unlock_irqrestore(&de600_lock, flags);
- }
-
- /* Start real output */
- pr_debug("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages);
-
- if ((len = skb->len) < RUNT)
- len = RUNT;
-
- spin_lock_irqsave(&de600_lock, flags);
- select_nic();
- tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len;
- tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */
-
- if(check_lost)
- {
- /* This costs about 40 instructions per packet... */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- de600_read_byte(READ_DATA, dev);
- if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
- if (adapter_init(dev)) {
- spin_unlock_irqrestore(&de600_lock, flags);
- return NETDEV_TX_BUSY;
- }
- }
- }
-
- de600_setup_address(transmit_from, RW_ADDR);
- for (i = 0; i < skb->len ; ++i, ++buffer)
- de600_put_byte(*buffer);
- for (; i < len; ++i)
- de600_put_byte(0);
-
- if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
- dev->trans_start = jiffies;
- netif_start_queue(dev); /* allow more packets into adapter */
- /* Send page and generate a faked interrupt */
- de600_setup_address(transmit_from, TX_ADDR);
- de600_put_command(TX_ENABLE);
- }
- else {
- if (free_tx_pages)
- netif_start_queue(dev);
- else
- netif_stop_queue(dev);
- select_prn();
- }
- spin_unlock_irqrestore(&de600_lock, flags);
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/*
- * The typical workload of the driver:
- * Handle the network interface interrupts.
- */
-
-static irqreturn_t de600_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- u8 irq_status;
- int retrig = 0;
- int boguscount = 0;
-
- spin_lock(&de600_lock);
-
- select_nic();
- irq_status = de600_read_status(dev);
-
- do {
- pr_debug("de600_interrupt (%02X)\n", irq_status);
-
- if (irq_status & RX_GOOD)
- de600_rx_intr(dev);
- else if (!(irq_status & RX_BUSY))
- de600_put_command(RX_ENABLE);
-
- /* Any transmission in progress? */
- if (free_tx_pages < TX_PAGES)
- retrig = de600_tx_intr(dev, irq_status);
- else
- retrig = 0;
-
- irq_status = de600_read_status(dev);
- } while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) );
- /*
- * Yeah, it _looks_ like busy waiting, smells like busy waiting
- * and I know it's not PC, but please, it will only occur once
- * in a while and then only for a loop or so (< 1ms for sure!)
- */
-
- /* Enable adapter interrupts */
- select_prn();
- if (retrig)
- trigger_interrupt(dev);
- spin_unlock(&de600_lock);
- return IRQ_HANDLED;
-}
-
-static int de600_tx_intr(struct net_device *dev, int irq_status)
-{
- /*
- * Returns 1 if tx still not done
- */
-
- /* Check if current transmission is done yet */
- if (irq_status & TX_BUSY)
- return 1; /* tx not done, try again */
-
- /* else */
- /* If last transmission OK then bump fifo index */
- if (!(irq_status & TX_FAILED16)) {
- tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES;
- ++free_tx_pages;
- dev->stats.tx_packets++;
- netif_wake_queue(dev);
- }
-
- /* More to send, or resend last packet? */
- if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) {
- dev->trans_start = jiffies;
- de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR);
- de600_put_command(TX_ENABLE);
- return 1;
- }
- /* else */
-
- return 0;
-}
-
-/*
- * We have a good packet, get it out of the adapter.
- */
-static void de600_rx_intr(struct net_device *dev)
-{
- struct sk_buff *skb;
- int i;
- int read_from;
- int size;
- unsigned char *buffer;
-
- /* Get size of received packet */
- size = de600_read_byte(RX_LEN, dev); /* low byte */
- size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */
- size -= 4; /* Ignore trailing 4 CRC-bytes */
-
- /* Tell adapter where to store next incoming packet, enable receiver */
- read_from = rx_page_adr();
- next_rx_page();
- de600_put_command(RX_ENABLE);
-
- if ((size < 32) || (size > 1535)) {
- printk(KERN_WARNING "%s: Bogus packet size %d.\n", dev->name, size);
- if (size > 10000)
- adapter_init(dev);
- return;
- }
-
- skb = netdev_alloc_skb(dev, size + 2);
- if (skb == NULL) {
- printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
- return;
- }
- /* else */
-
- skb_reserve(skb,2); /* Align */
-
- /* 'skb->data' points to the start of sk_buff data area. */
- buffer = skb_put(skb,size);
-
- /* copy the packet into the buffer */
- de600_setup_address(read_from, RW_ADDR);
- for (i = size; i > 0; --i, ++buffer)
- *buffer = de600_read_byte(READ_DATA, dev);
-
- skb->protocol=eth_type_trans(skb,dev);
-
- netif_rx(skb);
-
- /* update stats */
- dev->stats.rx_packets++; /* count all receives */
- dev->stats.rx_bytes += size; /* count all received bytes */
-
- /*
- * If any worth-while packets have been received, netif_rx()
- * will work on them when we get to the tasklets.
- */
-}
-
-static const struct net_device_ops de600_netdev_ops = {
- .ndo_open = de600_open,
- .ndo_stop = de600_close,
- .ndo_start_xmit = de600_start_xmit,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-
-static struct net_device * __init de600_probe(void)
-{
- int i;
- struct net_device *dev;
- int err;
-
- dev = alloc_etherdev(0);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
-
- if (!request_region(DE600_IO, 3, "de600")) {
- printk(KERN_WARNING "DE600: port 0x%x busy\n", DE600_IO);
- err = -EBUSY;
- goto out;
- }
-
- printk(KERN_INFO "%s: D-Link DE-600 pocket adapter", dev->name);
- /* Alpha testers must have the version number to report bugs. */
- pr_debug("%s", version);
-
- /* probe for adapter */
- err = -ENODEV;
- rx_page = 0;
- select_nic();
- (void)de600_read_status(dev);
- de600_put_command(RESET);
- de600_put_command(STOP_RESET);
- if (de600_read_status(dev) & 0xf0) {
- printk(": not at I/O %#3x.\n", DATA_PORT);
- goto out1;
- }
-
- /*
- * Maybe we found one,
- * have to check if it is a D-Link DE-600 adapter...
- */
-
- /* Get the adapter ethernet address from the ROM */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = de600_read_byte(READ_DATA, dev);
- dev->broadcast[i] = 0xff;
- }
-
- /* Check magic code */
- if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) {
- /* OK, install real address */
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x80;
- dev->dev_addr[2] = 0xc8;
- dev->dev_addr[3] &= 0x0f;
- dev->dev_addr[3] |= 0x70;
- } else {
- printk(" not identified in the printer port\n");
- goto out1;
- }
-
- printk(", Ethernet Address: %pM\n", dev->dev_addr);
-
- dev->netdev_ops = &de600_netdev_ops;
-
- dev->flags&=~IFF_MULTICAST;
-
- select_prn();
-
- err = register_netdev(dev);
- if (err)
- goto out1;
-
- return dev;
-
-out1:
- release_region(DE600_IO, 3);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static int adapter_init(struct net_device *dev)
-{
- int i;
-
- select_nic();
- rx_page = 0; /* used by RESET */
- de600_put_command(RESET);
- de600_put_command(STOP_RESET);
-
- /* Check if it is still there... */
- /* Get some bytes of the adapter ethernet address from the ROM */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- de600_read_byte(READ_DATA, dev);
- if ((de600_read_byte(READ_DATA, dev) != 0xde) ||
- (de600_read_byte(READ_DATA, dev) != 0x15)) {
- /* was: if (de600_read_status(dev) & 0xf0) { */
- printk("Something has happened to the DE-600! Please check it and do a new ifconfig!\n");
- /* Goodbye, cruel world... */
- dev->flags &= ~IFF_UP;
- de600_close(dev);
- was_down = 1;
- netif_stop_queue(dev); /* Transmit busy... */
- return 1; /* failed */
- }
-
- if (was_down) {
- printk(KERN_INFO "%s: Thanks, I feel much better now!\n", dev->name);
- was_down = 0;
- }
-
- tx_fifo_in = 0;
- tx_fifo_out = 0;
- free_tx_pages = TX_PAGES;
-
-
- /* set the ether address. */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- for (i = 0; i < ETH_ALEN; i++)
- de600_put_byte(dev->dev_addr[i]);
-
- /* where to start saving incoming packets */
- rx_page = RX_BP | RX_BASE_PAGE;
- de600_setup_address(MEM_4K, RW_ADDR);
- /* Enable receiver */
- de600_put_command(RX_ENABLE);
- select_prn();
-
- netif_start_queue(dev);
-
- return 0; /* OK */
-}
-
-static struct net_device *de600_dev;
-
-static int __init de600_init(void)
-{
- de600_dev = de600_probe();
- if (IS_ERR(de600_dev))
- return PTR_ERR(de600_dev);
- return 0;
-}
-
-static void __exit de600_exit(void)
-{
- unregister_netdev(de600_dev);
- release_region(DE600_IO, 3);
- free_netdev(de600_dev);
-}
-
-module_init(de600_init);
-module_exit(de600_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dlink/de600.h b/drivers/net/ethernet/dlink/de600.h
deleted file mode 100644
index e80ecbabcf4e..000000000000
--- a/drivers/net/ethernet/dlink/de600.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/**************************************************
- * *
- * Definition of D-Link Ethernet Pocket adapter *
- * *
- **************************************************/
-/*
- * D-Link Ethernet pocket adapter ports
- */
-/*
- * OK, so I'm cheating, but there are an awful lot of
- * reads and writes in order to get anything in and out
- * of the DE-600 with 4 bits at a time in the parallel port,
- * so every saved instruction really helps :-)
- */
-
-#ifndef DE600_IO
-#define DE600_IO 0x378
-#endif
-
-#define DATA_PORT (DE600_IO)
-#define STATUS_PORT (DE600_IO + 1)
-#define COMMAND_PORT (DE600_IO + 2)
-
-#ifndef DE600_IRQ
-#define DE600_IRQ 7
-#endif
-/*
- * It really should look like this, and autoprobing as well...
- *
-#define DATA_PORT (dev->base_addr + 0)
-#define STATUS_PORT (dev->base_addr + 1)
-#define COMMAND_PORT (dev->base_addr + 2)
-#define DE600_IRQ dev->irq
- */
-
-/*
- * D-Link COMMAND_PORT commands
- */
-#define SELECT_NIC 0x04 /* select Network Interface Card */
-#define SELECT_PRN 0x1c /* select Printer */
-#define NML_PRN 0xec /* normal Printer situation */
-#define IRQEN 0x10 /* enable IRQ line */
-
-/*
- * D-Link STATUS_PORT
- */
-#define RX_BUSY 0x80
-#define RX_GOOD 0x40
-#define TX_FAILED16 0x10
-#define TX_BUSY 0x08
-
-/*
- * D-Link DATA_PORT commands
- * command in low 4 bits
- * data in high 4 bits
- * select current data nibble with HI_NIBBLE bit
- */
-#define WRITE_DATA 0x00 /* write memory */
-#define READ_DATA 0x01 /* read memory */
-#define STATUS 0x02 /* read status register */
-#define COMMAND 0x03 /* write command register (see COMMAND below) */
-#define NULL_COMMAND 0x04 /* null command */
-#define RX_LEN 0x05 /* read received packet length */
-#define TX_ADDR 0x06 /* set adapter transmit memory address */
-#define RW_ADDR 0x07 /* set adapter read/write memory address */
-#define HI_NIBBLE 0x08 /* read/write the high nibble of data,
- or-ed with rest of command */
-
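-
Put another way, the nibble protocol sketched by the DATA_PORT commands above means every byte crosses the printer port as two 4-bit writes. A stand-alone trace of what the de600_put_byte() macro further down in this header emits, with outb_p() swapped for printf() so the sketch runs anywhere, is:

#include <stdio.h>

#define WRITE_DATA 0x00
#define HI_NIBBLE  0x08

static void put_byte_trace(unsigned char data)
{
        /* first write: low data nibble shifted into the high half + command  */
        printf("outb(0x%02x)   /* low nibble  */\n",
               (unsigned char)((data << 4) | WRITE_DATA));
        /* second write: high data nibble in place + command + HI_NIBBLE flag */
        printf("outb(0x%02x)   /* high nibble */\n",
               (unsigned char)((data & 0xf0) | WRITE_DATA | HI_NIBBLE));
}

int main(void)
{
        put_byte_trace(0xde);   /* example byte */
        return 0;
}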
-/*
- * command register, accessed through DATA_PORT with low bits = COMMAND
- */
-#define RX_ALL 0x01 /* PROMISCUOUS */
-#define RX_BP 0x02 /* default: BROADCAST & PHYSICAL ADDRESS */
-#define RX_MBP 0x03 /* MULTICAST, BROADCAST & PHYSICAL ADDRESS */
-
-#define TX_ENABLE 0x04 /* bit 2 */
-#define RX_ENABLE 0x08 /* bit 3 */
-
-#define RESET 0x80 /* set bit 7 high */
-#define STOP_RESET 0x00 /* set bit 7 low */
-
-/*
- * data to command register
- * (high 4 bits in write to DATA_PORT)
- */
-#define RX_PAGE2_SELECT 0x10 /* bit 4, only 2 pages to select */
-#define RX_BASE_PAGE 0x20 /* bit 5, always set when specifying RX_ADDR */
-#define FLIP_IRQ 0x40 /* bit 6 */
-
-/*
- * D-Link adapter internal memory:
- *
- * 0-2K 1:st transmit page (send from pointer up to 2K)
- * 2-4K 2:nd transmit page (send from pointer up to 4K)
- *
- * 4-6K 1:st receive page (data from 4K upwards)
- * 6-8K 2:nd receive page (data from 6K upwards)
- *
- * 8K+ Adapter ROM (contains magic code and last 3 bytes of Ethernet address)
- */
-#define MEM_2K 0x0800 /* 2048 */
-#define MEM_4K 0x1000 /* 4096 */
-#define MEM_6K 0x1800 /* 6144 */
-#define NODE_ADDRESS 0x2000 /* 8192 */
-
-#define RUNT 60 /* Too small Ethernet packet */
-
-/**************************************************
- * *
- * End of definition *
- * *
- **************************************************/
-
-/*
- * Index to functions, as function prototypes.
- */
-/* Routines used internally. (See "convenience macros") */
-static u8 de600_read_status(struct net_device *dev);
-static u8 de600_read_byte(unsigned char type, struct net_device *dev);
-
-/* Put in the device structure. */
-static int de600_open(struct net_device *dev);
-static int de600_close(struct net_device *dev);
-static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev);
-
-/* Dispatch from interrupts. */
-static irqreturn_t de600_interrupt(int irq, void *dev_id);
-static int de600_tx_intr(struct net_device *dev, int irq_status);
-static void de600_rx_intr(struct net_device *dev);
-
-/* Initialization */
-static void trigger_interrupt(struct net_device *dev);
-static int adapter_init(struct net_device *dev);
-
-/*
- * Convenience macros/functions for D-Link adapter
- */
-
-#define select_prn() outb_p(SELECT_PRN, COMMAND_PORT); DE600_SLOW_DOWN
-#define select_nic() outb_p(SELECT_NIC, COMMAND_PORT); DE600_SLOW_DOWN
-
-/* Thanks for hints from Mark Burton <markb@ordern.demon.co.uk> */
-#define de600_put_byte(data) ( \
- outb_p(((data) << 4) | WRITE_DATA , DATA_PORT), \
- outb_p(((data) & 0xf0) | WRITE_DATA | HI_NIBBLE, DATA_PORT))
-
-/*
- * The first two outb_p()'s below could perhaps be deleted if there
- * would be more delay in the last two. Not certain about it yet...
- */
-#define de600_put_command(cmd) ( \
- outb_p(( rx_page << 4) | COMMAND , DATA_PORT), \
- outb_p(( rx_page & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT), \
- outb_p(((rx_page | cmd) << 4) | COMMAND , DATA_PORT), \
- outb_p(((rx_page | cmd) & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT))
-
-#define de600_setup_address(addr,type) ( \
- outb_p((((addr) << 4) & 0xf0) | type , DATA_PORT), \
- outb_p(( (addr) & 0xf0) | type | HI_NIBBLE, DATA_PORT), \
- outb_p((((addr) >> 4) & 0xf0) | type , DATA_PORT), \
- outb_p((((addr) >> 8) & 0xf0) | type | HI_NIBBLE, DATA_PORT))
-
-#define rx_page_adr() ((rx_page & RX_PAGE2_SELECT)?(MEM_6K):(MEM_4K))
-
-/* Flip bit, only 2 pages */
-#define next_rx_page() (rx_page ^= RX_PAGE2_SELECT)
-
-#define tx_page_adr(a) (((a) + 1) * MEM_2K)
diff --git a/drivers/net/ethernet/dlink/de620.c b/drivers/net/ethernet/dlink/de620.c
deleted file mode 100644
index 2e2bc60ee811..000000000000
--- a/drivers/net/ethernet/dlink/de620.c
+++ /dev/null
@@ -1,987 +0,0 @@
-/*
- * de620.c $Revision: 1.40 $ BETA
- *
- *
- * Linux driver for the D-Link DE-620 Ethernet pocket adapter.
- *
- * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall <bj0rn@blox.se>
- *
- * Based on adapter information gathered from DOS packetdriver
- * sources from D-Link Inc: (Special thanks to Henry Ngai of D-Link.)
- * Portions (C) Copyright D-Link SYSTEM Inc. 1991, 1992
- * Copyright, 1988, Russell Nelson, Crynwr Software
- *
- * Adapted to the sample network driver core for linux,
- * written by: Donald Becker <becker@super.org>
- * (Now at <becker@scyld.com>)
- *
- * Valuable assistance from:
- * J. Joshua Kopper <kopper@rtsg.mot.com>
- * Olav Kvittem <Olav.Kvittem@uninett.no>
- * Germano Caronni <caronni@nessie.cs.id.ethz.ch>
- * Jeremy Fitzhardinge <jeremy@suite.sw.oz.au>
- *
- *****************************************************************************/
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *****************************************************************************/
-static const char version[] =
- "de620.c: $Revision: 1.40 $, Bjorn Ekwall <bj0rn@blox.se>\n";
-
-/***********************************************************************
- *
- * "Tuning" section.
- *
- * Compile-time options: (see below for descriptions)
- * -DDE620_IO=0x378 (lpt1)
- * -DDE620_IRQ=7 (lpt1)
- * -DSHUTDOWN_WHEN_LOST
- * -DCOUNT_LOOPS
- * -DLOWSPEED
- * -DREAD_DELAY
- * -DWRITE_DELAY
- */
-
-/*
- * This driver assumes that the printer port is a "normal",
- * dumb, uni-directional port!
- * If your port is "fancy" in any way, please try to set it to "normal"
- * with your BIOS setup. I have no access to machines with bi-directional
- * ports, so I can't test such a driver :-(
- * (Yes, I _know_ it is possible to use DE620 with bidirectional ports...)
- *
- * There are some clones of DE620 out there, with different names.
- * If the current driver does not recognize a clone, try to change
- * the following #define to:
- *
- * #define DE620_CLONE 1
- */
-#define DE620_CLONE 0
-
-/*
- * If the adapter has problems with high speeds, enable this #define
- * otherwise full printerport speed will be attempted.
- *
- * You can tune the READ_DELAY/WRITE_DELAY below if you enable LOWSPEED
- *
-#define LOWSPEED
- */
-
-#ifndef READ_DELAY
-#define READ_DELAY 100 /* adapter internal read delay in 100ns units */
-#endif
-
-#ifndef WRITE_DELAY
-#define WRITE_DELAY 100 /* adapter internal write delay in 100ns units */
-#endif
-
-/*
- * Enable this #define if you want the adapter to do a "ifconfig down" on
- * itself when we have detected that something is possibly wrong with it.
- * The default behaviour is to retry with "adapter_init()" until success.
- * This should be used for debugging purposes only.
- *
-#define SHUTDOWN_WHEN_LOST
- */
-
-#ifdef LOWSPEED
-/*
- * Enable this #define if you want to see debugging output that shows how long
- * we have to wait before the DE-620 is ready for the next read/write/command.
- *
-#define COUNT_LOOPS
- */
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include <asm/io.h>
-
-/* Constant definitions for the DE-620 registers, commands and bits */
-#include "de620.h"
-
-typedef unsigned char byte;
-
-/*******************************************************
- * *
- * Definition of D-Link DE-620 Ethernet Pocket adapter *
- * See also "de620.h" *
- * *
- *******************************************************/
-#ifndef DE620_IO /* Compile-time configurable */
-#define DE620_IO 0x378
-#endif
-
-#ifndef DE620_IRQ /* Compile-time configurable */
-#define DE620_IRQ 7
-#endif
-
-#define DATA_PORT (dev->base_addr)
-#define STATUS_PORT (dev->base_addr + 1)
-#define COMMAND_PORT (dev->base_addr + 2)
-
-#define RUNT 60 /* Too small Ethernet packet */
-#define GIANT 1514 /* largest legal size packet, no fcs */
-
-/*
- * Force media with insmod:
- * insmod de620.o bnc=1
- * or
- * insmod de620.o utp=1
- *
- * Force io and/or irq with insmod:
- * insmod de620.o io=0x378 irq=7
- *
- * Make a clone skip the Ethernet-address range check:
- * insmod de620.o clone=1
- */
-static int bnc;
-static int utp;
-static int io = DE620_IO;
-static int irq = DE620_IRQ;
-static int clone = DE620_CLONE;
-
-static spinlock_t de620_lock;
-
-module_param(bnc, int, 0);
-module_param(utp, int, 0);
-module_param(io, int, 0);
-module_param(irq, int, 0);
-module_param(clone, int, 0);
-MODULE_PARM_DESC(bnc, "DE-620 set BNC medium (0-1)");
-MODULE_PARM_DESC(utp, "DE-620 set UTP medium (0-1)");
-MODULE_PARM_DESC(io, "DE-620 I/O base address, required");
-MODULE_PARM_DESC(irq, "DE-620 IRQ number, required");
-MODULE_PARM_DESC(clone, "Check also for non-D-Link DE-620 clones (0-1)");
-
-/***********************************************
- * *
- * Index to functions, as function prototypes. *
- * *
- ***********************************************/
-
-/*
- * Routines used internally. (See also "convenience macros.. below")
- */
-
-/* Put in the device structure. */
-static int de620_open(struct net_device *);
-static int de620_close(struct net_device *);
-static void de620_set_multicast_list(struct net_device *);
-static int de620_start_xmit(struct sk_buff *, struct net_device *);
-
-/* Dispatch from interrupts. */
-static irqreturn_t de620_interrupt(int, void *);
-static int de620_rx_intr(struct net_device *);
-
-/* Initialization */
-static int adapter_init(struct net_device *);
-static int read_eeprom(struct net_device *);
-
-
-/*
- * D-Link driver variables:
- */
-#define SCR_DEF NIBBLEMODE |INTON | SLEEP | AUTOTX
-#define TCR_DEF RXPB /* not used: | TXSUCINT | T16INT */
-#define DE620_RX_START_PAGE 12 /* 12 pages (=3k) reserved for tx */
-#define DEF_NIC_CMD IRQEN | ICEN | DS1
-
-static volatile byte NIC_Cmd;
-static volatile byte next_rx_page;
-static byte first_rx_page;
-static byte last_rx_page;
-static byte EIPRegister;
-
-static struct nic {
- byte NodeID[6];
- byte RAM_Size;
- byte Model;
- byte Media;
- byte SCR;
-} nic_data;
-
-/**********************************************************
- * *
- * Convenience macros/functions for D-Link DE-620 adapter *
- * *
- **********************************************************/
-#define de620_tx_buffs(dd) (inb(STATUS_PORT) & (TXBF0 | TXBF1))
-#define de620_flip_ds(dd) NIC_Cmd ^= DS0 | DS1; outb(NIC_Cmd, COMMAND_PORT);
-
-/* Check for ready-status, and return a nibble (high 4 bits) for data input */
-#ifdef COUNT_LOOPS
-static int tot_cnt;
-#endif
-static inline byte
-de620_ready(struct net_device *dev)
-{
- byte value;
- register short int cnt = 0;
-
- while ((((value = inb(STATUS_PORT)) & READY) == 0) && (cnt <= 1000))
- ++cnt;
-
-#ifdef COUNT_LOOPS
- tot_cnt += cnt;
-#endif
- return value & 0xf0; /* nibble */
-}
-
-static inline void
-de620_send_command(struct net_device *dev, byte cmd)
-{
- de620_ready(dev);
- if (cmd == W_DUMMY)
- outb(NIC_Cmd, COMMAND_PORT);
-
- outb(cmd, DATA_PORT);
-
- outb(NIC_Cmd ^ CS0, COMMAND_PORT);
- de620_ready(dev);
- outb(NIC_Cmd, COMMAND_PORT);
-}
-
-static inline void
-de620_put_byte(struct net_device *dev, byte value)
-{
- /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
- de620_ready(dev);
- outb(value, DATA_PORT);
- de620_flip_ds(dev);
-}
-
-static inline byte
-de620_read_byte(struct net_device *dev)
-{
- byte value;
-
- /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
- value = de620_ready(dev); /* High nibble */
- de620_flip_ds(dev);
- value |= de620_ready(dev) >> 4; /* Low nibble */
- return value;
-}
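
The read side mirrors the DE-600 idea: the adapter presents each half of a byte in the top four bits of the status port, and de620_read_byte() above stitches the two halves together, toggling DS0|DS1 in between. A hedged stand-alone sketch of just the reassembly step, with the status-port reads replaced by canned values (0xd0 then 0xe0 are assumed examples):

#include <stdio.h>

static unsigned char fake_status[2] = { 0xd0, 0xe0 };  /* assumed raw port values */
static int idx;

static unsigned char read_status_nibble(void)
{
        return fake_status[idx++] & 0xf0;       /* only the top nibble carries data */
}

int main(void)
{
        unsigned char value;

        value = read_status_nibble();           /* high nibble, kept in place   */
        /* ...the driver toggles DS0|DS1 here to advance the adapter...         */
        value |= read_status_nibble() >> 4;     /* low nibble, shifted down      */
        printf("reassembled byte: 0x%02x\n", value);    /* prints 0xde */
        return 0;
}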
-
-static inline void
-de620_write_block(struct net_device *dev, byte *buffer, int count, int pad)
-{
-#ifndef LOWSPEED
- byte uflip = NIC_Cmd ^ (DS0 | DS1);
- byte dflip = NIC_Cmd;
-#else /* LOWSPEED */
-#ifdef COUNT_LOOPS
- int bytes = count;
-#endif /* COUNT_LOOPS */
-#endif /* LOWSPEED */
-
-#ifdef LOWSPEED
-#ifdef COUNT_LOOPS
- tot_cnt = 0;
-#endif /* COUNT_LOOPS */
- /* No further optimization useful, the limit is in the adapter. */
- for ( ; count > 0; --count, ++buffer) {
- de620_put_byte(dev,*buffer);
- }
- for ( count = pad ; count > 0; --count, ++buffer) {
- de620_put_byte(dev, 0);
- }
- de620_send_command(dev,W_DUMMY);
-#ifdef COUNT_LOOPS
- /* trial debug output: loops per byte in de620_ready() */
- printk("WRITE(%d)\n", tot_cnt/((bytes?bytes:1)));
-#endif /* COUNT_LOOPS */
-#else /* not LOWSPEED */
- for ( ; count > 0; count -=2) {
- outb(*buffer++, DATA_PORT);
- outb(uflip, COMMAND_PORT);
- outb(*buffer++, DATA_PORT);
- outb(dflip, COMMAND_PORT);
- }
- de620_send_command(dev,W_DUMMY);
-#endif /* LOWSPEED */
-}
-
-static inline void
-de620_read_block(struct net_device *dev, byte *data, int count)
-{
-#ifndef LOWSPEED
- byte value;
- byte uflip = NIC_Cmd ^ (DS0 | DS1);
- byte dflip = NIC_Cmd;
-#else /* LOWSPEED */
-#ifdef COUNT_LOOPS
- int bytes = count;
-
- tot_cnt = 0;
-#endif /* COUNT_LOOPS */
-#endif /* LOWSPEED */
-
-#ifdef LOWSPEED
- /* No further optimization useful, the limit is in the adapter. */
- while (count-- > 0) {
- *data++ = de620_read_byte(dev);
- de620_flip_ds(dev);
- }
-#ifdef COUNT_LOOPS
- /* trial debug output: loops per byte in de620_ready() */
- printk("READ(%d)\n", tot_cnt/(2*(bytes?bytes:1)));
-#endif /* COUNT_LOOPS */
-#else /* not LOWSPEED */
- while (count-- > 0) {
- value = inb(STATUS_PORT) & 0xf0; /* High nibble */
- outb(uflip, COMMAND_PORT);
- *data++ = value | inb(STATUS_PORT) >> 4; /* Low nibble */
- outb(dflip , COMMAND_PORT);
- }
-#endif /* LOWSPEED */
-}
-
-static inline void
-de620_set_delay(struct net_device *dev)
-{
- de620_ready(dev);
- outb(W_DFR, DATA_PORT);
- outb(NIC_Cmd ^ CS0, COMMAND_PORT);
-
- de620_ready(dev);
-#ifdef LOWSPEED
- outb(WRITE_DELAY, DATA_PORT);
-#else
- outb(0, DATA_PORT);
-#endif
- de620_flip_ds(dev);
-
- de620_ready(dev);
-#ifdef LOWSPEED
- outb(READ_DELAY, DATA_PORT);
-#else
- outb(0, DATA_PORT);
-#endif
- de620_flip_ds(dev);
-}
-
-static inline void
-de620_set_register(struct net_device *dev, byte reg, byte value)
-{
- de620_ready(dev);
- outb(reg, DATA_PORT);
- outb(NIC_Cmd ^ CS0, COMMAND_PORT);
-
- de620_put_byte(dev, value);
-}
-
-static inline byte
-de620_get_register(struct net_device *dev, byte reg)
-{
- byte value;
-
- de620_send_command(dev,reg);
- value = de620_read_byte(dev);
- de620_send_command(dev,W_DUMMY);
-
- return value;
-}
-
-/*********************************************************************
- *
- * Open/initialize the board.
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is a non-reboot way to recover if something goes wrong.
- *
- */
-static int de620_open(struct net_device *dev)
-{
- int ret = request_irq(dev->irq, de620_interrupt, 0, dev->name, dev);
- if (ret) {
- printk (KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq);
- return ret;
- }
-
- if (adapter_init(dev)) {
- ret = -EIO;
- goto out_free_irq;
- }
-
- netif_start_queue(dev);
- return 0;
-
-out_free_irq:
- free_irq(dev->irq, dev);
- return ret;
-}
-
-/************************************************
- *
- * The inverse routine to de620_open().
- *
- */
-
-static int de620_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- /* disable recv */
- de620_set_register(dev, W_TCR, RXOFF);
- free_irq(dev->irq, dev);
- return 0;
-}
-
-/*********************************************
- *
- * Set or clear the multicast filter for this adaptor.
- * (no real multicast implemented for the DE-620, but she can be promiscuous...)
- *
- */
-
-static void de620_set_multicast_list(struct net_device *dev)
-{
- if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
- { /* Enable promiscuous mode */
- de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
- }
- else
- { /* Disable promiscuous mode, use normal mode */
- de620_set_register(dev, W_TCR, TCR_DEF);
- }
-}
-
-/*******************************************************
- *
- * Handle timeouts on transmit
- */
-
-static void de620_timeout(struct net_device *dev)
-{
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, "network cable problem");
- /* Restart the adapter. */
- if (!adapter_init(dev)) /* maybe close it */
- netif_wake_queue(dev);
-}
-
-/*******************************************************
- *
- * Copy a buffer to the adapter transmit page memory.
- * Start sending.
- */
-static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned long flags;
- int len;
- byte *buffer = skb->data;
- byte using_txbuf;
-
- using_txbuf = de620_tx_buffs(dev); /* Peek at the adapter */
-
- netif_stop_queue(dev);
-
-
- if ((len = skb->len) < RUNT)
- len = RUNT;
- if (len & 1) /* send an even number of bytes */
- ++len;
-
- /* Start real output */
-
- spin_lock_irqsave(&de620_lock, flags);
- pr_debug("de620_start_xmit: len=%d, bufs 0x%02x\n",
- (int)skb->len, using_txbuf);
-
- /* select a free tx buffer. if there is one... */
- switch (using_txbuf) {
- default: /* both are free: use TXBF0 */
- case TXBF1: /* use TXBF0 */
- de620_send_command(dev,W_CR | RW0);
- using_txbuf |= TXBF0;
- break;
-
- case TXBF0: /* use TXBF1 */
- de620_send_command(dev,W_CR | RW1);
- using_txbuf |= TXBF1;
- break;
-
- case (TXBF0 | TXBF1): /* NONE!!! */
- printk(KERN_WARNING "%s: No tx-buffer available!\n", dev->name);
- spin_unlock_irqrestore(&de620_lock, flags);
- return NETDEV_TX_BUSY;
- }
- de620_write_block(dev, buffer, skb->len, len-skb->len);
-
- if(!(using_txbuf == (TXBF0 | TXBF1)))
- netif_wake_queue(dev);
-
- dev->stats.tx_packets++;
- spin_unlock_irqrestore(&de620_lock, flags);
- dev_kfree_skb (skb);
- return NETDEV_TX_OK;
-}
-
-/*****************************************************
- *
- * Handle the network interface interrupts.
- *
- */
-static irqreturn_t
-de620_interrupt(int irq_in, void *dev_id)
-{
- struct net_device *dev = dev_id;
- byte irq_status;
- int bogus_count = 0;
- int again = 0;
-
- spin_lock(&de620_lock);
-
- /* Read the status register (_not_ the status port) */
- irq_status = de620_get_register(dev, R_STS);
-
- pr_debug("de620_interrupt (%2.2X)\n", irq_status);
-
- if (irq_status & RXGOOD) {
- do {
- again = de620_rx_intr(dev);
- pr_debug("again=%d\n", again);
- }
- while (again && (++bogus_count < 100));
- }
-
- if(de620_tx_buffs(dev) != (TXBF0 | TXBF1))
- netif_wake_queue(dev);
-
- spin_unlock(&de620_lock);
- return IRQ_HANDLED;
-}
-
-/**************************************
- *
- * Get a packet from the adapter
- *
- * Send it "upstairs"
- *
- */
-static int de620_rx_intr(struct net_device *dev)
-{
- struct header_buf {
- byte status;
- byte Rx_NextPage;
- unsigned short Rx_ByteCount;
- } header_buf;
- struct sk_buff *skb;
- int size;
- byte *buffer;
- byte pagelink;
- byte curr_page;
-
- pr_debug("de620_rx_intr: next_rx_page = %d\n", next_rx_page);
-
- /* Tell the adapter that we are going to read data, and from where */
- de620_send_command(dev, W_CR | RRN);
- de620_set_register(dev, W_RSA1, next_rx_page);
- de620_set_register(dev, W_RSA0, 0);
-
- /* Deep breath, and away we goooooo */
- de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf));
- pr_debug("page status=0x%02x, nextpage=%d, packetsize=%d\n",
- header_buf.status, header_buf.Rx_NextPage,
- header_buf.Rx_ByteCount);
-
- /* Plausible page header? */
- pagelink = header_buf.Rx_NextPage;
- if ((pagelink < first_rx_page) || (last_rx_page < pagelink)) {
- /* Ouch... Forget it! Skip all and start afresh... */
- printk(KERN_WARNING "%s: Ring overrun? Restoring...\n", dev->name);
- /* You win some, you lose some. And sometimes plenty... */
- adapter_init(dev);
- netif_wake_queue(dev);
- dev->stats.rx_over_errors++;
- return 0;
- }
-
- /* OK, this look good, so far. Let's see if it's consistent... */
- /* Let's compute the start of the next packet, based on where we are */
- pagelink = next_rx_page +
- ((header_buf.Rx_ByteCount + (4 - 1 + 0x100)) >> 8);
-
- /* Are we going to wrap around the page counter? */
- if (pagelink > last_rx_page)
- pagelink -= (last_rx_page - first_rx_page + 1);
-
- /* Is the _computed_ next page number equal to what the adapter says? */
- if (pagelink != header_buf.Rx_NextPage) {
- /* Naah, we'll skip this packet. Probably bogus data as well */
- printk(KERN_WARNING "%s: Page link out of sync! Restoring...\n", dev->name);
- next_rx_page = header_buf.Rx_NextPage; /* at least a try... */
- de620_send_command(dev, W_DUMMY);
- de620_set_register(dev, W_NPRF, next_rx_page);
- dev->stats.rx_over_errors++;
- return 0;
- }
- next_rx_page = pagelink;
-
- size = header_buf.Rx_ByteCount - 4;
- if ((size < RUNT) || (GIANT < size)) {
- printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size);
- }
- else { /* Good packet? */
- skb = netdev_alloc_skb(dev, size + 2);
- if (skb == NULL) { /* Yeah, but no place to put it... */
- printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
- dev->stats.rx_dropped++;
- }
- else { /* Yep! Go get it! */
- skb_reserve(skb,2); /* Align */
- /* skb->data points to the start of sk_buff data area */
- buffer = skb_put(skb,size);
- /* copy the packet into the buffer */
- de620_read_block(dev, buffer, size);
- pr_debug("Read %d bytes\n", size);
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb); /* deliver it "upstairs" */
- /* count all receives */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += size;
- }
- }
-
- /* Let's peek ahead to see if we have read the last current packet */
- /* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */
- curr_page = de620_get_register(dev, R_CPR);
- de620_set_register(dev, W_NPRF, next_rx_page);
- pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page);
-
- return next_rx_page != curr_page; /* That was slightly tricky... */
-}
-
-/*********************************************
- *
- * Reset the adapter to a known state
- *
- */
-static int adapter_init(struct net_device *dev)
-{
- int i;
- static int was_down;
-
- if ((nic_data.Model == 3) || (nic_data.Model == 0)) { /* CT */
- EIPRegister = NCTL0;
- if (nic_data.Media != 1)
- EIPRegister |= NIS0; /* not BNC */
- }
- else if (nic_data.Model == 2) { /* UTP */
- EIPRegister = NCTL0 | NIS0;
- }
-
- if (utp)
- EIPRegister = NCTL0 | NIS0;
- if (bnc)
- EIPRegister = NCTL0;
-
- de620_send_command(dev, W_CR | RNOP | CLEAR);
- de620_send_command(dev, W_CR | RNOP);
-
- de620_set_register(dev, W_SCR, SCR_DEF);
- /* disable recv to wait init */
- de620_set_register(dev, W_TCR, RXOFF);
-
- /* Set the node ID in the adapter */
- for (i = 0; i < 6; ++i) { /* W_PARn = 0xaa + n */
- de620_set_register(dev, W_PAR0 + i, dev->dev_addr[i]);
- }
-
- de620_set_register(dev, W_EIP, EIPRegister);
-
- next_rx_page = first_rx_page = DE620_RX_START_PAGE;
- if (nic_data.RAM_Size)
- last_rx_page = nic_data.RAM_Size - 1;
- else /* 64k RAM */
- last_rx_page = 255;
-
- de620_set_register(dev, W_SPR, first_rx_page); /* Start Page Register*/
- de620_set_register(dev, W_EPR, last_rx_page); /* End Page Register */
- de620_set_register(dev, W_CPR, first_rx_page);/*Current Page Register*/
- de620_send_command(dev, W_NPR | first_rx_page); /* Next Page Register*/
- de620_send_command(dev, W_DUMMY);
- de620_set_delay(dev);
-
- /* Final sanity check: Anybody out there? */
- /* Let's hope some bits from the statusregister make a good check */
-#define CHECK_MASK ( 0 | TXSUC | T16 | 0 | RXCRC | RXSHORT | 0 | 0 )
-#define CHECK_OK ( 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 )
- /* success: X 0 0 X 0 0 X X */
- /* ignore: EEDI RXGOOD COLS LNKS*/
-
- if (((i = de620_get_register(dev, R_STS)) & CHECK_MASK) != CHECK_OK) {
- printk(KERN_ERR "%s: Something has happened to the DE-620! Please check it"
-#ifdef SHUTDOWN_WHEN_LOST
- " and do a new ifconfig"
-#endif
- "! (%02x)\n", dev->name, i);
-#ifdef SHUTDOWN_WHEN_LOST
- /* Goodbye, cruel world... */
- dev->flags &= ~IFF_UP;
- de620_close(dev);
-#endif
- was_down = 1;
- return 1; /* failed */
- }
- if (was_down) {
- printk(KERN_WARNING "%s: Thanks, I feel much better now!\n", dev->name);
- was_down = 0;
- }
-
- /* All OK, go ahead... */
- de620_set_register(dev, W_TCR, TCR_DEF);
-
- return 0; /* all ok */
-}
-
-static const struct net_device_ops de620_netdev_ops = {
- .ndo_open = de620_open,
- .ndo_stop = de620_close,
- .ndo_start_xmit = de620_start_xmit,
- .ndo_tx_timeout = de620_timeout,
- .ndo_set_rx_mode = de620_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/******************************************************************************
- *
- * Only start-up code below
- *
- */
-/****************************************
- *
- * Check if there is a DE-620 connected
- */
-struct net_device * __init de620_probe(int unit)
-{
- byte checkbyte = 0xa5;
- struct net_device *dev;
- int err = -ENOMEM;
- int i;
-
- dev = alloc_etherdev(0);
- if (!dev)
- goto out;
-
- spin_lock_init(&de620_lock);
-
- /*
- * This is where the base_addr and irq gets set.
- * Tunable at compile-time and insmod-time
- */
- dev->base_addr = io;
- dev->irq = irq;
-
- /* allow overriding parameters on command line */
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- pr_debug("%s", version);
-
- printk(KERN_INFO "D-Link DE-620 pocket adapter");
-
- if (!request_region(dev->base_addr, 3, "de620")) {
- printk(" io 0x%3lX, which is busy.\n", dev->base_addr);
- err = -EBUSY;
- goto out1;
- }
-
- /* Initially, configure basic nibble mode, so we can read the EEPROM */
- NIC_Cmd = DEF_NIC_CMD;
- de620_set_register(dev, W_EIP, EIPRegister);
-
- /* Anybody out there? */
- de620_set_register(dev, W_CPR, checkbyte);
- checkbyte = de620_get_register(dev, R_CPR);
-
- if ((checkbyte != 0xa5) || (read_eeprom(dev) != 0)) {
- printk(" not identified in the printer port\n");
- err = -ENODEV;
- goto out2;
- }
-
- /* else, got it! */
- dev->dev_addr[0] = nic_data.NodeID[0];
- for (i = 1; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = nic_data.NodeID[i];
- dev->broadcast[i] = 0xff;
- }
-
- printk(", Ethernet Address: %pM", dev->dev_addr);
-
- printk(" (%dk RAM,",
- (nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64);
-
- if (nic_data.Media == 1)
- printk(" BNC)\n");
- else
- printk(" UTP)\n");
-
- dev->netdev_ops = &de620_netdev_ops;
- dev->watchdog_timeo = HZ*2;
-
- /* base_addr and irq are already set, see above! */
-
- /* dump eeprom */
- pr_debug("\nEEPROM contents:\n"
- "RAM_Size = 0x%02X\n"
- "NodeID = %pM\n"
- "Model = %d\n"
- "Media = %d\n"
- "SCR = 0x%02x\n", nic_data.RAM_Size, nic_data.NodeID,
- nic_data.Model, nic_data.Media, nic_data.SCR);
-
- err = register_netdev(dev);
- if (err)
- goto out2;
- return dev;
-
-out2:
- release_region(dev->base_addr, 3);
-out1:
- free_netdev(dev);
-out:
- return ERR_PTR(err);
-}
-
-/**********************************
- *
- * Read info from on-board EEPROM
- *
- * Note: Bitwise serial I/O to/from the EEPROM vi the status _register_!
- */
-#define sendit(dev,data) de620_set_register(dev, W_EIP, data | EIPRegister);
-
-static unsigned short __init ReadAWord(struct net_device *dev, int from)
-{
- unsigned short data;
- int nbits;
-
- /* cs [__~~] SET SEND STATE */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 0); sendit(dev, 1); sendit(dev, 5); sendit(dev, 4);
-
- /* Send the 9-bit address from where we want to read the 16-bit word */
- for (nbits = 9; nbits > 0; --nbits, from <<= 1) {
- if (from & 0x0100) { /* bit set? */
- /* cs [~~~~] SEND 1 */
- /* di [~~~~] */
- /* sck [_~~_] */
- sendit(dev, 6); sendit(dev, 7); sendit(dev, 7); sendit(dev, 6);
- }
- else {
- /* cs [~~~~] SEND 0 */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
- }
- }
-
- /* Shift in the 16-bit word. The bits appear serially in EEDI (=0x80) */
- for (data = 0, nbits = 16; nbits > 0; --nbits) {
- /* cs [~~~~] SEND 0 */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
- data = (data << 1) | ((de620_get_register(dev, R_STS) & EEDI) >> 7);
- }
- /* cs [____] RESET SEND STATE */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 0); sendit(dev, 1); sendit(dev, 1); sendit(dev, 0);
-
- return data;
-}
-
-static int __init read_eeprom(struct net_device *dev)
-{
- unsigned short wrd;
-
- /* D-Link Ethernet addresses are in the series 00:80:c8:7X:XX:XX:XX */
- wrd = ReadAWord(dev, 0x1aa); /* bytes 0 + 1 of NodeID */
- if (!clone && (wrd != htons(0x0080))) /* Valid D-Link ether sequence? */
- return -1; /* Nope, not a DE-620 */
- nic_data.NodeID[0] = wrd & 0xff;
- nic_data.NodeID[1] = wrd >> 8;
-
- wrd = ReadAWord(dev, 0x1ab); /* bytes 2 + 3 of NodeID */
- if (!clone && ((wrd & 0xff) != 0xc8)) /* Valid D-Link ether sequence? */
- return -1; /* Nope, not a DE-620 */
- nic_data.NodeID[2] = wrd & 0xff;
- nic_data.NodeID[3] = wrd >> 8;
-
- wrd = ReadAWord(dev, 0x1ac); /* bytes 4 + 5 of NodeID */
- nic_data.NodeID[4] = wrd & 0xff;
- nic_data.NodeID[5] = wrd >> 8;
-
- wrd = ReadAWord(dev, 0x1ad); /* RAM size in pages (256 bytes). 0 = 64k */
- nic_data.RAM_Size = (wrd >> 8);
-
- wrd = ReadAWord(dev, 0x1ae); /* hardware model (CT = 3) */
- nic_data.Model = (wrd & 0xff);
-
- wrd = ReadAWord(dev, 0x1af); /* media (indicates BNC/UTP) */
- nic_data.Media = (wrd & 0xff);
-
- wrd = ReadAWord(dev, 0x1a8); /* System Configuration Register */
- nic_data.SCR = (wrd >> 8);
-
- return 0; /* no errors */
-}
-
-/******************************************************************************
- *
- * Loadable module skeleton
- *
- */
-#ifdef MODULE
-static struct net_device *de620_dev;
-
-int __init init_module(void)
-{
- de620_dev = de620_probe(-1);
- if (IS_ERR(de620_dev))
- return PTR_ERR(de620_dev);
- return 0;
-}
-
-void cleanup_module(void)
-{
- unregister_netdev(de620_dev);
- release_region(de620_dev->base_addr, 3);
- free_netdev(de620_dev);
-}
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dlink/de620.h b/drivers/net/ethernet/dlink/de620.h
deleted file mode 100644
index e8d9a88f4cb5..000000000000
--- a/drivers/net/ethernet/dlink/de620.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*********************************************************
- * *
- * Definition of D-Link DE-620 Ethernet Pocket adapter *
- * *
- *********************************************************/
-
-/* DE-620's CMD port Command */
-#define CS0 0x08 /* 1->0 command strobe */
-#define ICEN 0x04 /* 0=enable DL3520 host interface */
-#define DS0 0x02 /* 1->0 data strobe 0 */
-#define DS1 0x01 /* 1->0 data strobe 1 */
-
-#define WDIR 0x20 /* general 0=read 1=write */
-#define RDIR 0x00 /* (not 100% confirm ) */
-#define PS2WDIR 0x00 /* ps/2 mode 1=read, 0=write */
-#define PS2RDIR 0x20
-
-#define IRQEN 0x10 /* 1 = enable printer IRQ line */
-#define SELECTIN 0x08 /* 1 = select printer */
-#define INITP 0x04 /* 0 = initial printer */
-#define AUTOFEED 0x02 /* 1 = printer auto form feed */
-#define STROBE 0x01 /* 0->1 data strobe */
-
-#define RESET 0x08
-#define NIS0 0x20 /* 0 = BNC, 1 = UTP */
-#define NCTL0 0x10
-
-/* DE-620 DIC Command */
-#define W_DUMMY 0x00 /* DIC reserved command */
-#define W_CR 0x20 /* DIC write command register */
-#define W_NPR 0x40 /* DIC write Next Page Register */
-#define W_TBR 0x60 /* DIC write Tx Byte Count 1 reg */
-#define W_RSA 0x80 /* DIC write Remote Start Addr 1 */
-
-/* DE-620's STAT port bits 7-4 */
-#define EMPTY 0x80 /* 1 = receive buffer empty */
-#define INTLEVEL 0x40 /* 1 = interrupt level is high */
-#define TXBF1 0x20 /* 1 = transmit buffer 1 is in use */
-#define TXBF0 0x10 /* 1 = transmit buffer 0 is in use */
-#define READY 0x08 /* 1 = h/w ready to accept cmd/data */
-
-/* IDC 1 Command */
-#define W_RSA1 0xa0 /* write remote start address 1 */
-#define W_RSA0 0xa1 /* write remote start address 0 */
-#define W_NPRF 0xa2 /* write next page register NPR15-NPR8 */
-#define W_DFR 0xa3 /* write delay factor register */
-#define W_CPR 0xa4 /* write current page register */
-#define W_SPR 0xa5 /* write start page register */
-#define W_EPR 0xa6 /* write end page register */
-#define W_SCR 0xa7 /* write system configuration register */
-#define W_TCR 0xa8 /* write Transceiver Configuration reg */
-#define W_EIP 0xa9 /* write EEPM Interface port */
-#define W_PAR0 0xaa /* write physical address register 0 */
-#define W_PAR1 0xab /* write physical address register 1 */
-#define W_PAR2 0xac /* write physical address register 2 */
-#define W_PAR3 0xad /* write physical address register 3 */
-#define W_PAR4 0xae /* write physical address register 4 */
-#define W_PAR5 0xaf /* write physical address register 5 */
-
-/* IDC 2 Command */
-#define R_STS 0xc0 /* read status register */
-#define R_CPR 0xc1 /* read current page register */
-#define R_BPR 0xc2 /* read boundary page register */
-#define R_TDR 0xc3 /* read time domain reflectometry reg */
-
-/* STATUS Register */
-#define EEDI 0x80 /* EEPM DO pin */
-#define TXSUC 0x40 /* tx success */
-#define T16 0x20 /* tx fail 16 times */
-#define TS1 0x40 /* 0=Tx success, 1=T16 */
-#define TS0 0x20 /* 0=Tx success, 1=T16 */
-#define RXGOOD 0x10 /* rx a good packet */
-#define RXCRC 0x08 /* rx a CRC error packet */
-#define RXSHORT 0x04 /* rx a short packet */
-#define COLS 0x02 /* coaxial collision status */
-#define LNKS 0x01 /* UTP link status */
-
-/* Command Register */
-#define CLEAR 0x10 /* reset part of hardware */
-#define NOPER 0x08 /* No Operation */
-#define RNOP 0x08
-#define RRA 0x06 /* After RR then auto-advance NPR & BPR(=NPR-1) */
-#define RRN 0x04 /* Normal Remote Read mode */
-#define RW1 0x02 /* Remote Write tx buffer 1 ( page 6 - 11 ) */
-#define RW0 0x00 /* Remote Write tx buffer 0 ( page 0 - 5 ) */
-#define TXEN 0x01 /* 0->1 tx enable */
-
-/* System Configuration Register */
-#define TESTON 0x80 /* test host data transfer reliability */
-#define SLEEP 0x40 /* sleep mode */
-#if 0
-#define FASTMODE 0x04 /* fast mode for intel 82360SL fast mode */
-#define BYTEMODE 0x02 /* byte mode */
-#else
-#define FASTMODE 0x20 /* fast mode for intel 82360SL fast mode */
-#define BYTEMODE 0x10 /* byte mode */
-#endif
-#define NIBBLEMODE 0x00 /* nibble mode */
-#define IRQINV 0x08 /* turn off IRQ line inverter */
-#define IRQNML 0x00 /* turn on IRQ line inverter */
-#define INTON 0x04
-#define AUTOFFSET 0x02 /* auto shift address to TPR+12 */
-#define AUTOTX 0x01 /* auto tx when leave RW mode */
-
-/* Transceiver Configuration Register */
-#define JABBER 0x80 /* generate jabber condition */
-#define TXSUCINT 0x40 /* enable tx success interrupt */
-#define T16INT 0x20 /* enable T16 interrupt */
-#define RXERRPKT 0x10 /* accept CRC error or short packet */
-#define EXTERNALB2 0x0C /* external loopback 2 */
-#define EXTERNALB1 0x08 /* external loopback 1 */
-#define INTERNALB 0x04 /* internal loopback */
-#define NMLOPERATE 0x00 /* normal operation */
-#define RXPBM 0x03 /* rx physical, broadcast, multicast */
-#define RXPB 0x02 /* rx physical, broadcast */
-#define RXALL 0x01 /* rx all packet */
-#define RXOFF 0x00 /* rx disable */
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1d342d37915c..110d26f4c602 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1156,9 +1156,10 @@ set_multicast (struct net_device *dev)
static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, "dl2k");
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pdev));
+
+ strlcpy(info->driver, "dl2k", sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
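
The strcpy() to strlcpy() conversion above (and the matching ones in the faraday and fec drivers further down) exists because the ethtool_drvinfo fields are fixed-size character arrays, so every copy into them has to be bounded. A minimal user-space sketch of the same idiom follows; it uses snprintf() since strlcpy() is a kernel/BSD API, and the struct and names are illustrative rather than taken from the driver:

#include <stdio.h>

struct drvinfo {                        /* stand-in for struct ethtool_drvinfo */
        char driver[32];
        char version[32];
};

static void fill_drvinfo(struct drvinfo *info, const char *drv, const char *ver)
{
        /* snprintf() always NUL-terminates and never writes past the buffer,
         * which is the property the strlcpy() calls above rely on. */
        snprintf(info->driver, sizeof(info->driver), "%s", drv);
        snprintf(info->version, sizeof(info->version), "%s", ver);
}

int main(void)
{
        struct drvinfo info;

        fill_drvinfo(&info, "dl2k", "a-version-string-much-longer-than-thirty-one-characters");
        printf("%s %s\n", info.driver, info.version);   /* version is truncated, never overflowed */
        return 0;
}
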
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 28fc11b2f1ea..50d9c6315930 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -530,7 +530,6 @@ static int sundance_probe1(struct pci_dev *pdev,
for (i = 0; i < 3; i++)
((__le16 *)dev->dev_addr)[i] =
cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
np = netdev_priv(dev);
np->base = ioaddr;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 2c177b329c8b..f3d60eb13c3a 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -281,11 +281,11 @@ static int dnet_mii_probe(struct net_device *dev)
/* attach the mac to the phy */
if (bp->capabilities & DNET_HAS_RMII) {
phydev = phy_connect(dev, dev_name(&phydev->dev),
- &dnet_handle_link_change, 0,
+ &dnet_handle_link_change,
PHY_INTERFACE_MODE_RMII);
} else {
phydev = phy_connect(dev, dev_name(&phydev->dev),
- &dnet_handle_link_change, 0,
+ &dnet_handle_link_change,
PHY_INTERFACE_MODE_MII);
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 3bc1912afba9..28ceb8414185 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,15 +34,15 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.4.161.0u"
+#define DRV_VER "4.6.62.0u"
#define DRV_NAME "be2net"
-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME "Emulex OneConnect 10Gbps NIC"
+#define BE_NAME "Emulex BladeEngine2"
+#define BE3_NAME "Emulex BladeEngine3"
+#define OC_NAME "Emulex OneConnect"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define OC_NAME_SH OC_NAME "(Skyhawk)"
-#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define EMULEX_VENDOR_ID 0x10df
@@ -190,6 +190,7 @@ struct be_eq_obj {
u8 idx; /* array index */
u16 tx_budget;
+ u16 spurious_intr;
struct napi_struct napi;
struct be_adapter *adapter;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8a250c38fb82..071aea79d218 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -93,13 +93,16 @@ static void be_mcc_notify(struct be_adapter *adapter)
* little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
+ u32 flags;
+
if (compl->flags != 0) {
- compl->flags = le32_to_cpu(compl->flags);
- BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
- return true;
- } else {
- return false;
+ flags = le32_to_cpu(compl->flags);
+ if (flags & CQE_FLAGS_VALID_MASK) {
+ compl->flags = flags;
+ return true;
+ }
}
+ return false;
}
/* Need to reset the entire word that houses the valid bit */
@@ -3138,6 +3141,39 @@ err:
return status;
}
+int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+ int vf_num)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_iface_list *req;
+ struct be_cmd_resp_get_iface_list *resp;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
+ wrb, NULL);
+ req->hdr.domain = vf_num + 1;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ resp = (struct be_cmd_resp_get_iface_list *)req;
+ vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
/* Uses sync mcc */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
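
The reworked be_mcc_compl_is_new() above only stores the byte-swapped flags back into the completion entry once the valid bit has been confirmed, so an entry the hardware has not finished writing is left untouched for the next poll. A small self-contained sketch of that read-validate-commit pattern is below; the struct, mask and helper are illustrative and assume a little-endian host, not the adapter's real layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAGS_VALID_MASK 0x80000000u    /* illustrative valid bit */

struct compl_entry {
        uint32_t flags;                 /* written by the device in little-endian order */
};

static uint32_t le32_to_cpu_u32(uint32_t v)
{
        return v;                       /* no-op on a little-endian host; swap bytes elsewhere */
}

static bool compl_is_new(struct compl_entry *c)
{
        uint32_t flags;

        if (c->flags == 0)
                return false;

        flags = le32_to_cpu_u32(c->flags);
        if (!(flags & FLAGS_VALID_MASK))
                return false;           /* not valid yet: leave the raw value for the next poll */

        c->flags = flags;               /* commit the CPU-order value only once it is known valid */
        return true;
}

int main(void)
{
        struct compl_entry c = { .flags = 0x80000001u };
        bool is_new = compl_is_new(&c);

        printf("new: %d, flags now 0x%08x\n", is_new, (unsigned int)c.flags);
        return 0;
}
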
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d6552e19ffee..96970860c915 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -203,6 +203,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
+#define OPCODE_COMMON_GET_IFACE_LIST 194
#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
#define OPCODE_ETH_RSS_CONFIG 1
@@ -1795,6 +1796,23 @@ static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
return flags & adapter->cmd_privileges ? true : false;
}
+/************** Get IFACE LIST *******************/
+struct be_if_desc {
+ u32 if_id;
+ u32 cap_flags;
+ u32 en_flags;
+};
+
+struct be_cmd_req_get_iface_list {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_get_iface_list {
+ struct be_cmd_req_hdr hdr;
+ u32 if_cnt;
+ struct be_if_desc if_desc;
+};
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_fw_wait_ready(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1917,4 +1935,6 @@ extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
u8 domain);
+extern int be_cmd_get_if_id(struct be_adapter *adapter,
+ struct be_vf_cfg *vf_cfg, int vf_num);
extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 00454a10f88d..76b302f30c87 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -183,12 +183,12 @@ static void be_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
- strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
- if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
- strcat(drvinfo->fw_version, " [");
- strcat(drvinfo->fw_version, fw_on_flash);
- strcat(drvinfo->fw_version, "]");
- }
+ if (!memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN))
+ strlcpy(drvinfo->fw_version, adapter->fw_ver,
+ sizeof(drvinfo->fw_version));
+ else
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%s [%s]", adapter->fw_ver, fw_on_flash);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 9dca22be8125..3860888ac711 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -25,7 +25,7 @@
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
static unsigned int num_vfs;
@@ -2026,19 +2026,30 @@ static irqreturn_t be_intx(int irq, void *dev)
struct be_adapter *adapter = eqo->adapter;
int num_evts = 0;
- /* On Lancer, clear-intr bit of the EQ DB does not work.
- * INTx is de-asserted only on notifying num evts.
+ /* IRQ is not expected when NAPI is scheduled as the EQ
+ * will not be armed.
+ * But, this can happen on Lancer INTx where it takes
+ * a while to de-assert INTx or in BE2 where occasionally
+ * an interrupt may be raised even when EQ is unarmed.
+ * If NAPI is already scheduled, then counting & notifying
+ * events will orphan them.
*/
- if (lancer_chip(adapter))
+ if (napi_schedule_prep(&eqo->napi)) {
num_evts = events_get(eqo);
+ __napi_schedule(&eqo->napi);
+ if (num_evts)
+ eqo->spurious_intr = 0;
+ }
+ be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
- /* The EQ-notify may not de-assert INTx rightaway, causing
- * the ISR to be invoked again. So, return HANDLED even when
- * num_evts is zero.
+ /* Return IRQ_HANDLED only for the first spurious intr
+ * after a valid intr to stop the kernel from branding
+ * this irq as a bad one!
*/
- be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
- napi_schedule(&eqo->napi);
- return IRQ_HANDLED;
+ if (num_evts || eqo->spurious_intr++ == 0)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
}
static irqreturn_t be_msix(int irq, void *dev)
@@ -2586,7 +2597,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
* These addresses are programmed in the ASIC by the PF and the VF driver
* queries for the MAC address during its probe.
*/
-static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
+static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
u32 vf;
int status = 0;
@@ -2615,13 +2626,34 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
return status;
}
+static int be_vfs_mac_query(struct be_adapter *adapter)
+{
+ int status, vf;
+ u8 mac[ETH_ALEN];
+ struct be_vf_cfg *vf_cfg;
+ bool active;
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ be_cmd_get_mac_from_list(adapter, mac, &active,
+ &vf_cfg->pmac_id, 0);
+
+ status = be_cmd_mac_addr_query(adapter, mac, false,
+ vf_cfg->if_handle, 0);
+ if (status)
+ return status;
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
+ }
+ return 0;
+}
+
static void be_vf_clear(struct be_adapter *adapter)
{
struct be_vf_cfg *vf_cfg;
u32 vf;
if (be_find_vfs(adapter, ASSIGNED)) {
- dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
+ dev_warn(&adapter->pdev->dev,
+ "VFs are assigned to VMs: not disabling VFs\n");
goto done;
}
@@ -2670,21 +2702,29 @@ static int be_clear(struct be_adapter *adapter)
return 0;
}
-static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
- u32 *cap_flags, u8 domain)
+static int be_vfs_if_create(struct be_adapter *adapter)
{
- bool profile_present = false;
+ struct be_vf_cfg *vf_cfg;
+ u32 cap_flags, en_flags, vf;
int status;
- if (lancer_chip(adapter)) {
- status = be_cmd_get_profile_config(adapter, cap_flags, domain);
- if (!status)
- profile_present = true;
- }
+ cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (!BE3_chip(adapter))
+ be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
- if (!profile_present)
- *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
+ /* If a FW profile exists, then cap_flags are updated */
+ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
+ if (status)
+ goto err;
+ }
+err:
+ return status;
}
static int be_vf_setup_init(struct be_adapter *adapter)
@@ -2707,65 +2747,70 @@ static int be_vf_setup_init(struct be_adapter *adapter)
static int be_vf_setup(struct be_adapter *adapter)
{
struct be_vf_cfg *vf_cfg;
- struct device *dev = &adapter->pdev->dev;
- u32 cap_flags, en_flags, vf;
u16 def_vlan, lnk_speed;
- int status, enabled_vfs;
-
- enabled_vfs = be_find_vfs(adapter, ENABLED);
- if (enabled_vfs) {
- dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
- dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
- return 0;
- }
-
- if (num_vfs > adapter->dev_num_vfs) {
- dev_warn(dev, "Device supports %d VFs and not %d\n",
- adapter->dev_num_vfs, num_vfs);
- num_vfs = adapter->dev_num_vfs;
- }
+ int status, old_vfs, vf;
+ struct device *dev = &adapter->pdev->dev;
- status = pci_enable_sriov(adapter->pdev, num_vfs);
- if (!status) {
- adapter->num_vfs = num_vfs;
+ old_vfs = be_find_vfs(adapter, ENABLED);
+ if (old_vfs) {
+ dev_info(dev, "%d VFs are already enabled\n", old_vfs);
+ if (old_vfs != num_vfs)
+ dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
+ adapter->num_vfs = old_vfs;
} else {
- /* Platform doesn't support SRIOV though device supports it */
- dev_warn(dev, "SRIOV enable failed\n");
- return 0;
+ if (num_vfs > adapter->dev_num_vfs)
+ dev_info(dev, "Device supports %d VFs and not %d\n",
+ adapter->dev_num_vfs, num_vfs);
+ adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
+
+ status = pci_enable_sriov(adapter->pdev, num_vfs);
+ if (status) {
+ dev_err(dev, "SRIOV enable failed\n");
+ adapter->num_vfs = 0;
+ return 0;
+ }
}
status = be_vf_setup_init(adapter);
if (status)
goto err;
- for_all_vfs(adapter, vf_cfg, vf) {
- be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
-
- en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST);
-
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- &vf_cfg->if_handle, vf + 1);
+ if (old_vfs) {
+ for_all_vfs(adapter, vf_cfg, vf) {
+ status = be_cmd_get_if_id(adapter, vf_cfg, vf);
+ if (status)
+ goto err;
+ }
+ } else {
+ status = be_vfs_if_create(adapter);
if (status)
goto err;
}
- if (!enabled_vfs) {
+ if (old_vfs) {
+ status = be_vfs_mac_query(adapter);
+ if (status)
+ goto err;
+ } else {
status = be_vf_eth_addr_config(adapter);
if (status)
goto err;
}
for_all_vfs(adapter, vf_cfg, vf) {
- lnk_speed = 1000;
- status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
- if (status)
- goto err;
- vf_cfg->tx_rate = lnk_speed * 10;
+ /* BE3 FW, by default, caps VF TX-rate to 100mbps.
+ * Allow full available bandwidth
+ */
+ if (BE3_chip(adapter) && !old_vfs)
+ be_cmd_set_qos(adapter, 1000, vf+1);
+
+ status = be_cmd_link_status_query(adapter, &lnk_speed,
+ NULL, vf + 1);
+ if (!status)
+ vf_cfg->tx_rate = lnk_speed;
status = be_cmd_get_hsw_config(adapter, &def_vlan,
- vf + 1, vf_cfg->if_handle);
+ vf + 1, vf_cfg->if_handle);
if (status)
goto err;
vf_cfg->def_vid = def_vlan;
@@ -2774,6 +2819,8 @@ static int be_vf_setup(struct be_adapter *adapter)
}
return 0;
err:
+ dev_err(dev, "VF setup failed\n");
+ be_vf_clear(adapter);
return status;
}
@@ -2827,12 +2874,12 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
static void be_get_resources(struct be_adapter *adapter)
{
- int status;
+ u16 dev_num_vfs;
+ int pos, status;
bool profile_present = false;
- if (lancer_chip(adapter)) {
+ if (!BEx_chip(adapter)) {
status = be_cmd_get_func_config(adapter);
-
if (!status)
profile_present = true;
}
@@ -2888,13 +2935,21 @@ static void be_get_resources(struct be_adapter *adapter)
if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
}
+
+ pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
+ &dev_num_vfs);
+ if (BE3_chip(adapter))
+ dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
+ adapter->dev_num_vfs = dev_num_vfs;
+ }
}
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
- int pos, status;
- u16 dev_num_vfs;
+ int status;
status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
&adapter->function_mode,
@@ -2912,14 +2967,6 @@ static int be_get_config(struct be_adapter *adapter)
goto err;
}
- pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
- &dev_num_vfs);
- if (!lancer_chip(adapter))
- dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
- adapter->dev_num_vfs = dev_num_vfs;
- }
err:
return status;
}
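
The be_intx() rework above arms NAPI only when napi_schedule_prep() succeeds and keeps a per-EQ spurious_intr counter, so that just the first event-less interrupt after a productive one is acknowledged; further empty interrupts return IRQ_NONE and let the kernel's spurious-IRQ accounting take over. A schematic, kernel-free sketch of that counting rule (names and behaviour here are illustrative, not the driver's actual code):

#include <stdio.h>

enum irq_ret { IRQ_NONE_R, IRQ_HANDLED_R };

struct eq_state {
        unsigned int spurious;          /* consecutive event-less interrupts seen */
};

/* Decide whether an interrupt that delivered num_evts events should be
 * reported as handled.  Only the first spurious interrupt after a real one
 * is tolerated; repeated empty interrupts are reported as IRQ_NONE so a
 * stuck line can eventually be flagged. */
static enum irq_ret classify_intr(struct eq_state *eq, int num_evts)
{
        if (num_evts) {
                eq->spurious = 0;
                return IRQ_HANDLED_R;
        }
        return (eq->spurious++ == 0) ? IRQ_HANDLED_R : IRQ_NONE_R;
}

int main(void)
{
        struct eq_state eq = { 0 };
        int evts[] = { 3, 0, 0, 2, 0 };

        for (unsigned int i = 0; i < sizeof(evts) / sizeof(evts[0]); i++)
                printf("evts=%d -> %s\n", evts[i],
                       classify_intr(&eq, evts[i]) == IRQ_HANDLED_R ? "handled" : "none");
        return 0;
}
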
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8db1c06008de..5722bc61fa58 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -206,7 +206,7 @@ struct ethoc {
unsigned int num_rx;
unsigned int cur_rx;
- void** vma;
+ void **vma;
struct net_device *netdev;
struct napi_struct napi;
@@ -292,7 +292,7 @@ static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
struct ethoc_bd bd;
int i;
- void* vma;
+ void *vma;
dev->cur_tx = 0;
dev->dty_tx = 0;
@@ -447,8 +447,8 @@ static int ethoc_rx(struct net_device *dev, int limit)
netif_receive_skb(skb);
} else {
if (net_ratelimit())
- dev_warn(&dev->dev, "low on memory - "
- "packet dropped\n");
+ dev_warn(&dev->dev,
+ "low on memory - packet dropped\n");
dev->stats.rx_dropped++;
break;
@@ -555,9 +555,8 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
pending = ethoc_read(priv, INT_SOURCE);
pending &= mask;
- if (unlikely(pending == 0)) {
+ if (unlikely(pending == 0))
return IRQ_NONE;
- }
ethoc_ack_irq(priv, pending);
@@ -620,7 +619,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
- for (i=0; i < 5; i++) {
+ for (i = 0; i < 5; i++) {
u32 status = ethoc_read(priv, MIISTATUS);
if (!(status & MIISTATUS_BUSY)) {
u32 data = ethoc_read(priv, MIIRX_DATA);
@@ -628,7 +627,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
ethoc_write(priv, MIICOMMAND, 0);
return data;
}
- usleep_range(100,200);
+ usleep_range(100, 200);
}
return -EBUSY;
@@ -643,14 +642,14 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
ethoc_write(priv, MIITX_DATA, val);
ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
- for (i=0; i < 5; i++) {
+ for (i = 0; i < 5; i++) {
u32 stat = ethoc_read(priv, MIISTATUS);
if (!(stat & MIISTATUS_BUSY)) {
/* reset MII command register */
ethoc_write(priv, MIICOMMAND, 0);
return 0;
}
- usleep_range(100,200);
+ usleep_range(100, 200);
}
return -EBUSY;
@@ -671,19 +670,18 @@ static int ethoc_mdio_probe(struct net_device *dev)
struct phy_device *phy;
int err;
- if (priv->phy_id != -1) {
+ if (priv->phy_id != -1)
phy = priv->mdio->phy_map[priv->phy_id];
- } else {
+ else
phy = phy_find_first(priv->mdio);
- }
if (!phy) {
dev_err(&dev->dev, "no PHY found\n");
return -ENXIO;
}
- err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
- PHY_INTERFACE_MODE_GMII);
+ err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
+ PHY_INTERFACE_MODE_GMII);
if (err) {
dev_err(&dev->dev, "could not attach to PHY\n");
return err;
@@ -771,21 +769,24 @@ static int ethoc_config(struct net_device *dev, struct ifmap *map)
return -ENOSYS;
}
-static int ethoc_set_mac_address(struct net_device *dev, void *addr)
+static void ethoc_do_set_mac_address(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
- u8 *mac = (u8 *)addr;
-
- if (!is_valid_ether_addr(mac))
- return -EADDRNOTAVAIL;
+ unsigned char *mac = dev->dev_addr;
ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
(mac[4] << 8) | (mac[5] << 0));
ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
+}
- memcpy(dev->dev_addr, mac, ETH_ALEN);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+static int ethoc_set_mac_address(struct net_device *dev, void *p)
+{
+ const struct sockaddr *addr = p;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ ethoc_do_set_mac_address(dev);
return 0;
}
@@ -1022,7 +1023,7 @@ static int ethoc_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
priv->num_tx, priv->num_rx);
- priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
+ priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
goto error;
@@ -1038,7 +1039,7 @@ static int ethoc_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
{
- const uint8_t* mac;
+ const uint8_t *mac;
mac = of_get_property(pdev->dev.of_node,
"local-mac-address",
@@ -1050,25 +1051,23 @@ static int ethoc_probe(struct platform_device *pdev)
}
/* Check that the given MAC address is valid. If it isn't, read the
- * current MAC from the controller. */
+ * current MAC from the controller.
+ */
if (!is_valid_ether_addr(netdev->dev_addr))
ethoc_get_mac_address(netdev, netdev->dev_addr);
/* Check the MAC again for validity, if it still isn't choose and
- * program a random one. */
+ * program a random one.
+ */
if (!is_valid_ether_addr(netdev->dev_addr)) {
eth_random_addr(netdev->dev_addr);
random_mac = true;
}
- ret = ethoc_set_mac_address(netdev, netdev->dev_addr);
- if (ret) {
- dev_err(&netdev->dev, "failed to set MAC address\n");
- goto error;
- }
+ ethoc_do_set_mac_address(netdev);
if (random_mac)
- netdev->addr_assign_type |= NET_ADDR_RANDOM;
+ netdev->addr_assign_type = NET_ADDR_RANDOM;
/* register MII bus */
priv->mdio = mdiobus_alloc();
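
The ethoc change above splits the ndo_set_mac_address() handler into a validation wrapper that takes the struct sockaddr it is actually passed, and ethoc_do_set_mac_address(), which simply programs whatever dev->dev_addr currently holds; probe can then program a randomly generated address without tripping the validity check. A compact sketch of that split, with stand-in types instead of the real net_device/sockaddr definitions:

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

struct netdev_model {                   /* stand-in for struct net_device */
        unsigned char dev_addr[ETH_ALEN];
};

struct sockaddr_model {                 /* stand-in for struct sockaddr */
        unsigned short sa_family;
        char sa_data[14];
};

static bool is_valid_ether(const unsigned char *a)
{
        static const unsigned char zero[ETH_ALEN];
        /* "valid" here: not all-zero and not a multicast address */
        return memcmp(a, zero, ETH_ALEN) != 0 && !(a[0] & 1);
}

/* Program the current dev_addr; no policy checks, so probe code can call it
 * directly even for a randomly generated address. */
static void do_set_mac_address(struct netdev_model *dev)
{
        (void)dev;                      /* hardware register writes would go here */
}

/* The user-facing path keeps the validation, mirroring the ethoc split. */
static int set_mac_address(struct netdev_model *dev, const struct sockaddr_model *addr)
{
        if (!is_valid_ether((const unsigned char *)addr->sa_data))
                return -1;              /* -EADDRNOTAVAIL in the kernel version */
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
        do_set_mac_address(dev);
        return 0;
}

int main(void)
{
        struct netdev_model dev = { { 0 } };
        struct sockaddr_model sa = { 0, { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };

        return set_mac_address(&dev, &sa);
}
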
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 74d749e29aab..7c361d1db94c 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -858,8 +858,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv)
}
phydev = phy_connect(netdev, dev_name(&phydev->dev),
- &ftgmac100_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII);
if (IS_ERR(phydev)) {
netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
@@ -955,9 +954,9 @@ static int ftgmac100_mdiobus_reset(struct mii_bus *bus)
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, dev_name(&netdev->dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftgmac100_get_settings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index b901a01e3fa5..b5ea8fbd8a76 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -820,9 +820,9 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, dev_name(&netdev->dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index ec490d741fc0..6048dc8604ee 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -26,6 +26,7 @@ config FEC
ARCH_MXC || SOC_IMX28)
default ARCH_MXC || SOC_IMX28 if ARM
select PHYLIB
+ select PTP_1588_CLOCK
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -92,12 +93,4 @@ config GIANFAR
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
and MPC86xx family of chips, and the FEC on the 8540.
-config FEC_PTP
- bool "PTP Hardware Clock (PHC)"
- depends on FEC && ARCH_MXC && !SOC_IMX25 && !SOC_IMX27 && !SOC_IMX35 && !SOC_IMX5
- select PTP_1588_CLOCK
- --help---
- Say Y here if you want to use PTP Hardware Clock (PHC) in the
- driver. Only the basic clock operations have been implemented.
-
endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index d4d19b3d00ae..b7d58fe6f531 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -2,8 +2,7 @@
# Makefile for the Freescale network device drivers.
#
-obj-$(CONFIG_FEC) += fec.o
-obj-$(CONFIG_FEC_PTP) += fec_ptp.o
+obj-$(CONFIG_FEC) += fec.o fec_ptp.o
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
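
The Kconfig and Makefile changes above fold the former FEC_PTP option into the FEC build, and the fec.c diff that follows replaces the CONFIG_FEC_PTP #ifdefs with a runtime fep->bufdesc_ex flag plus an extended descriptor layout. Once the descriptor size is only known at run time, walking the ring with plain bdp++ would land mid-entry, which is why the driver grows fec_enet_get_nextdesc()/_prevdesc() helpers. A stripped-down sketch of that stepping rule, with illustrative layouts rather than the real register-defined ones:

#include <stdint.h>

struct bufdesc {                        /* basic descriptor (layout illustrative) */
        uint16_t cbd_sc;
        uint16_t cbd_datlen;
        uint32_t cbd_bufaddr;
};

struct bufdesc_ex {                     /* extended descriptor: basic fields plus extras */
        struct bufdesc desc;
        uint32_t cbd_esc;
        uint32_t cbd_prot;
        uint32_t cbd_bdu;
        uint32_t ts;
};

/* Step to the next ring entry; the stride depends on the runtime flag, so
 * pointer arithmetic on struct bufdesc alone is only correct when extended
 * descriptors are disabled. */
static struct bufdesc *next_desc(struct bufdesc *bdp, int is_ex)
{
        if (is_ex)
                return (struct bufdesc *)((struct bufdesc_ex *)bdp + 1);
        return bdp + 1;
}

int main(void)
{
        struct bufdesc_ex ring[4];
        struct bufdesc *bdp = &ring[0].desc;

        bdp = next_desc(bdp, 1);        /* now points at ring[1] */
        return bdp == &ring[1].desc ? 0 : 1;
}
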
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 0704bcab178a..29d82cf1528e 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -67,6 +67,15 @@
#endif
#define DRIVER_NAME "fec"
+#define FEC_NAPI_WEIGHT 64
+
+/* Pause frame field and FIFO threshold */
+#define FEC_ENET_FCE (1 << 5)
+#define FEC_ENET_RSEM_V 0x84
+#define FEC_ENET_RSFL_V 16
+#define FEC_ENET_RAEM_V 0x8
+#define FEC_ENET_RAFL_V 0x8
+#define FEC_ENET_OPD_V 0xFFF0
/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC (1 << 0)
@@ -76,6 +85,8 @@
#define FEC_QUIRK_USE_GASKET (1 << 2)
/* Controller has GBIT support */
#define FEC_QUIRK_HAS_GBIT (1 << 3)
+/* Controller has extended buffer descriptors */
+#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
static struct platform_device_id fec_devtype[] = {
{
@@ -93,7 +104,8 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
}, {
.name = "imx6q-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT,
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX,
}, {
/* sentinel */
}
@@ -140,7 +152,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif
#endif /* CONFIG_M5272 */
-#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
+#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
@@ -157,6 +169,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
+#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
/* The FEC stores dest/src/type, data, and checksum for receive packets.
*/
@@ -190,8 +203,29 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
+#define FEC_PAUSE_FLAG_AUTONEG 0x1
+#define FEC_PAUSE_FLAG_ENABLE 0x2
+
static int mii_cnt;
+static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+{
+ struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
+ if (is_ex)
+ return (struct bufdesc *)(ex + 1);
+ else
+ return bdp + 1;
+}
+
+static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+{
+ struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
+ if (is_ex)
+ return (struct bufdesc *)(ex - 1);
+ else
+ return bdp - 1;
+}
+
static void *swap_buffer(void *bufaddr, int len)
{
int i;
@@ -248,7 +282,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
*/
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
- index = bdp - fep->tx_bd_base;
+ if (fep->bufdesc_ex)
+ index = (struct bufdesc_ex *)bdp -
+ (struct bufdesc_ex *)fep->tx_bd_base;
+ else
+ index = bdp - fep->tx_bd_base;
memcpy(fep->tx_bounce[index], skb->data, skb->len);
bufaddr = fep->tx_bounce[index];
}
@@ -280,17 +318,19 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
bdp->cbd_sc = status;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_bdu = 0;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ if (fep->bufdesc_ex) {
+
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_bdu = 0;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
fep->hwts_tx_en)) {
- bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+ ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
+ } else {
- bdp->cbd_esc = BD_ENET_TX_INT;
+ ebdp->cbd_esc = BD_ENET_TX_INT;
+ }
}
-#endif
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
@@ -298,7 +338,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
if (bdp == fep->dirty_tx) {
fep->tx_full = 1;
@@ -359,8 +399,12 @@ fec_restart(struct net_device *ndev, int duplex)
/* Set receive and transmit descriptor base. */
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
- writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
- fep->hwp + FEC_X_DES_START);
+ if (fep->bufdesc_ex)
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
+ * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+ else
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
+ * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
fep->cur_rx = fep->rx_bd_base;
@@ -439,6 +483,25 @@ fec_restart(struct net_device *ndev, int duplex)
}
#endif
}
+
+ /* enable pause frame */
+ if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
+ ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
+ fep->phy_dev && fep->phy_dev->pause)) {
+ rcntl |= FEC_ENET_FCE;
+
+ /* set FIFO threshold parameter to reduce overrun */
+ writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
+ writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
+ writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
+ writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
+
+ /* OPD */
+ writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
+ } else {
+ rcntl &= ~FEC_ENET_FCE;
+ }
+
writel(rcntl, fep->hwp + FEC_R_CNTRL);
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
@@ -448,17 +511,16 @@ fec_restart(struct net_device *ndev, int duplex)
writel(1 << 8, fep->hwp + FEC_X_WMRK);
}
-#ifdef CONFIG_FEC_PTP
- ecntl |= (1 << 4);
-#endif
+ if (fep->bufdesc_ex)
+ ecntl |= (1 << 4);
/* And last, enable the transmit and receive processing */
writel(ecntl, fep->hwp + FEC_ECNTRL);
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-#ifdef CONFIG_FEC_PTP
- fec_ptp_start_cyclecounter(ndev);
-#endif
+ if (fep->bufdesc_ex)
+ fec_ptp_start_cyclecounter(ndev);
+
/* Enable interrupts we wish to service */
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
@@ -544,19 +606,20 @@ fec_enet_tx(struct net_device *ndev)
ndev->stats.tx_packets++;
}
-#ifdef CONFIG_FEC_PTP
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ fep->bufdesc_ex) {
struct skb_shared_hwtstamps shhwtstamps;
unsigned long flags;
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
spin_lock_irqsave(&fep->tmreg_lock, flags);
shhwtstamps.hwtstamp = ns_to_ktime(
- timecounter_cyc2time(&fep->tc, bdp->ts));
+ timecounter_cyc2time(&fep->tc, ebdp->ts));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
skb_tstamp_tx(skb, &shhwtstamps);
}
-#endif
+
if (status & BD_ENET_TX_READY)
printk("HEY! Enet xmit interrupt and TX_READY.\n");
@@ -575,7 +638,7 @@ fec_enet_tx(struct net_device *ndev)
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
/* Since we have freed up a buffer, the ring is no longer full
*/
@@ -595,8 +658,8 @@ fec_enet_tx(struct net_device *ndev)
* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
*/
-static void
-fec_enet_rx(struct net_device *ndev)
+static int
+fec_enet_rx(struct net_device *ndev, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
@@ -606,13 +669,12 @@ fec_enet_rx(struct net_device *ndev)
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
+ int pkt_received = 0;
#ifdef CONFIG_M532x
flush_cache_all();
#endif
- spin_lock(&fep->hw_lock);
-
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
@@ -620,6 +682,10 @@ fec_enet_rx(struct net_device *ndev)
while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+ if (pkt_received >= budget)
+ break;
+ pkt_received++;
+
/* Since we have allocated space to hold a complete frame,
* the last indicator should be set.
*/
@@ -683,23 +749,25 @@ fec_enet_rx(struct net_device *ndev)
skb_put(skb, pkt_len - 4); /* Make room */
skb_copy_to_linear_data(skb, data, pkt_len - 4);
skb->protocol = eth_type_trans(skb, ndev);
-#ifdef CONFIG_FEC_PTP
+
/* Get receive timestamp from the skb */
- if (fep->hwts_rx_en) {
+ if (fep->hwts_rx_en && fep->bufdesc_ex) {
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
unsigned long flags;
+ struct bufdesc_ex *ebdp =
+ (struct bufdesc_ex *)bdp;
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
spin_lock_irqsave(&fep->tmreg_lock, flags);
shhwtstamps->hwtstamp = ns_to_ktime(
- timecounter_cyc2time(&fep->tc, bdp->ts));
+ timecounter_cyc2time(&fep->tc, ebdp->ts));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
-#endif
+
if (!skb_defer_rx_timestamp(skb))
- netif_rx(skb);
+ napi_gro_receive(&fep->napi, skb);
}
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
@@ -712,17 +780,19 @@ rx_processing_done:
status |= BD_ENET_RX_EMPTY;
bdp->cbd_sc = status;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
- bdp->cbd_prot = 0;
- bdp->cbd_bdu = 0;
-#endif
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
+ }
/* Update BD pointer to next entry */
if (status & BD_ENET_RX_WRAP)
bdp = fep->rx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
/* Doing this here will keep the FEC running while we process
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
@@ -731,7 +801,7 @@ rx_processing_done:
}
fep->cur_rx = bdp;
- spin_unlock(&fep->hw_lock);
+ return pkt_received;
}
static irqreturn_t
@@ -748,7 +818,13 @@ fec_enet_interrupt(int irq, void *dev_id)
if (int_events & FEC_ENET_RXF) {
ret = IRQ_HANDLED;
- fec_enet_rx(ndev);
+
+ /* Disable the RX interrupt */
+ if (napi_schedule_prep(&fep->napi)) {
+ writel(FEC_RX_DISABLED_IMASK,
+ fep->hwp + FEC_IMASK);
+ __napi_schedule(&fep->napi);
+ }
}
/* Transmit OK, or non-fatal error. Update the buffer
@@ -769,10 +845,21 @@ fec_enet_interrupt(int irq, void *dev_id)
return ret;
}
+static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct net_device *ndev = napi->dev;
+ int pkts = fec_enet_rx(ndev, budget);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ if (pkts < budget) {
+ napi_complete(napi);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ }
+ return pkts;
+}
/* ------------------------------------------------------------------------- */
-static void __inline__ fec_get_mac(struct net_device *ndev)
+static void fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
@@ -973,7 +1060,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
- phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
+ phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
fep->phy_interface);
if (IS_ERR(phy_dev)) {
printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
@@ -981,8 +1068,10 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
/* mask with MAC supported features */
- if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT)
+ if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
phy_dev->supported &= PHY_GBIT_FEATURES;
+ phy_dev->supported |= SUPPORTED_Pause;
+ }
else
phy_dev->supported &= PHY_BASIC_FEATURES;
@@ -1133,17 +1222,95 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- strcpy(info->driver, fep->pdev->dev.driver->name);
- strcpy(info->version, "Revision: 1.0");
- strcpy(info->bus_info, dev_name(&ndev->dev));
+ strlcpy(info->driver, fep->pdev->dev.driver->name,
+ sizeof(info->driver));
+ strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+}
+
+static int fec_enet_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (fep->bufdesc_ex) {
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ if (fep->ptp_clock)
+ info->phc_index = ptp_clock_index(fep->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+ } else {
+ return ethtool_op_get_ts_info(ndev, info);
+ }
+}
+
+static void fec_enet_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
+ pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
+ pause->rx_pause = pause->tx_pause;
+}
+
+static int fec_enet_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (pause->tx_pause != pause->rx_pause) {
+ netdev_info(ndev,
+ "hardware only support enable/disable both tx and rx");
+ return -EINVAL;
+ }
+
+ fep->pause_flag = 0;
+
+ /* tx pause must be same as rx pause */
+ fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
+ fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
+
+ if (pause->rx_pause || pause->autoneg) {
+ fep->phy_dev->supported |= ADVERTISED_Pause;
+ fep->phy_dev->advertising |= ADVERTISED_Pause;
+ } else {
+ fep->phy_dev->supported &= ~ADVERTISED_Pause;
+ fep->phy_dev->advertising &= ~ADVERTISED_Pause;
+ }
+
+ if (pause->autoneg) {
+ if (netif_running(ndev))
+ fec_stop(ndev);
+ phy_start_aneg(fep->phy_dev);
+ }
+ if (netif_running(ndev))
+ fec_restart(ndev, 0);
+
+ return 0;
}
static const struct ethtool_ops fec_enet_ethtool_ops = {
+ .get_pauseparam = fec_enet_get_pauseparam,
+ .set_pauseparam = fec_enet_set_pauseparam,
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = fec_enet_get_ts_info,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -1157,10 +1324,9 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (!phydev)
return -ENODEV;
-#ifdef CONFIG_FEC_PTP
- if (cmd == SIOCSHWTSTAMP)
+ if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
return fec_ptp_ioctl(ndev, rq, cmd);
-#endif
+
return phy_mii_ioctl(phydev, rq, cmd);
}
@@ -1180,7 +1346,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
if (skb)
dev_kfree_skb(skb);
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
bdp = fep->tx_bd_base;
@@ -1207,14 +1373,17 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
bdp->cbd_sc = BD_ENET_RX_EMPTY;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
-#endif
- bdp++;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap. */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
bdp = fep->tx_bd_base;
@@ -1224,14 +1393,16 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
-#endif
- bdp++;
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap. */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
return 0;
@@ -1243,6 +1414,8 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ napi_enable(&fep->napi);
+
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -1444,24 +1617,31 @@ static int fec_enet_init(struct net_device *ndev)
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
- fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+ if (fep->bufdesc_ex)
+ fep->tx_bd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+ else
+ fep->tx_bd_base = cbd_base + RX_RING_SIZE;
/* The FEC Ethernet specific entries in the device structure */
ndev->watchdog_timeo = TX_TIMEOUT;
ndev->netdev_ops = &fec_netdev_ops;
ndev->ethtool_ops = &fec_enet_ethtool_ops;
+ writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
+
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
for (i = 0; i < RX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
/* ...and the same for transmit */
@@ -1471,11 +1651,11 @@ static int fec_enet_init(struct net_device *ndev)
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
fec_restart(ndev, 0);
@@ -1509,22 +1689,25 @@ static void fec_reset_phy(struct platform_device *pdev)
msec = 1;
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+ if (!gpio_is_valid(phy_reset))
+ return;
+
err = devm_gpio_request_one(&pdev->dev, phy_reset,
GPIOF_OUT_INIT_LOW, "phy-reset");
if (err) {
- pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
+ dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
return;
}
msleep(msec);
gpio_set_value(phy_reset, 1);
}
#else /* CONFIG_OF */
-static inline int fec_get_phy_mode_dt(struct platform_device *pdev)
+static int fec_get_phy_mode_dt(struct platform_device *pdev)
{
return -ENODEV;
}
-static inline void fec_reset_phy(struct platform_device *pdev)
+static void fec_reset_phy(struct platform_device *pdev)
{
/*
* In case of platform probe, the reset has been done
@@ -1570,10 +1753,17 @@ fec_probe(struct platform_device *pdev)
/* setup board info structure */
fep = netdev_priv(ndev);
+ /* default enable pause frame auto negotiation */
+ if (pdev->id_entry &&
+ (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
+ fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+
fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
fep->dev_id = dev_id++;
+ fep->bufdesc_ex = 0;
+
if (!fep->hwp) {
ret = -ENOMEM;
goto failed_ioremap;
@@ -1628,19 +1818,19 @@ fec_probe(struct platform_device *pdev)
goto failed_clk;
}
-#ifdef CONFIG_FEC_PTP
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+ fep->bufdesc_ex =
+ pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
if (IS_ERR(fep->clk_ptp)) {
ret = PTR_ERR(fep->clk_ptp);
- goto failed_clk;
+ fep->bufdesc_ex = 0;
}
-#endif
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
-#ifdef CONFIG_FEC_PTP
- clk_prepare_enable(fep->clk_ptp);
-#endif
+ if (!IS_ERR(fep->clk_ptp))
+ clk_prepare_enable(fep->clk_ptp);
+
reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(reg_phy)) {
ret = regulator_enable(reg_phy);
@@ -1653,6 +1843,9 @@ fec_probe(struct platform_device *pdev)
fec_reset_phy(pdev);
+ if (fep->bufdesc_ex)
+ fec_ptp_init(ndev, pdev);
+
ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
@@ -1668,10 +1861,6 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_register;
-#ifdef CONFIG_FEC_PTP
- fec_ptp_init(ndev, pdev);
-#endif
-
return 0;
failed_register:
@@ -1681,9 +1870,8 @@ failed_init:
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
-#ifdef CONFIG_FEC_PTP
- clk_disable_unprepare(fep->clk_ptp);
-#endif
+ if (!IS_ERR(fep->clk_ptp))
+ clk_disable_unprepare(fep->clk_ptp);
failed_pin:
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1716,12 +1904,10 @@ fec_drv_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, ndev);
}
-#ifdef CONFIG_FEC_PTP
del_timer_sync(&fep->time_keep);
clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
-#endif
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
iounmap(fep->hwp);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c5a3bc1475c7..01579b8e37c4 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -13,11 +13,9 @@
#define FEC_H
/****************************************************************************/
-#ifdef CONFIG_FEC_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#endif
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
@@ -50,6 +48,10 @@
#define FEC_R_DES_START 0x180 /* Receive descriptor ring */
#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */
#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
+#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */
+#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
+#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
+#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
@@ -94,14 +96,17 @@ struct bufdesc {
unsigned short cbd_datlen; /* Data length */
unsigned short cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */
-#ifdef CONFIG_FEC_PTP
+};
+
+struct bufdesc_ex {
+ struct bufdesc desc;
unsigned long cbd_esc;
unsigned long cbd_prot;
unsigned long cbd_bdu;
unsigned long ts;
unsigned short res0[4];
-#endif
};
+
#else
struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
@@ -203,9 +208,7 @@ struct fec_enet_private {
struct clk *clk_ipg;
struct clk *clk_ahb;
-#ifdef CONFIG_FEC_PTP
struct clk *clk_ptp;
-#endif
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -243,8 +246,11 @@ struct fec_enet_private {
int full_duplex;
struct completion mdio_done;
int irq[FEC_IRQ_NUM];
+ int bufdesc_ex;
+ int pause_flag;
+
+ struct napi_struct napi;
-#ifdef CONFIG_FEC_PTP
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
unsigned long last_overflow_check;
@@ -257,15 +263,12 @@ struct fec_enet_private {
int hwts_rx_en;
int hwts_tx_en;
struct timer_list time_keep;
-#endif
};
-#ifdef CONFIG_FEC_PTP
void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
-#endif
/****************************************************************************/
#endif /* FEC_H */
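The CONFIG_FEC_PTP ifdefs in fec.h are replaced by a separate struct bufdesc_ex that embeds the legacy descriptor, and the driver now steps through the ring with fec_enet_get_nextdesc()/fec_enet_get_prevdesc() instead of bare bdp++ so the stride matches whichever descriptor size was chosen at probe time. A standalone sketch of that stride-aware walk (the structs are simplified and the helper is reimplemented here, not the driver's code):

#include <stdio.h>

struct bufdesc {                 /* legacy layout (simplified) */
	unsigned short cbd_datlen;
	unsigned short cbd_sc;
	unsigned long  cbd_bufaddr;
};

struct bufdesc_ex {              /* extended layout: legacy fields plus PTP words */
	struct bufdesc desc;
	unsigned long  cbd_esc;
	unsigned long  cbd_prot;
	unsigned long  cbd_bdu;
	unsigned long  ts;
};

/* Advance by the real element size, not by sizeof(struct bufdesc). */
static struct bufdesc *get_nextdesc(struct bufdesc *bdp, int is_ex)
{
	if (is_ex)
		return (struct bufdesc *)(((struct bufdesc_ex *)bdp) + 1);
	return bdp + 1;
}

int main(void)
{
	static struct bufdesc_ex ring[4];
	struct bufdesc *bdp = &ring[0].desc;
	int i;

	for (i = 0; i < 4; i++) {
		printf("descriptor %d at byte offset %zu\n", i,
		       (size_t)((char *)bdp - (char *)ring));
		bdp = get_nextdesc(bdp, 1 /* extended descriptors in use */);
	}
	return 0;
}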
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 817d081d2cd8..77943a6a1b8c 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/netdevice.h>
@@ -40,8 +41,8 @@
#include <asm/delay.h>
#include <asm/mpc52xx.h>
-#include <sysdev/bestcomm/bestcomm.h>
-#include <sysdev/bestcomm/fec.h>
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/fec.h>
#include "fec_mpc52xx.h"
@@ -76,10 +77,6 @@ static void mpc52xx_fec_stop(struct net_device *dev);
static void mpc52xx_fec_start(struct net_device *dev);
static void mpc52xx_fec_reset(struct net_device *dev);
-static u8 mpc52xx_fec_mac_addr[6];
-module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0);
-MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe");
-
#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
static int debug = -1; /* the above default */
@@ -110,15 +107,6 @@ static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
-static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- struct mpc52xx_fec __iomem *fec = priv->fec;
-
- *(u32 *)(&mac[0]) = in_be32(&fec->paddr1);
- *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16;
-}
-
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sock = addr;
@@ -853,6 +841,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
struct resource mem;
const u32 *prop;
int prop_size;
+ struct device_node *np = op->dev.of_node;
+ const char *mac_addr;
phys_addr_t rx_fifo;
phys_addr_t tx_fifo;
@@ -866,7 +856,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
priv->ndev = ndev;
/* Reserve FEC control zone */
- rv = of_address_to_resource(op->dev.of_node, 0, &mem);
+ rv = of_address_to_resource(np, 0, &mem);
if (rv) {
printk(KERN_ERR DRIVER_NAME ": "
"Error while parsing device node resource\n" );
@@ -919,7 +909,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Get the IRQ we need one by one */
/* Control */
- ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+ ndev->irq = irq_of_parse_and_map(np, 0);
/* RX */
priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
@@ -927,11 +917,33 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* TX */
priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);
- /* MAC address init */
- if (!is_zero_ether_addr(mpc52xx_fec_mac_addr))
- memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6);
- else
- mpc52xx_fec_get_paddr(ndev, ndev->dev_addr);
+ /*
+ * MAC address init:
+ *
+ * First try to read MAC address from DT
+ */
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr) {
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ } else {
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ /*
+ * If the MAC address is not provided via DT then read
+ * it back from the controller regs
+ */
+ *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ }
+
+ /*
+ * Check if the MAC address is valid; if not, get a random one
+ */
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(&ndev->dev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
@@ -942,20 +954,20 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
- prop = of_get_property(op->dev.of_node, "current-speed", &prop_size);
+ prop = of_get_property(np, "current-speed", &prop_size);
if (prop && (prop_size >= sizeof(u32) * 2)) {
priv->speed = prop[0];
priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
}
/* If there is a phy handle, then get the PHY node */
- priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* the 7-wire property means don't use MII mode */
- if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) {
+ if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
priv->seven_wire_mode = 1;
dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
@@ -970,6 +982,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* We're done ! */
dev_set_drvdata(&op->dev, ndev);
+ printk(KERN_INFO "%s: %s MAC %pM\n",
+ ndev->name, op->dev.of_node->full_name, ndev->dev_addr);
return 0;
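In fec_mpc52xx.c the MAC module parameter goes away in favour of a fixed fallback order: device tree first, then the address latched in the controller's PADDR registers, then a random locally administered address if neither source is valid. A small sketch of that decision chain (plain C, with stand-in sources instead of of_get_mac_address() and the register reads):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

/* Valid means: not all-zero and not a multicast address (bit 0 of byte 0). */
static int addr_is_valid(const unsigned char *a)
{
	static const unsigned char zero[ETH_ALEN];
	return memcmp(a, zero, ETH_ALEN) != 0 && !(a[0] & 1);
}

static void random_addr(unsigned char *a)
{
	int i;
	for (i = 0; i < ETH_ALEN; i++)
		a[i] = rand() & 0xff;
	a[0] &= 0xfe;   /* clear multicast bit */
	a[0] |= 0x02;   /* mark as locally administered */
}

/* dt_addr may be NULL (no MAC property in the device tree); hw_addr is what
 * the controller registers currently hold. */
static void pick_mac(const unsigned char *dt_addr,
		     const unsigned char *hw_addr, unsigned char *out)
{
	if (dt_addr)
		memcpy(out, dt_addr, ETH_ALEN);
	else
		memcpy(out, hw_addr, ETH_ALEN);

	if (!addr_is_valid(out))
		random_addr(out);
}

int main(void)
{
	unsigned char hw[ETH_ALEN] = { 0 };     /* registers never programmed */
	unsigned char mac[ETH_ALEN];

	pick_mac(NULL, hw, mac);
	printf("chose %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}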
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index c40526c78c20..1f17ca0f2201 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -104,7 +104,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
unsigned long flags;
int inc;
- inc = 1000000000 / clk_get_rate(fep->clk_ptp);
+ inc = 1000000000 / fep->cycle_speed;
/* grab the ptp lock */
spin_lock_irqsave(&fep->tmreg_lock, flags);
@@ -363,6 +363,8 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
fep->ptp_caps.settime = fec_ptp_settime;
fep->ptp_caps.enable = fec_ptp_enable;
+ fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+
spin_lock_init(&fep->tmreg_lock);
fec_ptp_start_cyclecounter(ndev);
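The fec_ptp.c change caches clk_get_rate() once at init into cycle_speed and derives the per-tick increment from that cached rate instead of querying the clock framework on every cyclecounter restart. The arithmetic is simply inc = 10^9 / rate; a tiny worked example, assuming a 66 MHz PTP clock (an illustrative value, not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long cycle_speed = 66000000UL;       /* assumed 66 MHz ptp clock */
	int inc = 1000000000 / cycle_speed;           /* nanoseconds per timer tick */

	printf("increment = %d ns per tick\n", inc);  /* 15 ns for 66 MHz */
	return 0;
}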
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index e9879c5af7ba..46df28893c10 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -888,8 +888,8 @@ static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
static void fs_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int fs_get_regs_len(struct net_device *dev)
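The drvinfo handlers across fs_enet, fec, gianfar and ucc_geth all trade strcpy()/strncpy() for strlcpy() so the copy is bounded by the destination size and still guaranteed NUL-terminated. A small illustration of the difference (bounded_copy() is a stand-in for strlcpy(), which is not part of standard C):

#include <stdio.h>
#include <string.h>

/* strlcpy()-style copy: never writes past size bytes, always terminates. */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;     /* callers can detect truncation when len >= size */
}

int main(void)
{
	char driver[8];

	bounded_copy(driver, "fs_enet_long_name", sizeof(driver));
	printf("stored \"%s\" (truncated but terminated)\n", driver);
	return 0;
}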
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index bffb2edd6858..4b5e8a692481 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -133,8 +133,8 @@ static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
@@ -231,7 +231,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
dma_addr_t addr;
int i, j, k;
struct gfar_private *priv = netdev_priv(ndev);
- struct device *dev = &priv->ofdev->dev;
+ struct device *dev = priv->dev;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
@@ -277,14 +277,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
/* Setup the skbuff rings */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
- tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
- tx_queue->tx_ring_size,
- GFP_KERNEL);
- if (!tx_queue->tx_skbuff) {
- netif_err(priv, ifup, ndev,
- "Could not allocate tx_skbuff\n");
+ tx_queue->tx_skbuff =
+ kmalloc_array(tx_queue->tx_ring_size,
+ sizeof(*tx_queue->tx_skbuff),
+ GFP_KERNEL);
+ if (!tx_queue->tx_skbuff)
goto cleanup;
- }
for (k = 0; k < tx_queue->tx_ring_size; k++)
tx_queue->tx_skbuff[k] = NULL;
@@ -292,15 +290,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
- rx_queue->rx_ring_size,
- GFP_KERNEL);
-
- if (!rx_queue->rx_skbuff) {
- netif_err(priv, ifup, ndev,
- "Could not allocate rx_skbuff\n");
+ rx_queue->rx_skbuff =
+ kmalloc_array(rx_queue->rx_ring_size,
+ sizeof(*rx_queue->rx_skbuff),
+ GFP_KERNEL);
+ if (!rx_queue->rx_skbuff)
goto cleanup;
- }
for (j = 0; j < rx_queue->rx_ring_size; j++)
rx_queue->rx_skbuff[j] = NULL;
@@ -349,14 +344,23 @@ static void gfar_init_mac(struct net_device *ndev)
/* Configure the coalescing support */
gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ /* set this when rx hw offload (TOE) functions are being used */
+ priv->uses_rxfcb = 0;
+
if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN;
/* Program the RIR0 reg with the required distribution */
gfar_write(&regs->rir0, DEFAULT_RIR0);
}
- if (ndev->features & NETIF_F_RXCSUM)
+ /* Restore PROMISC mode */
+ if (ndev->flags & IFF_PROMISC)
+ rctrl |= RCTRL_PROM;
+
+ if (ndev->features & NETIF_F_RXCSUM) {
rctrl |= RCTRL_CHECKSUMMING;
+ priv->uses_rxfcb = 1;
+ }
if (priv->extended_hash) {
rctrl |= RCTRL_EXTHASH;
@@ -378,11 +382,15 @@ static void gfar_init_mac(struct net_device *ndev)
}
/* Enable HW time stamping if requested from user space */
- if (priv->hwts_rx_en)
+ if (priv->hwts_rx_en) {
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+ priv->uses_rxfcb = 1;
+ }
- if (ndev->features & NETIF_F_HW_VLAN_RX)
+ if (ndev->features & NETIF_F_HW_VLAN_RX) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+ priv->uses_rxfcb = 1;
+ }
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
@@ -501,20 +509,6 @@ void unlock_tx_qs(struct gfar_private *priv)
spin_unlock(&priv->tx_queue[i]->txlock);
}
-static bool gfar_is_vlan_on(struct gfar_private *priv)
-{
- return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
- (priv->ndev->features & NETIF_F_HW_VLAN_TX);
-}
-
-/* Returns 1 if incoming frames use an FCB */
-static inline int gfar_uses_fcb(struct gfar_private *priv)
-{
- return gfar_is_vlan_on(priv) ||
- (priv->ndev->features & NETIF_F_RXCSUM) ||
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
-}
-
static void free_tx_pointers(struct gfar_private *priv)
{
int i;
@@ -540,6 +534,19 @@ static void unmap_group_regs(struct gfar_private *priv)
iounmap(priv->gfargrp[i].regs);
}
+static void free_gfar_dev(struct gfar_private *priv)
+{
+ int i, j;
+
+ for (i = 0; i < priv->num_grps; i++)
+ for (j = 0; j < GFAR_NUM_IRQS; j++) {
+ kfree(priv->gfargrp[i].irqinfo[j]);
+ priv->gfargrp[i].irqinfo[j] = NULL;
+ }
+
+ free_netdev(priv->ndev);
+}
+
static void disable_napi(struct gfar_private *priv)
{
int i;
@@ -559,40 +566,46 @@ static void enable_napi(struct gfar_private *priv)
static int gfar_parse_group(struct device_node *np,
struct gfar_private *priv, const char *model)
{
+ struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
u32 *queue_mask;
+ int i;
+
+ for (i = 0; i < GFAR_NUM_IRQS; i++) {
+ grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
+ GFP_KERNEL);
+ if (!grp->irqinfo[i])
+ return -ENOMEM;
+ }
- priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
- if (!priv->gfargrp[priv->num_grps].regs)
+ grp->regs = of_iomap(np, 0);
+ if (!grp->regs)
return -ENOMEM;
- priv->gfargrp[priv->num_grps].interruptTransmit =
- irq_of_parse_and_map(np, 0);
+ gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
/* If we aren't the FEC we have multiple interrupts */
if (model && strcasecmp(model, "FEC")) {
- priv->gfargrp[priv->num_grps].interruptReceive =
- irq_of_parse_and_map(np, 1);
- priv->gfargrp[priv->num_grps].interruptError =
- irq_of_parse_and_map(np,2);
- if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
- priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
- priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
+ gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
+ gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
+ if (gfar_irq(grp, TX)->irq == NO_IRQ ||
+ gfar_irq(grp, RX)->irq == NO_IRQ ||
+ gfar_irq(grp, ER)->irq == NO_IRQ)
return -EINVAL;
}
- priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
- priv->gfargrp[priv->num_grps].priv = priv;
- spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
+ grp->grp_id = priv->num_grps;
+ grp->priv = priv;
+ spin_lock_init(&grp->grplock);
if (priv->mode == MQ_MG_MODE) {
queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
+ grp->rx_bit_map = queue_mask ?
*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
+ grp->tx_bit_map = queue_mask ?
*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
} else {
- priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
- priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
+ grp->rx_bit_map = 0xFF;
+ grp->tx_bit_map = 0xFF;
}
priv->num_grps++;
@@ -645,7 +658,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return -ENOMEM;
priv = netdev_priv(dev);
- priv->node = ofdev->dev.of_node;
priv->ndev = dev;
priv->num_tx_queues = num_tx_qs;
@@ -777,7 +789,7 @@ tx_alloc_failed:
free_tx_pointers(priv);
err_grp_init:
unmap_group_regs(priv);
- free_netdev(dev);
+ free_gfar_dev(priv);
return err;
}
@@ -983,7 +995,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv = netdev_priv(dev);
priv->ndev = dev;
priv->ofdev = ofdev;
- priv->node = ofdev->dev.of_node;
+ priv->dev = &ofdev->dev;
SET_NETDEV_DEV(dev, &ofdev->dev);
spin_lock_init(&priv->bflock);
@@ -1020,8 +1032,6 @@ static int gfar_probe(struct platform_device *ofdev)
/* Set the dev->base_addr to the gfar reg region */
dev->base_addr = (unsigned long) regs;
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
/* Fill in the dev structure */
dev->watchdog_timeo = TX_TIMEOUT;
dev->mtu = 1500;
@@ -1182,15 +1192,16 @@ static int gfar_probe(struct platform_device *ofdev)
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
- sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
+ sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_tx");
- sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
+ sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_rx");
- sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
+ sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_er");
} else
- strcpy(priv->gfargrp[i].int_name_tx, dev->name);
+ strcpy(gfar_irq(grp, TX)->name, dev->name);
}
/* Initialize the filer table */
@@ -1223,7 +1234,7 @@ register_fail:
of_node_put(priv->phy_node);
if (priv->tbi_node)
of_node_put(priv->tbi_node);
- free_netdev(dev);
+ free_gfar_dev(priv);
return err;
}
@@ -1240,7 +1251,7 @@ static int gfar_remove(struct platform_device *ofdev)
unregister_netdev(priv->ndev);
unmap_group_regs(priv);
- free_netdev(priv->ndev);
+ free_gfar_dev(priv);
return 0;
}
@@ -1648,9 +1659,9 @@ void gfar_halt(struct net_device *dev)
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
- free_irq(grp->interruptError, grp);
- free_irq(grp->interruptTransmit, grp);
- free_irq(grp->interruptReceive, grp);
+ free_irq(gfar_irq(grp, TX)->irq, grp);
+ free_irq(gfar_irq(grp, RX)->irq, grp);
+ free_irq(gfar_irq(grp, ER)->irq, grp);
}
void stop_gfar(struct net_device *dev)
@@ -1679,7 +1690,7 @@ void stop_gfar(struct net_device *dev)
free_grp_irqs(&priv->gfargrp[i]);
} else {
for (i = 0; i < priv->num_grps; i++)
- free_irq(priv->gfargrp[i].interruptTransmit,
+ free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
&priv->gfargrp[i]);
}
@@ -1698,13 +1709,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
if (!tx_queue->tx_skbuff[i])
continue;
- dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
+ dma_unmap_single(priv->dev, txbdp->bufPtr,
txbdp->length, DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
j++) {
txbdp++;
- dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
+ dma_unmap_page(priv->dev, txbdp->bufPtr,
txbdp->length, DMA_TO_DEVICE);
}
txbdp++;
@@ -1725,8 +1736,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
for (i = 0; i < rx_queue->rx_ring_size; i++) {
if (rx_queue->rx_skbuff[i]) {
- dma_unmap_single(&priv->ofdev->dev,
- rxbdp->bufPtr, priv->rx_buffer_size,
+ dma_unmap_single(priv->dev, rxbdp->bufPtr,
+ priv->rx_buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
rx_queue->rx_skbuff[i] = NULL;
@@ -1765,7 +1776,7 @@ static void free_skb_resources(struct gfar_private *priv)
free_skb_rx_queue(rx_queue);
}
- dma_free_coherent(&priv->ofdev->dev,
+ dma_free_coherent(priv->dev,
sizeof(struct txbd8) * priv->total_tx_ring_size +
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
@@ -1854,32 +1865,34 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
/* Install our interrupt handlers for Error,
* Transmit, and Receive
*/
- if ((err = request_irq(grp->interruptError, gfar_error,
- 0, grp->int_name_er, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+ gfar_irq(grp, ER)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptError);
+ gfar_irq(grp, ER)->irq);
goto err_irq_fail;
}
-
- if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
- 0, grp->int_name_tx, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
+ gfar_irq(grp, TX)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptTransmit);
+ gfar_irq(grp, TX)->irq);
goto tx_irq_fail;
}
-
- if ((err = request_irq(grp->interruptReceive, gfar_receive,
- 0, grp->int_name_rx, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
+ gfar_irq(grp, RX)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptReceive);
+ gfar_irq(grp, RX)->irq);
goto rx_irq_fail;
}
} else {
- if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
- 0, grp->int_name_tx, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+ gfar_irq(grp, TX)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptTransmit);
+ gfar_irq(grp, TX)->irq);
goto err_irq_fail;
}
}
@@ -1887,9 +1900,9 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
return 0;
rx_irq_fail:
- free_irq(grp->interruptTransmit, grp);
+ free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
- free_irq(grp->interruptError, grp);
+ free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
return err;
@@ -2143,7 +2156,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (i == nr_frags - 1)
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
- bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
+ bufaddr = skb_frag_dma_map(priv->dev,
&skb_shinfo(skb)->frags[i],
0,
length,
@@ -2195,7 +2208,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus |= BD_LFLAG(TXBD_TOE);
}
- txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
+ txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
/* If time stamping is requested one additional TxBD must be set up. The
@@ -2308,10 +2321,13 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv)
tempval = gfar_read(&regs->rctrl);
/* If parse is no longer required, then disable parser */
- if (tempval & RCTRL_REQ_PARSER)
+ if (tempval & RCTRL_REQ_PARSER) {
tempval |= RCTRL_PRSDEP_INIT;
- else
+ priv->uses_rxfcb = 1;
+ } else {
tempval &= ~RCTRL_PRSDEP_INIT;
+ priv->uses_rxfcb = 0;
+ }
gfar_write(&regs->rctrl, tempval);
}
@@ -2344,6 +2360,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
tempval = gfar_read(&regs->rctrl);
tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
gfar_write(&regs->rctrl, tempval);
+ priv->uses_rxfcb = 1;
} else {
/* Disable VLAN tag extraction */
tempval = gfar_read(&regs->rctrl);
@@ -2367,15 +2384,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
int oldsize = priv->rx_buffer_size;
int frame_size = new_mtu + ETH_HLEN;
- if (gfar_is_vlan_on(priv))
- frame_size += VLAN_HLEN;
-
if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
netif_err(priv, drv, dev, "Invalid MTU setting\n");
return -EINVAL;
}
- if (gfar_uses_fcb(priv))
+ if (priv->uses_rxfcb)
frame_size += GMAC_FCB_LEN;
frame_size += priv->padding;
@@ -2508,7 +2522,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
} else
buflen = bdp->length;
- dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_single(priv->dev, bdp->bufPtr,
buflen, DMA_TO_DEVICE);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2527,7 +2541,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
for (i = 0; i < frags; i++) {
- dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_page(priv->dev, bdp->bufPtr,
bdp->length, DMA_TO_DEVICE);
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2593,7 +2607,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
struct gfar_private *priv = netdev_priv(dev);
dma_addr_t buf;
- buf = dma_map_single(&priv->ofdev->dev, skb->data,
+ buf = dma_map_single(priv->dev, skb->data,
priv->rx_buffer_size, DMA_FROM_DEVICE);
gfar_init_rxbdp(rx_queue, bdp, buf);
}
@@ -2627,7 +2641,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
if (status & RXBD_TRUNCATED) {
stats->rx_length_errors++;
- estats->rx_trunc++;
+ atomic64_inc(&estats->rx_trunc);
return;
}
@@ -2636,20 +2650,20 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
stats->rx_length_errors++;
if (status & RXBD_LARGE)
- estats->rx_large++;
+ atomic64_inc(&estats->rx_large);
else
- estats->rx_short++;
+ atomic64_inc(&estats->rx_short);
}
if (status & RXBD_NONOCTET) {
stats->rx_frame_errors++;
- estats->rx_nonoctet++;
+ atomic64_inc(&estats->rx_nonoctet);
}
if (status & RXBD_CRCERR) {
- estats->rx_crcerr++;
+ atomic64_inc(&estats->rx_crcerr);
stats->rx_crc_errors++;
}
if (status & RXBD_OVERRUN) {
- estats->rx_overrun++;
+ atomic64_inc(&estats->rx_overrun);
stats->rx_crc_errors++;
}
}
@@ -2674,8 +2688,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi)
{
struct gfar_private *priv = netdev_priv(dev);
struct rxfcb *fcb = NULL;
@@ -2722,10 +2736,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Send the packet up the stack */
ret = napi_gro_receive(napi, skb);
- if (GRO_DROP == ret)
- priv->extra_stats.kernel_dropped++;
-
- return 0;
+ if (unlikely(GRO_DROP == ret))
+ atomic64_inc(&priv->extra_stats.kernel_dropped);
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2746,7 +2758,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = rx_queue->cur_rx;
base = rx_queue->rx_bd_base;
- amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
+ amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
@@ -2758,7 +2770,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
- dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_single(priv->dev, bdp->bufPtr,
priv->rx_buffer_size, DMA_FROM_DEVICE);
if (unlikely(!(bdp->status & RXBD_ERR) &&
@@ -2791,7 +2803,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
} else {
netif_warn(priv, rx_err, dev, "Missing skb!\n");
rx_queue->stats.rx_dropped++;
- priv->extra_stats.rx_skbmissing++;
+ atomic64_inc(&priv->extra_stats.rx_skbmissing);
}
}
@@ -3224,7 +3236,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
netif_dbg(priv, tx_err, dev,
"TX FIFO underrun, packet dropped\n");
dev->stats.tx_dropped++;
- priv->extra_stats.tx_underrun++;
+ atomic64_inc(&priv->extra_stats.tx_underrun);
local_irq_save(flags);
lock_tx_qs(priv);
@@ -3239,7 +3251,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
}
if (events & IEVENT_BSY) {
dev->stats.rx_errors++;
- priv->extra_stats.rx_bsy++;
+ atomic64_inc(&priv->extra_stats.rx_bsy);
gfar_receive(irq, grp_id);
@@ -3248,19 +3260,19 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
}
if (events & IEVENT_BABR) {
dev->stats.rx_errors++;
- priv->extra_stats.rx_babr++;
+ atomic64_inc(&priv->extra_stats.rx_babr);
netif_dbg(priv, rx_err, dev, "babbling RX error\n");
}
if (events & IEVENT_EBERR) {
- priv->extra_stats.eberr++;
+ atomic64_inc(&priv->extra_stats.eberr);
netif_dbg(priv, rx_err, dev, "bus error\n");
}
if (events & IEVENT_RXC)
netif_dbg(priv, rx_status, dev, "control frame\n");
if (events & IEVENT_BABT) {
- priv->extra_stats.tx_babt++;
+ atomic64_inc(&priv->extra_stats.tx_babt);
netif_dbg(priv, tx_err, dev, "babbling TX error\n");
}
return IRQ_HANDLED;
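The gianfar.c changes above replace the three named interruptTransmit/Receive/Error fields and their separate name buffers with a small per-group table indexed by TX/RX/ER and accessed through the gfar_irq() macro, so request/free/naming code collapses into loops and shared helpers. A condensed model of that indexing scheme (user-space, with irq_of_parse_and_map() and request_irq() stubbed out):

#include <stdio.h>

enum irq_id { IRQ_TX, IRQ_RX, IRQ_ER, NUM_IRQS };

struct irqinfo {
	unsigned int irq;
	char name[32];
};

struct group {
	struct irqinfo irqs[NUM_IRQS];  /* one slot per interrupt source */
};

#define group_irq(grp, id) (&(grp)->irqs[(id)])

int main(void)
{
	static const char *suffix[NUM_IRQS] = { "_tx", "_rx", "_er" };
	static struct group grp;
	int i;

	/* Fill number and name in one loop instead of three copies of the code. */
	for (i = 0; i < NUM_IRQS; i++) {
		group_irq(&grp, i)->irq = 100 + i;  /* stand-in for irq_of_parse_and_map() */
		snprintf(group_irq(&grp, i)->name,
			 sizeof(group_irq(&grp, i)->name), "eth0_g0%s", suffix[i]);
	}

	for (i = 0; i < NUM_IRQS; i++)
		printf("%s -> irq %u\n", group_irq(&grp, i)->name,
		       group_irq(&grp, i)->irq);
	return 0;
}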
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 22eabc13ca99..63a28d294e20 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -627,36 +627,29 @@ struct rmon_mib
};
struct gfar_extra_stats {
- u64 kernel_dropped;
- u64 rx_large;
- u64 rx_short;
- u64 rx_nonoctet;
- u64 rx_crcerr;
- u64 rx_overrun;
- u64 rx_bsy;
- u64 rx_babr;
- u64 rx_trunc;
- u64 eberr;
- u64 tx_babt;
- u64 tx_underrun;
- u64 rx_skbmissing;
- u64 tx_timeout;
+ atomic64_t kernel_dropped;
+ atomic64_t rx_large;
+ atomic64_t rx_short;
+ atomic64_t rx_nonoctet;
+ atomic64_t rx_crcerr;
+ atomic64_t rx_overrun;
+ atomic64_t rx_bsy;
+ atomic64_t rx_babr;
+ atomic64_t rx_trunc;
+ atomic64_t eberr;
+ atomic64_t tx_babt;
+ atomic64_t tx_underrun;
+ atomic64_t rx_skbmissing;
+ atomic64_t tx_timeout;
};
#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
-#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64))
+#define GFAR_EXTRA_STATS_LEN \
+ (sizeof(struct gfar_extra_stats)/sizeof(atomic64_t))
-/* Number of stats in the stats structure (ignore car and cam regs)*/
+/* Number of stats exported via ethtool */
#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
-#define GFAR_INFOSTR_LEN 32
-
-struct gfar_stats {
- u64 extra[GFAR_EXTRA_STATS_LEN];
- u64 rmon[GFAR_RMON_LEN];
-};
-
-
struct gfar {
u32 tsec_id; /* 0x.000 - Controller ID register */
u32 tsec_id2; /* 0x.004 - Controller ID2 register */
@@ -937,26 +930,25 @@ struct tx_q_stats {
* @txtime: coalescing value if based on time
*/
struct gfar_priv_tx_q {
+ /* cacheline 1 */
spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
- struct sk_buff ** tx_skbuff;
- /* Buffer descriptor pointers */
- dma_addr_t tx_bd_dma_base;
struct txbd8 *tx_bd_base;
struct txbd8 *cur_tx;
- struct txbd8 *dirty_tx;
+ unsigned int num_txbdfree;
+ unsigned short skb_curtx;
+ unsigned short tx_ring_size;
struct tx_q_stats stats;
- struct net_device *dev;
struct gfar_priv_grp *grp;
- u16 skb_curtx;
- u16 skb_dirtytx;
- u16 qindex;
- unsigned int tx_ring_size;
- unsigned int num_txbdfree;
+ /* cacheline 2 */
+ struct net_device *dev;
+ struct sk_buff **tx_skbuff;
+ struct txbd8 *dirty_tx;
+ unsigned short skb_dirtytx;
+ unsigned short qindex;
/* Configuration info for the coalescing features */
- unsigned char txcoalescing;
+ unsigned int txcoalescing;
unsigned long txic;
- unsigned short txcount;
- unsigned short txtime;
+ dma_addr_t tx_bd_dma_base;
};
/*
@@ -999,18 +991,25 @@ struct gfar_priv_rx_q {
unsigned long rxic;
};
+enum gfar_irqinfo_id {
+ GFAR_TX = 0,
+ GFAR_RX = 1,
+ GFAR_ER = 2,
+ GFAR_NUM_IRQS = 3
+};
+
+struct gfar_irqinfo {
+ unsigned int irq;
+ char name[GFAR_INT_NAME_MAX];
+};
+
/**
* struct gfar_priv_grp - per group structure
* @napi: the napi poll function
* @priv: back pointer to the priv structure
* @regs: the ioremapped register space for this group
* @grp_id: group id for this group
- * @interruptTransmit: The TX interrupt number for this group
- * @interruptReceive: The RX interrupt number for this group
- * @interruptError: The ERROR interrupt number for this group
- * @int_name_tx: tx interrupt name for this group
- * @int_name_rx: rx interrupt name for this group
- * @int_name_er: er interrupt name for this group
+ * @irqinfo: TX/RX/ER irq data for this group
*/
struct gfar_priv_grp {
@@ -1019,23 +1018,20 @@ struct gfar_priv_grp {
struct gfar_private *priv;
struct gfar __iomem *regs;
unsigned int grp_id;
- unsigned long rx_bit_map;
- unsigned long tx_bit_map;
- unsigned long num_tx_queues;
unsigned long num_rx_queues;
+ unsigned long rx_bit_map;
+ /* cacheline 3 */
unsigned int rstat;
unsigned int tstat;
- unsigned int imask;
- unsigned int ievent;
- unsigned int interruptTransmit;
- unsigned int interruptReceive;
- unsigned int interruptError;
-
- char int_name_tx[GFAR_INT_NAME_MAX];
- char int_name_rx[GFAR_INT_NAME_MAX];
- char int_name_er[GFAR_INT_NAME_MAX];
+ unsigned long num_tx_queues;
+ unsigned long tx_bit_map;
+
+ struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
};
+#define gfar_irq(grp, ID) \
+ ((grp)->irqinfo[GFAR_##ID])
+
enum gfar_errata {
GFAR_ERRATA_74 = 0x01,
GFAR_ERRATA_76 = 0x02,
@@ -1053,28 +1049,65 @@ enum gfar_errata {
* the buffer descriptor determines the actual condition.
*/
struct gfar_private {
-
- /* Indicates how many tx, rx queues are enabled */
- unsigned int num_tx_queues;
unsigned int num_rx_queues;
- unsigned int num_grps;
- unsigned int mode;
- /* The total tx and rx ring size for the enabled queues */
- unsigned int total_tx_ring_size;
- unsigned int total_rx_ring_size;
-
- struct device_node *node;
+ struct device *dev;
struct net_device *ndev;
- struct platform_device *ofdev;
enum gfar_errata errata;
+ unsigned int rx_buffer_size;
+
+ u16 uses_rxfcb;
+ u16 padding;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+ int hwts_tx_en;
- struct gfar_priv_grp gfargrp[MAXGROUPS];
struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
+ struct gfar_priv_grp gfargrp[MAXGROUPS];
+
+ u32 device_flags;
+
+ unsigned int mode;
+ unsigned int num_tx_queues;
+ unsigned int num_grps;
+
+ /* Network Statistics */
+ struct gfar_extra_stats extra_stats;
+
+ /* PHY stuff */
+ phy_interface_t interface;
+ struct device_node *phy_node;
+ struct device_node *tbi_node;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+
+ /* Bitfield update lock */
+ spinlock_t bflock;
+
+ uint32_t msg_enable;
+
+ struct work_struct reset_task;
+
+ struct platform_device *ofdev;
+ unsigned char
+ extended_hash:1,
+ bd_stash_en:1,
+ rx_filer_enable:1,
+ /* Wake-on-LAN enabled */
+ wol_en:1,
+ /* Enable priority based Tx scheduling in Hw */
+ prio_sched_en:1;
+
+ /* The total tx and rx ring size for the enabled queues */
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
/* RX per device parameters */
- unsigned int rx_buffer_size;
unsigned int rx_stash_size;
unsigned int rx_stash_index;
@@ -1093,39 +1126,6 @@ struct gfar_private {
unsigned int fifo_starve;
unsigned int fifo_starve_off;
- /* Bitfield update lock */
- spinlock_t bflock;
-
- phy_interface_t interface;
- struct device_node *phy_node;
- struct device_node *tbi_node;
- u32 device_flags;
- unsigned char
- extended_hash:1,
- bd_stash_en:1,
- rx_filer_enable:1,
- wol_en:1, /* Wake-on-LAN enabled */
- prio_sched_en:1; /* Enable priorty based Tx scheduling in Hw */
- unsigned short padding;
-
- /* PHY stuff */
- struct phy_device *phydev;
- struct mii_bus *mii_bus;
- int oldspeed;
- int oldduplex;
- int oldlink;
-
- uint32_t msg_enable;
-
- struct work_struct reset_task;
-
- /* Network Statistics */
- struct gfar_extra_stats extra_stats;
-
- /* HW time stamping enabled flag */
- int hwts_rx_en;
- int hwts_tx_en;
-
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
@@ -1138,16 +1138,16 @@ static inline int gfar_has_errata(struct gfar_private *priv,
return priv->errata & err;
}
-static inline u32 gfar_read(volatile unsigned __iomem *addr)
+static inline u32 gfar_read(unsigned __iomem *addr)
{
u32 val;
- val = in_be32(addr);
+ val = ioread32be(addr);
return val;
}
-static inline void gfar_write(volatile unsigned __iomem *addr, u32 val)
+static inline void gfar_write(unsigned __iomem *addr, u32 val)
{
- out_be32(addr, val);
+ iowrite32be(val, addr);
}
static inline void gfar_write_filer(struct gfar_private *priv,
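gianfar.h switches the extra_stats counters from plain u64 to atomic64_t so the interrupt handlers and the NAPI path can bump them without a lock, and the ethtool dump reads them back with atomic64_read(). A user-space analogue using C11 atomics (the field names follow the driver; the threads are only there to show why unsynchronized 64-bit increments are unsafe, especially on 32-bit hosts); build with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct extra_stats {            /* subset of the driver's counter block */
	atomic_uint_fast64_t rx_crcerr;
	atomic_uint_fast64_t rx_overrun;
	atomic_uint_fast64_t tx_underrun;
};

static struct extra_stats stats;

static void *irq_path(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		atomic_fetch_add(&stats.rx_crcerr, 1);  /* like atomic64_inc() */
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	uint64_t buf[3];

	pthread_create(&a, NULL, irq_path, NULL);
	pthread_create(&b, NULL, irq_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* ethtool-style snapshot: copy each counter with an atomic load */
	buf[0] = atomic_load(&stats.rx_crcerr);
	buf[1] = atomic_load(&stats.rx_overrun);
	buf[2] = atomic_load(&stats.tx_underrun);
	printf("rx_crcerr=%llu rx_overrun=%llu tx_underrun=%llu\n",
	       (unsigned long long)buf[0], (unsigned long long)buf[1],
	       (unsigned long long)buf[2]);
	return 0;
}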
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index ab6762caa957..75e89acf4912 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -149,20 +149,17 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
int i;
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u64 *extra = (u64 *) & priv->extra_stats;
+ atomic64_t *extra = (atomic64_t *)&priv->extra_stats;
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ buf[i] = atomic64_read(&extra[i]);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
- struct gfar_stats *stats = (struct gfar_stats *) buf;
-
- for (i = 0; i < GFAR_RMON_LEN; i++)
- stats->rmon[i] = (u64) gfar_read(&rmon[i]);
- for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
- stats->extra[i] = extra[i];
- } else
- for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
- buf[i] = extra[i];
+ for (; i < GFAR_STATS_LEN; i++, rmon++)
+ buf[i] = (u64) gfar_read(rmon);
+ }
}
static int gfar_sset_count(struct net_device *dev, int sset)
@@ -184,10 +181,11 @@ static int gfar_sset_count(struct net_device *dev, int sset)
static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
- strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
- strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN);
- strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, gfar_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
@@ -715,12 +713,11 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
int j = MAX_FILER_IDX, l = 0x0;
int ret = 1;
- local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
- local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
+ local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
+ GFP_KERNEL);
+ local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
+ GFP_KERNEL);
if (!local_rqfpr || !local_rqfcr) {
- pr_err("Out of memory\n");
ret = 0;
goto err;
}
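Several allocations in gianfar.c and gianfar_ethtool.c move from kmalloc(count * size) to kmalloc_array(count, size); the point of the helper is that the multiplication is overflow-checked before the allocator ever sees it. A minimal user-space equivalent of that guard (alloc_array() is an illustrative helper, not a kernel function; __builtin_mul_overflow is a GCC/Clang builtin):

#include <stdio.h>
#include <stdlib.h>

/* Refuse the allocation if count * size would wrap around -- the same idea
 * kmalloc_array() implements in the kernel. */
static void *alloc_array(size_t count, size_t size)
{
	size_t bytes;

	if (__builtin_mul_overflow(count, size, &bytes))
		return NULL;
	return malloc(bytes);
}

int main(void)
{
	void *ok = alloc_array(256, sizeof(unsigned int));
	void *bad = alloc_array((size_t)-1, 8);   /* would overflow size_t */

	printf("normal request: %s\n", ok ? "allocated" : "failed");
	printf("overflowing request: %s\n", bad ? "allocated" : "rejected");
	free(ok);
	return 0;
}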
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 37b035306013..1ebf7128ec04 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -350,10 +350,10 @@ static void
uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, DRV_NAME, 32);
- strncpy(drvinfo->version, DRV_VERSION, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "QUICC ENGINE", 32);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
drvinfo->eedump_len = 0;
drvinfo->regdump_len = uec_get_regs_len(netdev);
}
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index dffee9d44fd5..6231bc02b964 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_FUJITSU
bool "Fujitsu devices"
default y
- depends on ISA || PCMCIA || ((ISA || MCA_LEGACY) && EXPERIMENTAL)
+ depends on ISA || PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -17,18 +17,6 @@ config NET_VENDOR_FUJITSU
if NET_VENDOR_FUJITSU
-config AT1700
- tristate "AT1700/1720 support (EXPERIMENTAL)"
- depends on (ISA || MCA_LEGACY) && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called at1700.
-
config PCMCIA_FMVJ18X
tristate "Fujitsu FMV-J18x PCMCIA support"
depends on PCMCIA
@@ -40,15 +28,4 @@ config PCMCIA_FMVJ18X
To compile this driver as a module, choose M here: the module will be
called fmvj18x_cs. If unsure, say N.
-config ETH16I
- tristate "ICL EtherTeam 16i/32 support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called eth16i.
-
endif # NET_VENDOR_FUJITSU
diff --git a/drivers/net/ethernet/fujitsu/Makefile b/drivers/net/ethernet/fujitsu/Makefile
index 2730ae67d3aa..21561fdcc69f 100644
--- a/drivers/net/ethernet/fujitsu/Makefile
+++ b/drivers/net/ethernet/fujitsu/Makefile
@@ -2,6 +2,4 @@
# Makefile for the Fujitsu network device drivers.
#
-obj-$(CONFIG_AT1700) += at1700.o
-obj-$(CONFIG_ETH16I) += eth16i.o
obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o
diff --git a/drivers/net/ethernet/fujitsu/at1700.c b/drivers/net/ethernet/fujitsu/at1700.c
deleted file mode 100644
index 4b80dc4531ad..000000000000
--- a/drivers/net/ethernet/fujitsu/at1700.c
+++ /dev/null
@@ -1,791 +0,0 @@
-/* at1700.c: A network device driver for the Allied Telesis AT1700.
-
- Written 1993-98 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- This is a device driver for the Allied Telesis AT1700, and
- Fujitsu FMV-181/182/181A/182A/183/184/183A/184A, which are
- straight-forward Fujitsu MB86965 implementations.
-
- Modification for Fujitsu FMV-18X cards is done by Yutaka Tamiya
- (tamy@flab.fujitsu.co.jp).
-
- Sources:
- The Fujitsu MB86965 datasheet.
-
- After the initial version of this driver was written Gerry Sawkins of
- ATI provided their EEPROM configuration code header file.
- Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes.
-
- MCA bus (AT1720) support (now deleted) by Rene Schmit <rene@bss.lu>
-
- Bugs:
- The MB86965 has a design flaw that makes all probes unreliable. Not
- only is it difficult to detect, it also moves around in I/O space in
- response to inb()s from other device probes!
-*/
-
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/skbuff.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-static char version[] __initdata =
- "at1700.c:v1.16 9/11/06 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#define DRV_NAME "at1700"
-
-/* Tunable parameters. */
-
-/* When to switch from the 64-entry multicast filter to Rx-all-multicast. */
-#define MC_FILTERBREAK 64
-
-/* These unusual address orders are used to verify the CONFIG register. */
-
-static int fmv18x_probe_list[] __initdata = {
- 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0
-};
-
-/*
- * ISA
- */
-
-static unsigned at1700_probe_list[] __initdata = {
- 0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
-};
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 1
-#endif
-static unsigned int net_debug = NET_DEBUG;
-
-typedef unsigned char uchar;
-
-/* Information that need to be kept for each board. */
-struct net_local {
- spinlock_t lock;
- unsigned char mc_filter[8];
- uint jumpered:1; /* Set iff the board has jumper config. */
- uint tx_started:1; /* Packets are on the Tx queue. */
- uint tx_queue_ready:1; /* Tx queue is ready to be sent. */
- uint rx_started:1; /* Packets are Rxing. */
- uchar tx_queue; /* Number of packet on the Tx queue. */
- ushort tx_queue_len; /* Current length of the Tx queue. */
-};
-
-
-/* Offsets from the base address. */
-#define STATUS 0
-#define TX_STATUS 0
-#define RX_STATUS 1
-#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
-#define RX_INTR 3
-#define TX_MODE 4
-#define RX_MODE 5
-#define CONFIG_0 6 /* Misc. configuration settings. */
-#define CONFIG_1 7
-/* Run-time register bank 2 definitions. */
-#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
-#define TX_START 10
-#define COL16CNTL 11 /* Control Reg for 16 collisions */
-#define MODE13 13
-#define RX_CTRL 14
-/* Configuration registers only on the '865A/B chips. */
-#define EEPROM_Ctrl 16
-#define EEPROM_Data 17
-#define CARDSTATUS 16 /* FMV-18x Card Status */
-#define CARDSTATUS1 17 /* FMV-18x Card Status */
-#define IOCONFIG 18 /* Either read the jumper, or move the I/O. */
-#define IOCONFIG1 19
-#define SAPROM 20 /* The station address PROM, if no EEPROM. */
-#define MODE24 24
-#define RESET 31 /* Write to reset some parts of the chip. */
-#define AT1700_IO_EXTENT 32
-#define PORT_OFFSET(o) (o)
-
-
-#define TX_TIMEOUT (HZ/10)
-
-
-/* Index to functions, as function prototypes. */
-
-static int at1700_probe1(struct net_device *dev, int ioaddr);
-static int read_eeprom(long ioaddr, int location);
-static int net_open(struct net_device *dev);
-static netdev_tx_t net_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t net_interrupt(int irq, void *dev_id);
-static void net_rx(struct net_device *dev);
-static int net_close(struct net_device *dev);
-static void set_rx_mode(struct net_device *dev);
-static void net_tx_timeout (struct net_device *dev);
-
-
-/* Check for a network adaptor of this type, and return '0' iff one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, allocate space for the device and return success
- (detachable devices only).
- */
-
-static int io = 0x260;
-
-static int irq;
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, NULL);
- release_region(dev->base_addr, AT1700_IO_EXTENT);
-}
-
-struct net_device * __init at1700_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- unsigned *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- } else {
- dev->base_addr = io;
- dev->irq = irq;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = at1700_probe1(dev, io);
- } else if (io != 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = at1700_probe_list; *port; port++) {
- if (at1700_probe1(dev, *port) == 0)
- break;
- dev->irq = irq;
- }
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops at1700_netdev_ops = {
- .ndo_open = net_open,
- .ndo_stop = net_close,
- .ndo_start_xmit = net_send_packet,
- .ndo_set_rx_mode = set_rx_mode,
- .ndo_tx_timeout = net_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
- "signature", the default bit pattern after a reset. This *doesn't* work --
- there is no way to reset the bus interface without a complete power-cycle!
-
- It turns out that ATI came to the same conclusion I did: the only thing
- that can be done is checking a few bits and then diving right into an
- EEPROM read. */
-
-static int __init at1700_probe1(struct net_device *dev, int ioaddr)
-{
- static const char fmv_irqmap[4] = {3, 7, 10, 15};
- static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
- static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
- unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
- int ret = -ENODEV;
- struct net_local *lp = netdev_priv(dev);
-
- if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* Resetting the chip doesn't reset the ISA interface, so don't bother.
- That means we have to be careful with the register values we probe
- for.
- */
-#ifdef notdef
- printk("at1700 probe at %#x, eeprom is %4.4x %4.4x %4.4x ctrl %4.4x.\n",
- ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5),
- read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl));
-#endif
- /* We must check for the EEPROM-config boards first, else accessing
- IOCONFIG0 will move the board! */
- if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr &&
- read_eeprom(ioaddr, 4) == 0x0000 &&
- (read_eeprom(ioaddr, 5) & 0xff00) == 0xF400)
- is_at1700 = 1;
- else if (inb(ioaddr + SAPROM ) == 0x00 &&
- inb(ioaddr + SAPROM + 1) == 0x00 &&
- inb(ioaddr + SAPROM + 2) == 0x0e)
- is_fmv18x = 1;
- else {
- goto err_out;
- }
-
- /* Reset the internal state machines. */
- outb(0, ioaddr + RESET);
-
- if (is_at1700) {
- irq = at1700_irqmap[(read_eeprom(ioaddr, 12)&0x04)
- | (read_eeprom(ioaddr, 0)>>14)];
- } else {
- /* Check PnP mode for FMV-183/184/183A/184A. */
- /* This PnP routine is very poor. IO and IRQ should be known. */
- if (inb(ioaddr + CARDSTATUS1) & 0x20) {
- irq = dev->irq;
- for (i = 0; i < 8; i++) {
- if (irq == fmv_irqmap_pnp[i])
- break;
- }
- if (i == 8) {
- goto err_out;
- }
- } else {
- if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] != ioaddr)
- goto err_out;
- irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03];
- }
- }
-
- printk("%s: %s found at %#3x, IRQ %d, address ", dev->name,
- is_at1700 ? "AT1700" : "FMV-18X", ioaddr, irq);
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
-
- if (is_at1700) {
- for(i = 0; i < 3; i++) {
- unsigned short eeprom_val = read_eeprom(ioaddr, 4+i);
- ((unsigned short *)dev->dev_addr)[i] = ntohs(eeprom_val);
- }
- } else {
- for(i = 0; i < 6; i++) {
- unsigned char val = inb(ioaddr + SAPROM + i);
- dev->dev_addr[i] = val;
- }
- }
- printk("%pM", dev->dev_addr);
-
- /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
- rather than 150 ohm shielded twisted pair compensation.
- 0x0000 == auto-sense the interface
- 0x0800 == use TP interface
- 0x1800 == use coax interface
- */
- {
- const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2"};
- if (is_at1700) {
- ushort setup_value = read_eeprom(ioaddr, 12);
- dev->if_port = setup_value >> 8;
- } else {
- ushort setup_value = inb(ioaddr + CARDSTATUS);
- switch (setup_value & 0x07) {
- case 0x01: /* 10base5 */
- case 0x02: /* 10base2 */
- dev->if_port = 0x18; break;
- case 0x04: /* 10baseT */
- dev->if_port = 0x08; break;
- default: /* auto-sense */
- dev->if_port = 0x00; break;
- }
- }
- printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
- }
-
-	/* Set the configuration register 0 to 32K 100ns byte-wide memory, 16-bit
-	   bus access, two 4K Tx queues, and disabled Tx and Rx. */
- outb(0xda, ioaddr + CONFIG_0);
-
- /* Set the station address in bank zero. */
- outb(0x00, ioaddr + CONFIG_1);
- for (i = 0; i < 6; i++)
- outb(dev->dev_addr[i], ioaddr + PORT_OFFSET(8 + i));
-
- /* Switch to bank 1 and set the multicast table to accept none. */
- outb(0x04, ioaddr + CONFIG_1);
- for (i = 0; i < 8; i++)
- outb(0x00, ioaddr + PORT_OFFSET(8 + i));
-
-
- /* Switch to bank 2 */
- /* Lock our I/O address, and set manual processing mode for 16 collisions. */
- outb(0x08, ioaddr + CONFIG_1);
- outb(dev->if_port, ioaddr + MODE13);
- outb(0x00, ioaddr + COL16CNTL);
-
- if (net_debug)
- printk(version);
-
- dev->netdev_ops = &at1700_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- spin_lock_init(&lp->lock);
-
- lp->jumpered = is_fmv18x;
- /* Snarf the interrupt vector now. */
- ret = request_irq(irq, net_interrupt, 0, DRV_NAME, dev);
- if (ret) {
- printk(KERN_ERR "AT1700 at %#3x is unusable due to a "
- "conflict on IRQ %d.\n",
- ioaddr, irq);
- goto err_out;
- }
-
- return 0;
-
-err_out:
- release_region(ioaddr, AT1700_IO_EXTENT);
- return ret;
-}
-
-
-/* EEPROM_Ctrl bits. */
-#define EE_SHIFT_CLK 0x40 /* EEPROM shift clock, in reg. 16. */
-#define EE_CS 0x20 /* EEPROM chip select, in reg. 16. */
-#define EE_DATA_WRITE 0x80 /* EEPROM chip data in, in reg. 17. */
-#define EE_DATA_READ 0x80 /* EEPROM chip data out, in reg. 17. */
-
-/* The EEPROM commands include the always-set leading bit. */
-#define EE_WRITE_CMD (5 << 6)
-#define EE_READ_CMD (6 << 6)
-#define EE_ERASE_CMD (7 << 6)
-
-static int __init read_eeprom(long ioaddr, int location)
-{
- int i;
- unsigned short retval = 0;
- long ee_addr = ioaddr + EEPROM_Ctrl;
- long ee_daddr = ioaddr + EEPROM_Data;
- int read_cmd = location | EE_READ_CMD;
-
- /* Shift the read command bits out. */
- for (i = 9; i >= 0; i--) {
- short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
- outb(EE_CS, ee_addr);
- outb(dataval, ee_daddr);
- outb(EE_CS | EE_SHIFT_CLK, ee_addr); /* EEPROM clock tick. */
- }
- outb(EE_DATA_WRITE, ee_daddr);
- for (i = 16; i > 0; i--) {
- outb(EE_CS, ee_addr);
- outb(EE_CS | EE_SHIFT_CLK, ee_addr);
- retval = (retval << 1) | ((inb(ee_daddr) & EE_DATA_READ) ? 1 : 0);
- }
-
- /* Terminate the EEPROM access. */
- outb(EE_CS, ee_addr);
- outb(EE_SHIFT_CLK, ee_addr);
- outb(0, ee_addr);
- return retval;
-}
-
-
-
-static int net_open(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
-	/* Set the configuration register 0 to 32K 100ns byte-wide memory, 16-bit
-	   bus access, and two 4K Tx queues. */
- outb(0x5a, ioaddr + CONFIG_0);
-
- /* Powerup, switch to register bank 2, and enable the Rx and Tx. */
- outb(0xe8, ioaddr + CONFIG_1);
-
- lp->tx_started = 0;
- lp->tx_queue_ready = 1;
- lp->rx_started = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
-
- /* Turn on hardware Tx and Rx interrupts. */
- outb(0x82, ioaddr + TX_INTR);
- outb(0x81, ioaddr + RX_INTR);
-
-	/* Enable the IRQ on boards of fmv18x where it is feasible. */
- if (lp->jumpered) {
- outb(0x80, ioaddr + IOCONFIG1);
- }
-
- netif_start_queue(dev);
- return 0;
-}
-
-static void net_tx_timeout (struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- printk ("%s: transmit timed out with status %04x, %s?\n", dev->name,
- inw (ioaddr + STATUS), inb (ioaddr + TX_STATUS) & 0x80
- ? "IRQ conflict" : "network cable problem");
- printk ("%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
- dev->name, inw(ioaddr + TX_STATUS), inw(ioaddr + TX_INTR), inw(ioaddr + TX_MODE),
- inw(ioaddr + CONFIG_0), inw(ioaddr + DATAPORT), inw(ioaddr + TX_START),
- inw(ioaddr + MODE13 - 1), inw(ioaddr + RX_CTRL));
- dev->stats.tx_errors++;
- /* ToDo: We should try to restart the adaptor... */
- outw(0xffff, ioaddr + MODE24);
- outw (0xffff, ioaddr + TX_STATUS);
- outb (0x5a, ioaddr + CONFIG_0);
- outb (0xe8, ioaddr + CONFIG_1);
- outw (0x8182, ioaddr + TX_INTR);
- outb (0x00, ioaddr + TX_START);
- outb (0x03, ioaddr + COL16CNTL);
-
- dev->trans_start = jiffies; /* prevent tx timeout */
-
- lp->tx_started = 0;
- lp->tx_queue_ready = 1;
- lp->rx_started = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
-
- netif_wake_queue(dev);
-}
-
-
-static netdev_tx_t net_send_packet (struct sk_buff *skb,
- struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- short len = skb->len;
- unsigned char *buf = skb->data;
- static u8 pad[ETH_ZLEN];
-
- netif_stop_queue (dev);
-
-	/* We may not start transmitting unless we finish transferring
-	   the packet into the Tx queue.  While the following code runs we
-	   may take a Tx interrupt, so we clear tx_queue_ready to keep the
-	   interrupt routine (net_interrupt) from starting a transmission. */
- lp->tx_queue_ready = 0;
- {
- outw (length, ioaddr + DATAPORT);
- /* Packet data */
- outsw (ioaddr + DATAPORT, buf, len >> 1);
- /* Check for dribble byte */
- if (len & 1) {
- outw(skb->data[skb->len-1], ioaddr + DATAPORT);
- len++;
- }
- /* Check for packet padding */
- if (length != skb->len)
- outsw(ioaddr + DATAPORT, pad, (length - len + 1) >> 1);
-
- lp->tx_queue++;
- lp->tx_queue_len += length + 2;
- }
- lp->tx_queue_ready = 1;
-
- if (lp->tx_started == 0) {
- /* If the Tx is idle, always trigger a transmit. */
- outb (0x80 | lp->tx_queue, ioaddr + TX_START);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- lp->tx_started = 1;
- netif_start_queue (dev);
- } else if (lp->tx_queue_len < 4096 - 1502)
- /* Yes, there is room for one more packet. */
- netif_start_queue (dev);
- dev_kfree_skb (skb);
-
- return NETDEV_TX_OK;
-}
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-static irqreturn_t net_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr, status;
- int handled = 0;
-
- if (dev == NULL) {
- printk ("at1700_interrupt(): irq %d for unknown device.\n", irq);
- return IRQ_NONE;
- }
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- spin_lock (&lp->lock);
-
- status = inw(ioaddr + TX_STATUS);
- outw(status, ioaddr + TX_STATUS);
-
- if (net_debug > 4)
- printk("%s: Interrupt with status %04x.\n", dev->name, status);
- if (lp->rx_started == 0 &&
- (status & 0xff00 || (inb(ioaddr + RX_MODE) & 0x40) == 0)) {
-		/* Got a packet (or several).
-		   We must not run net_rx more than once at a time for the
-		   same device.  While net_rx runs we may take a Tx interrupt,
-		   so we set rx_started to keep the interrupt routine
-		   (net_interrupt) from diving into net_rx again. */
- handled = 1;
- lp->rx_started = 1;
- outb(0x00, ioaddr + RX_INTR); /* Disable RX intr. */
- net_rx(dev);
- outb(0x81, ioaddr + RX_INTR); /* Enable RX intr. */
- lp->rx_started = 0;
- }
- if (status & 0x00ff) {
- handled = 1;
- if (status & 0x02) {
- /* More than 16 collisions occurred */
- if (net_debug > 4)
-				printk("%s: 16 collisions occurred during Tx.\n", dev->name);
- /* Cancel sending a packet. */
- outb(0x03, ioaddr + COL16CNTL);
- dev->stats.collisions++;
- }
- if (status & 0x82) {
- dev->stats.tx_packets++;
-			/* If the Tx queue holds packets and no packet is
-			   currently being transferred from the host, start
-			   transmitting. */
- if (lp->tx_queue && lp->tx_queue_ready) {
- outb(0x80 | lp->tx_queue, ioaddr + TX_START);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
- netif_wake_queue (dev);
- } else {
- lp->tx_started = 0;
- netif_wake_queue (dev);
- }
- }
- }
-
- spin_unlock (&lp->lock);
- return IRQ_RETVAL(handled);
-}
-
-/* We have a good packet(s), get it/them out of the buffers. */
-static void
-net_rx(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int boguscount = 5;
-
- while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
- ushort status = inw(ioaddr + DATAPORT);
- ushort pkt_len = inw(ioaddr + DATAPORT);
-
- if (net_debug > 4)
- printk("%s: Rxing packet mode %02x status %04x.\n",
- dev->name, inb(ioaddr + RX_MODE), status);
-#ifndef final_version
- if (status == 0) {
- outb(0x05, ioaddr + RX_CTRL);
- break;
- }
-#endif
-
- if ((status & 0xF0) != 0x20) { /* There was an error. */
- dev->stats.rx_errors++;
- if (status & 0x08) dev->stats.rx_length_errors++;
- if (status & 0x04) dev->stats.rx_frame_errors++;
- if (status & 0x02) dev->stats.rx_crc_errors++;
- if (status & 0x01) dev->stats.rx_over_errors++;
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- if (pkt_len > 1550) {
- printk("%s: The AT1700 claimed a very large packet, size %d.\n",
- dev->name, pkt_len);
- /* Prime the FIFO and then flush the packet. */
- inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
- outb(0x05, ioaddr + RX_CTRL);
- dev->stats.rx_errors++;
- break;
- }
- skb = netdev_alloc_skb(dev, pkt_len + 3);
- if (skb == NULL) {
- printk("%s: Memory squeeze, dropping packet (len %d).\n",
- dev->name, pkt_len);
- /* Prime the FIFO and then flush the packet. */
- inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
- outb(0x05, ioaddr + RX_CTRL);
- dev->stats.rx_dropped++;
- break;
- }
- skb_reserve(skb,2);
-
- insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
- skb->protocol=eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- if (--boguscount <= 0)
- break;
- }
-
- /* If any worth-while packets have been received, dev_rint()
- has done a mark_bh(NET_BH) for us and will work on them
- when we get to the bottom-half routine. */
- {
- int i;
- for (i = 0; i < 20; i++) {
- if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
- break;
- inw(ioaddr + DATAPORT); /* dummy status read */
- outb(0x05, ioaddr + RX_CTRL);
- }
-
- if (net_debug > 5)
- printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
- dev->name, inb(ioaddr + RX_MODE), i);
- }
-}
-
-/* The inverse routine to net_open(). */
-static int net_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
-
- /* Set configuration register 0 to disable Tx and Rx. */
- outb(0xda, ioaddr + CONFIG_0);
-
- /* No statistic counters on the chip to update. */
-
- /* Disable the IRQ on boards of fmv18x where it is feasible. */
- if (lp->jumpered)
- outb(0x00, ioaddr + IOCONFIG1);
-
- /* Power-down the chip. Green, green, green! */
- outb(0x00, ioaddr + CONFIG_1);
- return 0;
-}
-
-/*
- Set the multicast/promiscuous mode for this adaptor.
-*/
-
-static void
-set_rx_mode(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
- unsigned char mc_filter[8]; /* Multicast hash filter */
- unsigned long flags;
-
- if (dev->flags & IFF_PROMISC) {
- memset(mc_filter, 0xff, sizeof(mc_filter));
- outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- memset(mc_filter, 0xff, sizeof(mc_filter));
- outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (netdev_mc_empty(dev)) {
- memset(mc_filter, 0x00, sizeof(mc_filter));
- outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
- } else {
- struct netdev_hw_addr *ha;
-
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, dev) {
- unsigned int bit =
- ether_crc_le(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit >> 3] |= (1 << bit);
- }
- outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */
- }
-
- spin_lock_irqsave (&lp->lock, flags);
- if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
- int i;
- int saved_bank = inw(ioaddr + CONFIG_0);
- /* Switch to bank 1 and set the multicast table. */
- outw((saved_bank & ~0x0C00) | 0x0480, ioaddr + CONFIG_0);
- for (i = 0; i < 8; i++)
- outb(mc_filter[i], ioaddr + PORT_OFFSET(8 + i));
- memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
- outw(saved_bank, ioaddr + CONFIG_0);
- }
- spin_unlock_irqrestore (&lp->lock, flags);
-}
-
-#ifdef MODULE
-static struct net_device *dev_at1700;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-module_param(net_debug, int, 0);
-MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
-MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
-MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
-
-static int __init at1700_module_init(void)
-{
- if (io == 0)
- printk("at1700: You should not use auto-probing with insmod!\n");
- dev_at1700 = at1700_probe(-1);
- if (IS_ERR(dev_at1700))
- return PTR_ERR(dev_at1700);
- return 0;
-}
-
-static void __exit at1700_module_exit(void)
-{
- unregister_netdev(dev_at1700);
- cleanup_card(dev_at1700);
- free_netdev(dev_at1700);
-}
-module_init(at1700_module_init);
-module_exit(at1700_module_exit);
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/fujitsu/eth16i.c b/drivers/net/ethernet/fujitsu/eth16i.c
deleted file mode 100644
index a992d1f7e0d2..000000000000
--- a/drivers/net/ethernet/fujitsu/eth16i.c
+++ /dev/null
@@ -1,1483 +0,0 @@
-/* eth16i.c An ICL EtherTeam 16i and 32 EISA ethernet driver for Linux
-
- Written 1994-1999 by Mika Kuoppala
-
- Copyright (C) 1994-1999 by Mika Kuoppala
- Based on skeleton.c and heavily on at1700.c by Donald Becker
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as miku@iki.fi
-
- This driver supports following cards :
- - ICL EtherTeam 16i
- - ICL EtherTeam 32 EISA
- (Uses true 32 bit transfers rather than 16i compatibility mode)
-
- Example Module usage:
- insmod eth16i.o io=0x2a0 mediatype=bnc
-
- mediatype can be one of the following: bnc,tp,dix,auto,eprom
-
-   'auto' will try to autoprobe the mediatype.
-   'eprom' will use whatever type is defined in the eprom.
-
-   I have benchmarked the driver with a PII/300MHz as an ftp client
-   and a 486/33MHz as an ftp server. Top speed was 1128.37 kilobytes/sec.
-
- Sources:
- - skeleton.c a sample network driver core for linux,
- written by Donald Becker <becker@scyld.com>
- - at1700.c a driver for Allied Telesis AT1700, written
- by Donald Becker.
- - e16iSRV.asm a Netware 3.X Server Driver for ICL EtherTeam16i
- written by Markku Viima
- - The Fujitsu MB86965 databook.
-
-   The author thanks the following persons for their valuable assistance:
- Markku Viima (ICL)
- Ari Valve (ICL)
- Donald Becker
- Kurt Huwig <kurt@huwig.de>
-
- Revision history:
-
- Version Date Description
-
- 0.01 15.12-94 Initial version (card detection)
- 0.02 23.01-95 Interrupt is now hooked correctly
- 0.03 01.02-95 Rewrote initialization part
- 0.04 07.02-95 Base skeleton done...
- Made a few changes to signature checking
- to make it a bit reliable.
- - fixed bug in tx_buf mapping
- - fixed bug in initialization (DLC_EN
- wasn't enabled when initialization
- was done.)
- 0.05 08.02-95 If there were more than one packet to send,
- transmit was jammed due to invalid
- register write...now fixed
- 0.06 19.02-95 Rewrote interrupt handling
- 0.07 13.04-95 Wrote EEPROM read routines
- Card configuration now set according to
- data read from EEPROM
- 0.08 23.06-95 Wrote part that tries to probe used interface
- port if AUTO is selected
-
- 0.09 01.09-95 Added module support
-
- 0.10 04.09-95 Fixed receive packet allocation to work
- with kernels > 1.3.x
-
- 0.20 20.09-95 Added support for EtherTeam32 EISA
-
- 0.21 17.10-95 Removed the unnecessary extern
- init_etherdev() declaration. Some
- other cleanups.
-
- 0.22 22.02-96 Receive buffer was not flushed
- correctly when faulty packet was
- received. Now fixed.
-
- 0.23 26.02-96 Made resetting the adapter
- more reliable.
-
- 0.24 27.02-96 Rewrote faulty packet handling in eth16i_rx
-
- 0.25 22.05-96 kfree() was missing from cleanup_module.
-
- 0.26 11.06-96 Sometimes card was not found by
- check_signature(). Now made more reliable.
-
- 0.27 23.06-96 Oops. 16 consecutive collisions halted
- adapter. Now will try to retransmit
- MAX_COL_16 times before finally giving up.
-
- 0.28 28.10-97 Added dev_id parameter (NULL) for free_irq
-
- 0.29 29.10-97 Multiple card support for module users
-
- 0.30 30.10-97 Fixed irq allocation bug.
- (request_irq moved from probe to open)
-
- 0.30a 21.08-98 Card detection made more relaxed. Driver
- had problems with some TCP/IP-PROM boots
- to find the card. Suggested by
- Kurt Huwig <kurt@huwig.de>
-
- 0.31 28.08-98 Media interface port can now be selected
- with module parameters or kernel
- boot parameters.
-
- 0.32 31.08-98 IRQ was never freed if open/close
- pair wasn't called. Now fixed.
-
- 0.33 10.09-98 When eth16i_open() was called after
- eth16i_close() chip never recovered.
- Now more shallow reset is made on
- close.
-
- 0.34 29.06-99 Fixed one bad #ifdef.
- Changed ioaddr -> io for consistency
-
- 0.35 01.07-99 transmit,-receive bytes were never
- updated in stats.
-
- Bugs:
- In some cases the media interface autoprobing code doesn't find
-   the correct interface type. In this case you can manually choose
-   the interface type in DOS with E16IC.EXE, which is the
-   configuration software for EtherTeam16i and EtherTeam32 cards.
-   The same applies to the IRQ setting: you cannot (yet) use a
-   module parameter to configure the IRQ of the card.
-
- To do:
- - Real multicast support
-   - Rewrite the media interface autoprobing code. It's _horrible_!
-   - Possibly merge all the MB86965-specific code into an external
-     module for use by eth16.c and Donald's at1700.c
-   - IRQ configuration with a module parameter. I will do
-     this once I get enough info about setting the
-     IRQ without the configuration utility.
-*/
-
-static char *version =
- "eth16i.c: v0.35 01-Jul-1999 Mika Kuoppala (miku@iki.fi)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-
-
-
-/* Few macros */
-#define BITSET(ioaddr, bnum) ((outb(((inb(ioaddr)) | (bnum)), ioaddr)))
-#define BITCLR(ioaddr, bnum) ((outb(((inb(ioaddr)) & (~(bnum))), ioaddr)))
-
-/* This is the I/O address space for Etherteam 16i adapter. */
-#define ETH16I_IO_EXTENT 32
-
-/* Ticks before deciding that transmit has timed out */
-#define TX_TIMEOUT (400*HZ/1000)
-
-/* Maximum loop count when receiving packets */
-#define MAX_RX_LOOP 20
-
-/* Some interrupt masks */
-#define ETH16I_INTR_ON 0xef8a /* Higher is receive mask */
-#define ETH16I_INTR_OFF 0x0000
-
-/* Buffers header status byte meanings */
-#define PKT_GOOD BIT(5)
-#define PKT_GOOD_RMT BIT(4)
-#define PKT_SHORT BIT(3)
-#define PKT_ALIGN_ERR BIT(2)
-#define PKT_CRC_ERR BIT(1)
-#define PKT_RX_BUF_OVERFLOW BIT(0)
-
-/* Transmit status register (DLCR0) */
-#define TX_STATUS_REG 0
-#define TX_DONE BIT(7)
-#define NET_BUSY BIT(6)
-#define TX_PKT_RCD BIT(5)
-#define CR_LOST BIT(4)
-#define TX_JABBER_ERR BIT(3)
-#define COLLISION BIT(2)
-#define COLLISIONS_16 BIT(1)
-
-/* Receive status register (DLCR1) */
-#define RX_STATUS_REG 1
-#define RX_PKT BIT(7) /* Packet received */
-#define BUS_RD_ERR BIT(6)
-#define SHORT_PKT_ERR BIT(3)
-#define ALIGN_ERR BIT(2)
-#define CRC_ERR BIT(1)
-#define RX_BUF_OVERFLOW BIT(0)
-
-/* Transmit Interrupt Enable Register (DLCR2) */
-#define TX_INTR_REG 2
-#define TX_INTR_DONE BIT(7)
-#define TX_INTR_COL BIT(2)
-#define TX_INTR_16_COL BIT(1)
-
-/* Receive Interrupt Enable Register (DLCR3) */
-#define RX_INTR_REG 3
-#define RX_INTR_RECEIVE BIT(7)
-#define RX_INTR_SHORT_PKT BIT(3)
-#define RX_INTR_CRC_ERR BIT(1)
-#define RX_INTR_BUF_OVERFLOW BIT(0)
-
-/* Transmit Mode Register (DLCR4) */
-#define TRANSMIT_MODE_REG 4
-#define LOOPBACK_CONTROL BIT(1)
-#define CONTROL_OUTPUT BIT(2)
-
-/* Receive Mode Register (DLCR5) */
-#define RECEIVE_MODE_REG 5
-#define RX_BUFFER_EMPTY BIT(6)
-#define ACCEPT_BAD_PACKETS BIT(5)
-#define RECEIVE_SHORT_ADDR BIT(4)
-#define ACCEPT_SHORT_PACKETS BIT(3)
-#define REMOTE_RESET BIT(2)
-
-#define ADDRESS_FILTER_MODE BIT(1) | BIT(0)
-#define REJECT_ALL 0
-#define ACCEPT_ALL 3
-#define MODE_1 1 /* NODE ID, BC, MC, 2-24th bit */
-#define MODE_2 2 /* NODE ID, BC, MC, Hash Table */
-
-/* Configuration Register 0 (DLCR6) */
-#define CONFIG_REG_0 6
-#define DLC_EN BIT(7)
-#define SRAM_CYCLE_TIME_100NS BIT(6)
-#define SYSTEM_BUS_WIDTH_8 BIT(5) /* 1 = 8bit, 0 = 16bit */
-#define BUFFER_WIDTH_8 BIT(4) /* 1 = 8bit, 0 = 16bit */
-#define TBS1 BIT(3)
-#define TBS0 BIT(2)
-#define SRAM_BS1 BIT(1) /* 00=8kb, 01=16kb */
-#define SRAM_BS0 BIT(0) /* 10=32kb, 11=64kb */
-
-#ifndef ETH16I_TX_BUF_SIZE /* 0 = 2kb, 1 = 4kb */
-#define ETH16I_TX_BUF_SIZE 3 /* 2 = 8kb, 3 = 16kb */
-#endif
-#define TX_BUF_1x2048 0
-#define TX_BUF_2x2048 1
-#define TX_BUF_2x4098 2
-#define TX_BUF_2x8192 3
-
-/* Configuration Register 1 (DLCR7) */
-#define CONFIG_REG_1 7
-#define POWERUP BIT(5)
-
-/* Transmit start register */
-#define TRANSMIT_START_REG 10
-#define TRANSMIT_START_RB 2
-#define TX_START BIT(7) /* Rest of register bit indicate*/
- /* number of packets in tx buffer*/
-/* Node ID registers (DLCR8-13) */
-#define NODE_ID_0 8
-#define NODE_ID_RB 0
-
-/* Hash Table registers (HT8-15) */
-#define HASH_TABLE_0 8
-#define HASH_TABLE_RB 1
-
-/* Buffer memory ports */
-#define BUFFER_MEM_PORT_LB 8
-#define DATAPORT BUFFER_MEM_PORT_LB
-#define BUFFER_MEM_PORT_HB 9
-
-/* 16 Collision control register (BMPR11) */
-#define COL_16_REG 11
-#define HALT_ON_16 0x00
-#define RETRANS_AND_HALT_ON_16 0x02
-
-/* Maximum number of attempts to send after 16 consecutive collisions */
-#define MAX_COL_16 10
-
-/* DMA Burst and Transceiver Mode Register (BMPR13) */
-#define TRANSCEIVER_MODE_REG 13
-#define TRANSCEIVER_MODE_RB 2
-#define IO_BASE_UNLOCK BIT(7)
-#define LOWER_SQUELCH_TRESH BIT(6)
-#define LINK_TEST_DISABLE BIT(5)
-#define AUI_SELECT BIT(4)
-#define DIS_AUTO_PORT_SEL BIT(3)
-
-/* Filter Self Receive Register (BMPR14) */
-#define FILTER_SELF_RX_REG 14
-#define SKIP_RX_PACKET BIT(2)
-#define FILTER_SELF_RECEIVE BIT(0)
-
-/* EEPROM Control Register (BMPR 16) */
-#define EEPROM_CTRL_REG 16
-
-/* EEPROM Data Register (BMPR 17) */
-#define EEPROM_DATA_REG 17
-
-/* NMC93CSx6 EEPROM Control Bits */
-#define CS_0 0x00
-#define CS_1 0x20
-#define SK_0 0x00
-#define SK_1 0x40
-#define DI_0 0x00
-#define DI_1 0x80
-
-/* NMC93CSx6 EEPROM Instructions */
-#define EEPROM_READ 0x80
-
-/* NMC93CSx6 EEPROM Addresses */
-#define E_NODEID_0 0x02
-#define E_NODEID_1 0x03
-#define E_NODEID_2 0x04
-#define E_PORT_SELECT 0x14
- #define E_PORT_BNC 0x00
- #define E_PORT_DIX 0x01
- #define E_PORT_TP 0x02
- #define E_PORT_AUTO 0x03
- #define E_PORT_FROM_EPROM 0x04
-#define E_PRODUCT_CFG 0x30
-
-
-/* Macro to slow down io between EEPROM clock transitions */
-#define eeprom_slow_io() do { int _i = 40; while(--_i > 0) { inb(0x80); }}while(0)
-
-/* Jumperless Configuration Register (BMPR19) */
-#define JUMPERLESS_CONFIG 19
-
-/* ID ROM registers, writing to them also resets some parts of chip */
-#define ID_ROM_0 24
-#define ID_ROM_7 31
-#define RESET ID_ROM_0
-
-/* This is the I/O address list to be probed when seeking the card */
-static unsigned int eth16i_portlist[] __initdata = {
- 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
-};
-
-static unsigned int eth32i_portlist[] __initdata = {
- 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000, 0x7000, 0x8000,
- 0x9000, 0xA000, 0xB000, 0xC000, 0xD000, 0xE000, 0xF000, 0
-};
-
-/* This is the Interrupt lookup table for Eth16i card */
-static unsigned int eth16i_irqmap[] __initdata = { 9, 10, 5, 15, 0 };
-#define NUM_OF_ISA_IRQS 4
-
-/* This is the Interrupt lookup table for Eth32i card */
-static unsigned int eth32i_irqmap[] __initdata = { 3, 5, 7, 9, 10, 11, 12, 15, 0 };
-#define EISA_IRQ_REG 0xc89
-#define NUM_OF_EISA_IRQS 8
-
-static unsigned int eth16i_tx_buf_map[] = { 2048, 2048, 4096, 8192 };
-
-/* Use 0 for production, 1 for verification, >2 for debug */
-#ifndef ETH16I_DEBUG
-#define ETH16I_DEBUG 0
-#endif
-static unsigned int eth16i_debug = ETH16I_DEBUG;
-
-/* Information for each board */
-
-struct eth16i_local {
- unsigned char tx_started;
- unsigned char tx_buf_busy;
- unsigned short tx_queue; /* Number of packets in transmit buffer */
- unsigned short tx_queue_len;
- unsigned int tx_buf_size;
- unsigned long open_time;
- unsigned long tx_buffered_packets;
- unsigned long tx_buffered_bytes;
- unsigned long col_16;
- spinlock_t lock;
-};
-
-/* Function prototypes */
-
-static int eth16i_probe1(struct net_device *dev, int ioaddr);
-static int eth16i_check_signature(int ioaddr);
-static int eth16i_probe_port(int ioaddr);
-static void eth16i_set_port(int ioaddr, int porttype);
-static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l);
-static int eth16i_receive_probe_packet(int ioaddr);
-static int eth16i_get_irq(int ioaddr);
-static int eth16i_read_eeprom(int ioaddr, int offset);
-static int eth16i_read_eeprom_word(int ioaddr);
-static void eth16i_eeprom_cmd(int ioaddr, unsigned char command);
-static int eth16i_open(struct net_device *dev);
-static int eth16i_close(struct net_device *dev);
-static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev);
-static void eth16i_rx(struct net_device *dev);
-static void eth16i_timeout(struct net_device *dev);
-static irqreturn_t eth16i_interrupt(int irq, void *dev_id);
-static void eth16i_reset(struct net_device *dev);
-static void eth16i_timeout(struct net_device *dev);
-static void eth16i_skip_packet(struct net_device *dev);
-static void eth16i_multicast(struct net_device *dev);
-static void eth16i_select_regbank(unsigned char regbank, int ioaddr);
-static void eth16i_initialize(struct net_device *dev, int boot);
-
-#if 0
-static int eth16i_set_irq(struct net_device *dev);
-#endif
-
-#ifdef MODULE
-static ushort eth16i_parse_mediatype(const char* s);
-#endif
-
-static char cardname[] __initdata = "ICL EtherTeam 16i/32";
-
-static int __init do_eth16i_probe(struct net_device *dev)
-{
- int i;
- int ioaddr;
- int base_addr = dev->base_addr;
-
- if(eth16i_debug > 4)
- printk(KERN_DEBUG "Probing started for %s\n", cardname);
-
- if(base_addr > 0x1ff) /* Check only single location */
- return eth16i_probe1(dev, base_addr);
- else if(base_addr != 0) /* Don't probe at all */
- return -ENXIO;
-
- /* Seek card from the ISA io address space */
- for(i = 0; (ioaddr = eth16i_portlist[i]) ; i++)
- if(eth16i_probe1(dev, ioaddr) == 0)
- return 0;
-
- /* Seek card from the EISA io address space */
- for(i = 0; (ioaddr = eth32i_portlist[i]) ; i++)
- if(eth16i_probe1(dev, ioaddr) == 0)
- return 0;
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init eth16i_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct eth16i_local));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_eth16i_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops eth16i_netdev_ops = {
- .ndo_open = eth16i_open,
- .ndo_stop = eth16i_close,
- .ndo_start_xmit = eth16i_tx,
- .ndo_set_rx_mode = eth16i_multicast,
- .ndo_tx_timeout = eth16i_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- static unsigned version_printed;
- int retval;
-
- /* Let's grab the region */
- if (!request_region(ioaddr, ETH16I_IO_EXTENT, cardname))
- return -EBUSY;
-
-	/*
-	   The MB86985 chip has one register which holds the I/O address
-	   at which the chip lies. First read this register and compare it
-	   to our current I/O address; if they match, then this could be
-	   our chip.
-	*/
-
- if(ioaddr < 0x1000) {
- if(eth16i_portlist[(inb(ioaddr + JUMPERLESS_CONFIG) & 0x07)]
- != ioaddr) {
- retval = -ENODEV;
- goto out;
- }
- }
-
- /* Now we will go a bit deeper and try to find the chip's signature */
-
- if(eth16i_check_signature(ioaddr) != 0) {
- retval = -ENODEV;
- goto out;
- }
-
-	/*
-	   Now it seems that we have found an ethernet chip at this particular
-	   ioaddr. The MB86985 chip has a feature whereby reading a certain
-	   register increases its I/O base address to the next configurable
-	   slot. Now that we have found the chip, the first thing to do is
-	   make sure that the chip's ioaddr will hold still here.
-	*/
-
- eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
- outb(0x00, ioaddr + TRANSCEIVER_MODE_REG);
-
- outb(0x00, ioaddr + RESET); /* Reset some parts of chip */
- BITSET(ioaddr + CONFIG_REG_0, BIT(7)); /* Disable the data link */
-
- if( (eth16i_debug & version_printed++) == 0)
- printk(KERN_INFO "%s", version);
-
- dev->base_addr = ioaddr;
- dev->irq = eth16i_get_irq(ioaddr);
-
- /* Try to obtain interrupt vector */
-
- if ((retval = request_irq(dev->irq, (void *)&eth16i_interrupt, 0, cardname, dev))) {
- printk(KERN_WARNING "%s at %#3x, but is unusable due to conflicting IRQ %d.\n",
- cardname, ioaddr, dev->irq);
- goto out;
- }
-
- printk(KERN_INFO "%s: %s at %#3x, IRQ %d, ",
- dev->name, cardname, ioaddr, dev->irq);
-
-
- /* Now we will have to lock the chip's io address */
- eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
- outb(0x38, ioaddr + TRANSCEIVER_MODE_REG);
-
- eth16i_initialize(dev, 1); /* Initialize rest of the chip's registers */
-
-	/* Now let's save some energy by shutting down the chip ;) */
- BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
-
- /* Initialize the device structure */
- dev->netdev_ops = &eth16i_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- spin_lock_init(&lp->lock);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, ETH16I_IO_EXTENT);
- return retval;
-}
-
-
-static void eth16i_initialize(struct net_device *dev, int boot)
-{
- int ioaddr = dev->base_addr;
- int i, node_w = 0;
- unsigned char node_byte = 0;
-
- /* Setup station address */
- eth16i_select_regbank(NODE_ID_RB, ioaddr);
- for(i = 0 ; i < 3 ; i++) {
- unsigned short node_val = eth16i_read_eeprom(ioaddr, E_NODEID_0 + i);
- ((unsigned short *)dev->dev_addr)[i] = ntohs(node_val);
- }
-
- for(i = 0; i < 6; i++) {
- outb( ((unsigned char *)dev->dev_addr)[i], ioaddr + NODE_ID_0 + i);
- if(boot) {
- printk("%02x", inb(ioaddr + NODE_ID_0 + i));
- if(i != 5)
- printk(":");
- }
- }
-
- /* Now we will set multicast addresses to accept none */
- eth16i_select_regbank(HASH_TABLE_RB, ioaddr);
- for(i = 0; i < 8; i++)
- outb(0x00, ioaddr + HASH_TABLE_0 + i);
-
- /*
- Now let's disable the transmitter and receiver, set the buffer ram
- cycle time, bus width and buffer data path width. Also we shall
- set transmit buffer size and total buffer size.
- */
-
- eth16i_select_regbank(2, ioaddr);
-
- node_byte = 0;
- node_w = eth16i_read_eeprom(ioaddr, E_PRODUCT_CFG);
-
- if( (node_w & 0xFF00) == 0x0800)
- node_byte |= BUFFER_WIDTH_8;
-
- node_byte |= SRAM_BS1;
-
- if( (node_w & 0x00FF) == 64)
- node_byte |= SRAM_BS0;
-
- node_byte |= DLC_EN | SRAM_CYCLE_TIME_100NS | (ETH16I_TX_BUF_SIZE << 2);
-
- outb(node_byte, ioaddr + CONFIG_REG_0);
-
- /* We shall halt the transmitting, if 16 collisions are detected */
- outb(HALT_ON_16, ioaddr + COL_16_REG);
-
-#ifdef MODULE
- /* if_port already set by init_module() */
-#else
- dev->if_port = (dev->mem_start < E_PORT_FROM_EPROM) ?
- dev->mem_start : E_PORT_FROM_EPROM;
-#endif
-
- /* Set interface port type */
- if(boot) {
- static const char * const porttype[] = {
- "BNC", "DIX", "TP", "AUTO", "FROM_EPROM"
- };
-
- switch(dev->if_port)
- {
-
- case E_PORT_FROM_EPROM:
- dev->if_port = eth16i_read_eeprom(ioaddr, E_PORT_SELECT);
- break;
-
- case E_PORT_AUTO:
- dev->if_port = eth16i_probe_port(ioaddr);
- break;
-
- case E_PORT_BNC:
- case E_PORT_TP:
- case E_PORT_DIX:
- break;
- }
-
- printk(" %s interface.\n", porttype[dev->if_port]);
-
- eth16i_set_port(ioaddr, dev->if_port);
- }
-
- /* Set Receive Mode to normal operation */
- outb(MODE_2, ioaddr + RECEIVE_MODE_REG);
-}
-
-static int eth16i_probe_port(int ioaddr)
-{
- int i;
- int retcode;
- unsigned char dummy_packet[64];
-
- /* Powerup the chip */
- outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
-
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
-
- eth16i_select_regbank(NODE_ID_RB, ioaddr);
-
- for(i = 0; i < 6; i++) {
- dummy_packet[i] = inb(ioaddr + NODE_ID_0 + i);
- dummy_packet[i+6] = inb(ioaddr + NODE_ID_0 + i);
- }
-
- dummy_packet[12] = 0x00;
- dummy_packet[13] = 0x04;
- memset(dummy_packet + 14, 0, sizeof(dummy_packet) - 14);
-
- eth16i_select_regbank(2, ioaddr);
-
- for(i = 0; i < 3; i++) {
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
- BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
- eth16i_set_port(ioaddr, i);
-
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "Set port number %d\n", i);
-
- retcode = eth16i_send_probe_packet(ioaddr, dummy_packet, 64);
- if(retcode == 0) {
- retcode = eth16i_receive_probe_packet(ioaddr);
- if(retcode != -1) {
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "Eth16i interface port found at %d\n", i);
- return i;
- }
- }
- else {
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "TRANSMIT_DONE timeout when probing interface port\n");
- }
- }
-
- if( eth16i_debug > 1)
- printk(KERN_DEBUG "Using default port\n");
-
- return E_PORT_BNC;
-}
-
-static void eth16i_set_port(int ioaddr, int porttype)
-{
- unsigned short temp = 0;
-
- eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
- outb(LOOPBACK_CONTROL, ioaddr + TRANSMIT_MODE_REG);
-
- temp |= DIS_AUTO_PORT_SEL;
-
- switch(porttype) {
-
- case E_PORT_BNC :
- temp |= AUI_SELECT;
- break;
-
- case E_PORT_TP :
- break;
-
- case E_PORT_DIX :
- temp |= AUI_SELECT;
- BITSET(ioaddr + TRANSMIT_MODE_REG, CONTROL_OUTPUT);
- break;
- }
-
- outb(temp, ioaddr + TRANSCEIVER_MODE_REG);
-
- if(eth16i_debug > 1) {
- printk(KERN_DEBUG "TRANSMIT_MODE_REG = %x\n", inb(ioaddr + TRANSMIT_MODE_REG));
- printk(KERN_DEBUG "TRANSCEIVER_MODE_REG = %x\n",
- inb(ioaddr+TRANSCEIVER_MODE_REG));
- }
-}
-
-static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l)
-{
- unsigned long starttime;
-
- outb(0xff, ioaddr + TX_STATUS_REG);
-
- outw(l, ioaddr + DATAPORT);
- outsw(ioaddr + DATAPORT, (unsigned short *)b, (l + 1) >> 1);
-
- starttime = jiffies;
- outb(TX_START | 1, ioaddr + TRANSMIT_START_REG);
-
- while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) {
- if( time_after(jiffies, starttime + TX_TIMEOUT)) {
- return -1;
- }
- }
-
- return 0;
-}
-
-static int eth16i_receive_probe_packet(int ioaddr)
-{
- unsigned long starttime;
-
- starttime = jiffies;
-
- while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) {
- if( time_after(jiffies, starttime + TX_TIMEOUT)) {
-
- if(eth16i_debug > 1)
-				printk(KERN_DEBUG "Timeout occurred waiting for transmitted packet to be received\n");
- starttime = jiffies;
- while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) {
- if( time_after(jiffies, starttime + TX_TIMEOUT)) {
- if(eth16i_debug > 1)
-						printk(KERN_DEBUG "Timeout occurred waiting for receive packet\n");
- return -1;
- }
- }
-
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "RECEIVE_PACKET\n");
- return 0; /* Found receive packet */
- }
- }
-
- if(eth16i_debug > 1) {
- printk(KERN_DEBUG "TRANSMIT_PACKET_RECEIVED %x\n", inb(ioaddr + TX_STATUS_REG));
- printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG));
- }
-
- return 0; /* Return success */
-}
-
-#if 0
-static int eth16i_set_irq(struct net_device* dev)
-{
- const int ioaddr = dev->base_addr;
- const int irq = dev->irq;
- int i = 0;
-
- if(ioaddr < 0x1000) {
- while(eth16i_irqmap[i] && eth16i_irqmap[i] != irq)
- i++;
-
- if(i < NUM_OF_ISA_IRQS) {
- u8 cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
- cbyte = (cbyte & 0x3F) | (i << 6);
- outb(cbyte, ioaddr + JUMPERLESS_CONFIG);
- return 0;
- }
- }
- else {
- printk(KERN_NOTICE "%s: EISA Interrupt cannot be set. Use EISA Configuration utility.\n", dev->name);
- }
-
- return -1;
-
-}
-#endif
-
-static int __init eth16i_get_irq(int ioaddr)
-{
- unsigned char cbyte;
-
- if( ioaddr < 0x1000) {
- cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
- return eth16i_irqmap[((cbyte & 0xC0) >> 6)];
-	} else {	/* Oh, the card is EISA so the method of getting the IRQ is different */
- unsigned short index = 0;
- cbyte = inb(ioaddr + EISA_IRQ_REG);
- while( (cbyte & 0x01) == 0) {
- cbyte = cbyte >> 1;
- index++;
- }
- return eth32i_irqmap[index];
- }
-}
-
-static int __init eth16i_check_signature(int ioaddr)
-{
- int i;
- unsigned char creg[4] = { 0 };
-
- for(i = 0; i < 4 ; i++) {
-
- creg[i] = inb(ioaddr + TRANSMIT_MODE_REG + i);
-
- if(eth16i_debug > 1)
- printk("eth16i: read signature byte %x at %x\n",
- creg[i],
- ioaddr + TRANSMIT_MODE_REG + i);
- }
-
- creg[0] &= 0x0F; /* Mask collision cnr */
- creg[2] &= 0x7F; /* Mask DCLEN bit */
-
-#if 0
-	/*
-	   This was removed because the card was sometimes left in a state
-	   from which it couldn't be found anymore. If a stricter check is
-	   still needed, this has to be fixed.
-	*/
- if( ! ((creg[0] == 0x06) && (creg[1] == 0x41)) ) {
- if(creg[1] != 0x42)
- return -1;
- }
-#endif
-
- if( !((creg[2] == 0x36) && (creg[3] == 0xE0)) ) {
- creg[2] &= 0x40;
- creg[3] &= 0x03;
-
- if( !((creg[2] == 0x40) && (creg[3] == 0x00)) )
- return -1;
- }
-
- if(eth16i_read_eeprom(ioaddr, E_NODEID_0) != 0)
- return -1;
-
- if((eth16i_read_eeprom(ioaddr, E_NODEID_1) & 0xFF00) != 0x4B00)
- return -1;
-
- return 0;
-}
-
-static int eth16i_read_eeprom(int ioaddr, int offset)
-{
- int data = 0;
-
- eth16i_eeprom_cmd(ioaddr, EEPROM_READ | offset);
- outb(CS_1, ioaddr + EEPROM_CTRL_REG);
- data = eth16i_read_eeprom_word(ioaddr);
- outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
-
- return data;
-}
-
-static int eth16i_read_eeprom_word(int ioaddr)
-{
- int i;
- int data = 0;
-
- for(i = 16; i > 0; i--) {
- outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- data = (data << 1) |
- ((inb(ioaddr + EEPROM_DATA_REG) & DI_1) ? 1 : 0);
-
- eeprom_slow_io();
- }
-
- return data;
-}
-
-static void eth16i_eeprom_cmd(int ioaddr, unsigned char command)
-{
- int i;
-
- outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
- outb(DI_0, ioaddr + EEPROM_DATA_REG);
- outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
- outb(DI_1, ioaddr + EEPROM_DATA_REG);
- outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
-
- for(i = 7; i >= 0; i--) {
- short cmd = ( (command & (1 << i)) ? DI_1 : DI_0 );
- outb(cmd, ioaddr + EEPROM_DATA_REG);
- outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- }
-}
-
-static int eth16i_open(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- /* Powerup the chip */
- outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
-
- /* Initialize the chip */
- eth16i_initialize(dev, 0);
-
- /* Set the transmit buffer size */
- lp->tx_buf_size = eth16i_tx_buf_map[ETH16I_TX_BUF_SIZE & 0x03];
-
- if(eth16i_debug > 0)
- printk(KERN_DEBUG "%s: transmit buffer size %d\n",
- dev->name, lp->tx_buf_size);
-
- /* Now enable Transmitter and Receiver sections */
- BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
-
- /* Now switch to register bank 2, for run time operation */
- eth16i_select_regbank(2, ioaddr);
-
- lp->open_time = jiffies;
- lp->tx_started = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
-
- /* Turn on interrupts*/
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
-
- netif_start_queue(dev);
- return 0;
-}
-
-static int eth16i_close(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- eth16i_reset(dev);
-
- /* Turn off interrupts*/
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
-
- netif_stop_queue(dev);
-
- lp->open_time = 0;
-
- /* Disable transmit and receive */
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
-
- /* Reset the chip */
- /* outb(0xff, ioaddr + RESET); */
- /* outw(0xffff, ioaddr + TX_STATUS_REG); */
-
- outb(0x00, ioaddr + CONFIG_REG_1);
-
- return 0;
-}
-
-static void eth16i_timeout(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- /*
- If we get here, some higher level has decided that
- we are broken. There should really be a "kick me"
- function call instead.
- */
-
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
- printk(KERN_WARNING "%s: transmit timed out with status %04x, %s ?\n",
- dev->name,
- inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
- "IRQ conflict" : "network cable problem");
-
- dev->trans_start = jiffies; /* prevent tx timeout */
-
- /* Let's dump all registers */
- if(eth16i_debug > 0) {
- printk(KERN_DEBUG "%s: timeout: %02x %02x %02x %02x %02x %02x %02x %02x.\n",
- dev->name, inb(ioaddr + 0),
- inb(ioaddr + 1), inb(ioaddr + 2),
- inb(ioaddr + 3), inb(ioaddr + 4),
- inb(ioaddr + 5),
- inb(ioaddr + 6), inb(ioaddr + 7));
-
- printk(KERN_DEBUG "%s: transmit start reg: %02x. collision reg %02x\n",
- dev->name, inb(ioaddr + TRANSMIT_START_REG),
- inb(ioaddr + COL_16_REG));
- printk(KERN_DEBUG "lp->tx_queue = %d\n", lp->tx_queue);
- printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len);
- printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started);
- }
- dev->stats.tx_errors++;
- eth16i_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
- netif_wake_queue(dev);
-}
-
-static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int status = 0;
- ushort length = skb->len;
- unsigned char *buf;
- unsigned long flags;
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
- buf = skb->data;
-
- netif_stop_queue(dev);
-
- /* Turn off TX interrupts */
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
-
-	/* We would be better off doing the disable_irq tricks the 3c509
-	   does; that would make this suck a lot less */
-
- spin_lock_irqsave(&lp->lock, flags);
-
- if( (length + 2) > (lp->tx_buf_size - lp->tx_queue_len)) {
- if(eth16i_debug > 0)
- printk(KERN_WARNING "%s: Transmit buffer full.\n", dev->name);
- }
- else {
- outw(length, ioaddr + DATAPORT);
-
- if( ioaddr < 0x1000 )
- outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
- else {
- unsigned char frag = length % 4;
- outsl(ioaddr + DATAPORT, buf, length >> 2);
- if( frag != 0 ) {
- outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC)), 1);
- if( frag == 3 )
- outsw(ioaddr + DATAPORT,
- (buf + (length & 0xFFFC) + 2), 1);
- }
- }
- lp->tx_buffered_packets++;
- lp->tx_buffered_bytes = length;
- lp->tx_queue++;
- lp->tx_queue_len += length + 2;
- }
- lp->tx_buf_busy = 0;
-
- if(lp->tx_started == 0) {
- /* If the transmitter is idle..always trigger a transmit */
- outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- lp->tx_started = 1;
- netif_wake_queue(dev);
- }
- else if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
- /* There is still more room for one more packet in tx buffer */
- netif_wake_queue(dev);
- }
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
- /* Turn TX interrupts back on */
- /* outb(TX_INTR_DONE | TX_INTR_16_COL, ioaddr + TX_INTR_REG); */
- status = 0;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-static void eth16i_rx(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int boguscount = MAX_RX_LOOP;
-
- /* Loop until all packets have been read */
- while( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) {
-
- /* Read status byte from receive buffer */
- ushort status = inw(ioaddr + DATAPORT);
-
- /* Get the size of the packet from receive buffer */
- ushort pkt_len = inw(ioaddr + DATAPORT);
-
- if(eth16i_debug > 4)
- printk(KERN_DEBUG "%s: Receiving packet mode %02x status %04x.\n",
- dev->name,
- inb(ioaddr + RECEIVE_MODE_REG), status);
-
- if( !(status & PKT_GOOD) ) {
- dev->stats.rx_errors++;
-
- if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) {
- dev->stats.rx_length_errors++;
- eth16i_reset(dev);
- return;
- }
- else {
- eth16i_skip_packet(dev);
- dev->stats.rx_dropped++;
- }
- }
- else { /* Ok so now we should have a good packet */
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 3);
- if( skb == NULL ) {
-				printk(KERN_WARNING "%s: Couldn't allocate memory for packet (len %d)\n",
- dev->name, pkt_len);
- eth16i_skip_packet(dev);
- dev->stats.rx_dropped++;
- break;
- }
-
- skb_reserve(skb,2);
-
-			/*
-			   Now let's get the packet out of the buffer.
-			   The size is (pkt_len + 1) >> 1 because we are reading
-			   words and the length has to be rounded up to an even
-			   number of bytes.
-			*/
-
- if(ioaddr < 0x1000)
- insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
- (pkt_len + 1) >> 1);
- else {
- unsigned char *buf = skb_put(skb, pkt_len);
- unsigned char frag = pkt_len % 4;
-
- insl(ioaddr + DATAPORT, buf, pkt_len >> 2);
-
- if(frag != 0) {
- unsigned short rest[2];
- rest[0] = inw( ioaddr + DATAPORT );
- if(frag == 3)
- rest[1] = inw( ioaddr + DATAPORT );
-
- memcpy(buf + (pkt_len & 0xfffc), (char *)rest, frag);
- }
- }
-
- skb->protocol=eth_type_trans(skb, dev);
-
- if( eth16i_debug > 5 ) {
- int i;
- printk(KERN_DEBUG "%s: Received packet of length %d.\n",
- dev->name, pkt_len);
- for(i = 0; i < 14; i++)
- printk(KERN_DEBUG " %02x", skb->data[i]);
- printk(KERN_DEBUG ".\n");
- }
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
-
- } /* else */
-
- if(--boguscount <= 0)
- break;
-
- } /* while */
-}
-
-static irqreturn_t eth16i_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct eth16i_local *lp;
- int ioaddr = 0, status;
- int handled = 0;
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- /* Turn off all interrupts from adapter */
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
-
- /* eth16i_tx won't be called */
- spin_lock(&lp->lock);
-
- status = inw(ioaddr + TX_STATUS_REG); /* Get the status */
- outw(status, ioaddr + TX_STATUS_REG); /* Clear status bits */
-
- if (status)
- handled = 1;
-
- if(eth16i_debug > 3)
- printk(KERN_DEBUG "%s: Interrupt with status %04x.\n", dev->name, status);
-
- if( status & 0x7f00 ) {
-
- dev->stats.rx_errors++;
-
- if(status & (BUS_RD_ERR << 8) )
- printk(KERN_WARNING "%s: Bus read error.\n",dev->name);
- if(status & (SHORT_PKT_ERR << 8) ) dev->stats.rx_length_errors++;
- if(status & (ALIGN_ERR << 8) ) dev->stats.rx_frame_errors++;
- if(status & (CRC_ERR << 8) ) dev->stats.rx_crc_errors++;
- if(status & (RX_BUF_OVERFLOW << 8) ) dev->stats.rx_over_errors++;
- }
- if( status & 0x001a) {
-
- dev->stats.tx_errors++;
-
- if(status & CR_LOST) dev->stats.tx_carrier_errors++;
- if(status & TX_JABBER_ERR) dev->stats.tx_window_errors++;
-
-#if 0
- if(status & COLLISION) {
- dev->stats.collisions +=
- ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4);
- }
-#endif
- if(status & COLLISIONS_16) {
- if(lp->col_16 < MAX_COL_16) {
- lp->col_16++;
- dev->stats.collisions++;
- /* Resume transmitting, skip failed packet */
- outb(0x02, ioaddr + COL_16_REG);
- }
- else {
-				printk(KERN_WARNING "%s: bailing out after too many consecutive runs of 16 collisions. Network cable problem?\n", dev->name);
- }
- }
- }
-
- if( status & 0x00ff ) { /* Let's check the transmit status reg */
-
- if(status & TX_DONE) { /* The transmit has been done */
- dev->stats.tx_packets = lp->tx_buffered_packets;
- dev->stats.tx_bytes += lp->tx_buffered_bytes;
- lp->col_16 = 0;
-
-			if(lp->tx_queue) { /* Are there still packets? */
-				/* There are packets, so start transmitting and also
-				   write how many packets are to be sent */
- outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- lp->tx_started = 1;
- }
- else {
- lp->tx_started = 0;
- }
- netif_wake_queue(dev);
- }
- }
-
- if( ( status & 0x8000 ) ||
- ( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) ) {
- eth16i_rx(dev); /* We have packet in receive buffer */
- }
-
- /* Turn interrupts back on */
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
-
- if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
- /* There is still more room for one more packet in tx buffer */
- netif_wake_queue(dev);
- }
-
- spin_unlock(&lp->lock);
-
- return IRQ_RETVAL(handled);
-}
-
-static void eth16i_skip_packet(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- inw(ioaddr + DATAPORT);
- inw(ioaddr + DATAPORT);
- inw(ioaddr + DATAPORT);
-
- outb(SKIP_RX_PACKET, ioaddr + FILTER_SELF_RX_REG);
- while( inb( ioaddr + FILTER_SELF_RX_REG ) != 0);
-}
-
-static void eth16i_reset(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "%s: Resetting device.\n", dev->name);
-
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
- outw(0xffff, ioaddr + TX_STATUS_REG);
- eth16i_select_regbank(2, ioaddr);
-
- lp->tx_started = 0;
- lp->tx_buf_busy = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
-}
-
-static void eth16i_multicast(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
- {
- outb(3, ioaddr + RECEIVE_MODE_REG);
- } else {
- outb(2, ioaddr + RECEIVE_MODE_REG);
- }
-}
-
-static void eth16i_select_regbank(unsigned char banknbr, int ioaddr)
-{
- unsigned char data;
-
- data = inb(ioaddr + CONFIG_REG_1);
- outb( ((data & 0xF3) | ( (banknbr & 0x03) << 2)), ioaddr + CONFIG_REG_1);
-}
-
-#ifdef MODULE
-
-static ushort eth16i_parse_mediatype(const char* s)
-{
- if(!s)
- return E_PORT_FROM_EPROM;
-
- if (!strncmp(s, "bnc", 3))
- return E_PORT_BNC;
- else if (!strncmp(s, "tp", 2))
- return E_PORT_TP;
- else if (!strncmp(s, "dix", 3))
- return E_PORT_DIX;
- else if (!strncmp(s, "auto", 4))
- return E_PORT_AUTO;
- else
- return E_PORT_FROM_EPROM;
-}
-
-#define MAX_ETH16I_CARDS 4 /* Max number of Eth16i cards per module */
-
-static struct net_device *dev_eth16i[MAX_ETH16I_CARDS];
-static int io[MAX_ETH16I_CARDS];
-#if 0
-static int irq[MAX_ETH16I_CARDS];
-#endif
-static char* mediatype[MAX_ETH16I_CARDS];
-static int debug = -1;
-
-MODULE_AUTHOR("Mika Kuoppala <miku@iki.fi>");
-MODULE_DESCRIPTION("ICL EtherTeam 16i/32 driver");
-MODULE_LICENSE("GPL");
-
-
-module_param_array(io, int, NULL, 0);
-MODULE_PARM_DESC(io, "eth16i I/O base address(es)");
-
-#if 0
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(irq, "eth16i interrupt request number");
-#endif
-
-module_param_array(mediatype, charp, NULL, 0);
-MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,eprom)");
-
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "eth16i debug level (0-6)");
-
-int __init init_module(void)
-{
- int this_dev, found = 0;
- struct net_device *dev;
-
- for (this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) {
- dev = alloc_etherdev(sizeof(struct eth16i_local));
- if (!dev)
- break;
-
- dev->base_addr = io[this_dev];
-
- if(debug != -1)
- eth16i_debug = debug;
-
- if(eth16i_debug > 1)
- printk(KERN_NOTICE "eth16i(%d): interface type %s\n", this_dev, mediatype[this_dev] ? mediatype[this_dev] : "none" );
-
- dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]);
-
- if(io[this_dev] == 0) {
- if (this_dev != 0) { /* Only autoprobe 1st one */
- free_netdev(dev);
- break;
- }
-
- printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n");
- }
-
- if (do_eth16i_probe(dev) == 0) {
- dev_eth16i[found++] = dev;
- continue;
- }
-		printk(KERN_WARNING "eth16i.c: No Eth16i card found (i/o = 0x%x).\n",
- io[this_dev]);
- free_netdev(dev);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) {
- struct net_device *dev = dev_eth16i[this_dev];
-
- if (netdev_priv(dev)) {
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, ETH16I_IO_EXTENT);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/3c505.c b/drivers/net/ethernet/i825xx/3c505.c
deleted file mode 100644
index 6a5c21b82c51..000000000000
--- a/drivers/net/ethernet/i825xx/3c505.c
+++ /dev/null
@@ -1,1671 +0,0 @@
-/*
- * Linux Ethernet device driver for the 3Com Etherlink Plus (3C505)
- * By Craig Southeren, Juha Laiho and Philip Blundell
- *
- * 3c505.c This module implements an interface to the 3Com
- * Etherlink Plus (3c505) Ethernet card. Linux device
- * driver interface reverse engineered from the Linux 3C509
- * device drivers. Some 3C505 information gleaned from
- * the Crynwr packet driver. Still this driver would not
- * be here without 3C505 technical reference provided by
- * 3Com.
- *
- * $Id: 3c505.c,v 1.10 1996/04/16 13:06:27 phil Exp $
- *
- * Authors: Linux 3c505 device driver by
- * Craig Southeren, <craigs@ineluki.apana.org.au>
- * Final debugging by
- * Andrew Tridgell, <tridge@nimbus.anu.edu.au>
- * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by
- * Juha Laiho, <jlaiho@ichaos.nullnet.fi>
- * Linux 3C509 driver by
- * Donald Becker, <becker@super.org>
- * (Now at <becker@scyld.com>)
- * Crynwr packet driver by
- * Krishnan Gopalan and Gregg Stefancik,
- * Clemson University Engineering Computer Operations.
- * Portions of the code have been adapted from the 3c505
- * driver for NCSA Telnet by Bruce Orchard and later
- * modified by Warren Van Houten and krus@diku.dk.
- * 3C505 technical information provided by
- * Terry Murphy, of 3Com Network Adapter Division
- * Linux 1.3.0 changes by
- * Alan Cox <Alan.Cox@linux.org>
- * More debugging, DMA support, currently maintained by
- * Philip Blundell <philb@gnu.org>
- * Multicard/soft configurable dma channel/rev 2 hardware support
- * by Christopher Collins <ccollins@pcug.org.au>
- * Ethtool support (jgarzik), 11/17/2001
- */
-
-#define DRV_NAME "3c505"
-#define DRV_VERSION "1.10a"
-
-
-/* Theory of operation:
- *
- * The 3c505 is quite an intelligent board. All communication with it is done
- * by means of Primary Command Blocks (PCBs); these are transferred using PIO
- * through the command register. The card has 256k of on-board RAM, which is
- * used to buffer received packets. It might seem at first that more buffers
- * are better, but in fact this isn't true. From my tests, it seems that
- * more than about 10 buffers are unnecessary, and there is a noticeable
- * performance hit in having more active on the card. So the majority of the
- * card's memory isn't, in fact, used. Sadly, the card only has one transmit
- * buffer and, short of loading our own firmware into it (which is what some
- * drivers resort to) there's nothing we can do about this.
- *
- * We keep up to 4 "receive packet" commands active on the board at a time.
- * When a packet comes in, so long as there is a receive command active, the
- * board will send us a "packet received" PCB and then add the data for that
- * packet to the DMA queue. If a DMA transfer is not already in progress, we
- * set one up to start uploading the data. We have to maintain a list of
- * backlogged receive packets, because the card may decide to tell us about
- * a newly-arrived packet at any time, and we may not be able to start a DMA
- * transfer immediately (ie one may already be going on). We can't NAK the
- * PCB, because then it would throw the packet away.
- *
- * Trying to send a PCB to the card at the wrong moment seems to have bad
- * effects. If we send it a transmit PCB while a receive DMA is happening,
- * it will just NAK the PCB and so we will have wasted our time. Worse, it
- * sometimes seems to interrupt the transfer. The majority of the low-level
- * code is protected by one huge semaphore -- "busy" -- which is set whenever
- * it probably isn't safe to do anything to the card. The receive routine
- * must gain a lock on "busy" before it can start a DMA transfer, and the
- * transmit routine must gain a lock before it sends the first PCB to the card.
- * The send_pcb() routine also has an internal semaphore to protect it against
- * being re-entered (which would be disastrous) -- this is needed because
- * several things can happen asynchronously (re-priming the receiver and
- * asking the card for statistics, for example). send_pcb() will also refuse
- * to talk to the card at all if a DMA upload is happening. The higher-level
- * networking code will reschedule a later retry if some part of the driver
- * is blocked. In practice, this doesn't seem to happen very often.
- */
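/*
 * Illustrative sketch (not part of the original driver): the command/response
 * idiom described above, as used later in this file by elp_open() and
 * elp_get_stats(). A PCB is built in adapter->tx_pcb, the matching got[]
 * flag is cleared, the PCB is pushed with send_pcb(), and the caller then
 * busy-waits until the interrupt handler sets the flag when the response
 * PCB arrives.
 */
#if 0
	adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
	adapter->tx_pcb.length = 0;
	adapter->got[CMD_NETWORK_STATISTICS] = 0;
	if (send_pcb(dev, &adapter->tx_pcb)) {
		unsigned long timeout = jiffies + TIMEOUT;

		while (adapter->got[CMD_NETWORK_STATISTICS] == 0 &&
		       time_before(jiffies, timeout))
			;	/* ISR fills dev->stats and sets got[] */
	}
#endif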
-
-/* This driver may now work with revision 2.x hardware, since all the read
- * operations on the HCR have been removed (we now keep our own softcopy).
- * But I don't have an old card to test it on.
- *
- * This has had the bad effect that the autoprobe routine is now a bit
- * less friendly to other devices. However, it was never very good
- * before, so I doubt it will hurt anybody.
- */
-
-/* The driver is a mess. I took Craig's and Juha's code, and hacked it firstly
- * to make it more reliable, and secondly to add DMA mode. Many things could
- * probably be done better; the concurrency protection is particularly awful.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/ioport.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-
-#include "3c505.h"
-
-/*********************************************************
- *
- * define debug messages here as common strings to reduce space
- *
- *********************************************************/
-
-#define timeout_msg "*** timeout at %s:%s (line %d) ***\n"
-#define TIMEOUT_MSG(lineno) \
- pr_notice(timeout_msg, __FILE__, __func__, (lineno))
-
-#define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n"
-#define INVALID_PCB_MSG(len) \
- pr_notice(invalid_pcb_msg, (len), __FILE__, __func__, __LINE__)
-
-#define search_msg "%s: Looking for 3c505 adapter at address %#x..."
-
-#define stilllooking_msg "still looking..."
-
-#define found_msg "found.\n"
-
-#define notfound_msg "not found (reason = %d)\n"
-
-#define couldnot_msg "%s: 3c505 not found\n"
-
-/*********************************************************
- *
- * various other debug stuff
- *
- *********************************************************/
-
-#ifdef ELP_DEBUG
-static int elp_debug = ELP_DEBUG;
-#else
-static int elp_debug;
-#endif
-#define debug elp_debug
-
-/*
- * 0 = no messages (well, some)
- * 1 = messages when high level commands performed
- * 2 = messages when low level commands performed
- * 3 = messages when interrupts received
- */
-
-/*****************************************************************
- *
- * List of I/O-addresses we try to auto-sense
- * Last element MUST BE 0!
- *****************************************************************/
-
-static int addr_list[] __initdata = {0x300, 0x280, 0x310, 0};
-
-/* Dma Memory related stuff */
-
-static unsigned long dma_mem_alloc(int size)
-{
- int order = get_order(size);
- return __get_dma_pages(GFP_KERNEL, order);
-}
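/*
 * Worked example (illustrative, assuming 4 KiB pages): for the
 * DMA_BUFFER_SIZE of 1600 bytes defined in 3c505.h, get_order(1600) is 0,
 * so elp_open() ends up allocating a single page from the ISA-DMA-capable
 * zone (below 16 MB) via __get_dma_pages(GFP_KERNEL, 0).
 */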
-
-
-/*****************************************************************
- *
- * Functions for I/O (note the inline !)
- *
- *****************************************************************/
-
-static inline unsigned char inb_status(unsigned int base_addr)
-{
- return inb(base_addr + PORT_STATUS);
-}
-
-static inline int inb_command(unsigned int base_addr)
-{
- return inb(base_addr + PORT_COMMAND);
-}
-
-static inline void outb_control(unsigned char val, struct net_device *dev)
-{
- outb(val, dev->base_addr + PORT_CONTROL);
- ((elp_device *)(netdev_priv(dev)))->hcr_val = val;
-}
-
-#define HCR_VAL(x) (((elp_device *)(netdev_priv(x)))->hcr_val)
-
-static inline void outb_command(unsigned char val, unsigned int base_addr)
-{
- outb(val, base_addr + PORT_COMMAND);
-}
-
-static inline unsigned int backlog_next(unsigned int n)
-{
- return (n + 1) % BACKLOG_SIZE;
-}
-
-/*****************************************************************
- *
- * useful functions for accessing the adapter
- *
- *****************************************************************/
-
-/*
- * use this routine when accessing the ASF bits as they are
- * changed asynchronously by the adapter
- */
-
-/* get adapter PCB status */
-#define GET_ASF(addr) \
- (get_status(addr)&ASF_PCB_MASK)
-
-static inline int get_status(unsigned int base_addr)
-{
- unsigned long timeout = jiffies + 10*HZ/100;
- register int stat1;
- do {
- stat1 = inb_status(base_addr);
- } while (stat1 != inb_status(base_addr) && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- return stat1;
-}
-
-static inline void set_hsf(struct net_device *dev, int hsf)
-{
- elp_device *adapter = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->lock, flags);
- outb_control((HCR_VAL(dev) & ~HSF_PCB_MASK) | hsf, dev);
- spin_unlock_irqrestore(&adapter->lock, flags);
-}
-
-static bool start_receive(struct net_device *, pcb_struct *);
-
-static inline void adapter_reset(struct net_device *dev)
-{
- unsigned long timeout;
- elp_device *adapter = netdev_priv(dev);
- unsigned char orig_hcr = adapter->hcr_val;
-
- outb_control(0, dev);
-
- if (inb_status(dev->base_addr) & ACRF) {
- do {
- inb_command(dev->base_addr);
- timeout = jiffies + 2*HZ/100;
- while (time_before_eq(jiffies, timeout) && !(inb_status(dev->base_addr) & ACRF));
- } while (inb_status(dev->base_addr) & ACRF);
- set_hsf(dev, HSF_PCB_NAK);
- }
- outb_control(adapter->hcr_val | ATTN | DIR, dev);
- mdelay(10);
- outb_control(adapter->hcr_val & ~ATTN, dev);
- mdelay(10);
- outb_control(adapter->hcr_val | FLSH, dev);
- mdelay(10);
- outb_control(adapter->hcr_val & ~FLSH, dev);
- mdelay(10);
-
- outb_control(orig_hcr, dev);
- if (!start_receive(dev, &adapter->tx_pcb))
- pr_err("%s: start receive command failed\n", dev->name);
-}
-
-/* Check to make sure that a DMA transfer hasn't timed out. This should
- * never happen in theory, but seems to occur occasionally if the card gets
- * prodded at the wrong time.
- */
-static inline void check_3c505_dma(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) {
- unsigned long flags, f;
- pr_err("%s: DMA %s timed out, %d bytes left\n", dev->name,
- adapter->current_dma.direction ? "download" : "upload",
- get_dma_residue(dev->dma));
- spin_lock_irqsave(&adapter->lock, flags);
- adapter->dmaing = 0;
- adapter->busy = 0;
-
- f=claim_dma_lock();
- disable_dma(dev->dma);
- release_dma_lock(f);
-
- if (adapter->rx_active)
- adapter->rx_active--;
- outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
- spin_unlock_irqrestore(&adapter->lock, flags);
- }
-}
-
-/* Primitive functions used by send_pcb() */
-static inline bool send_pcb_slow(unsigned int base_addr, unsigned char byte)
-{
- unsigned long timeout;
- outb_command(byte, base_addr);
- for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) {
- if (inb_status(base_addr) & HCRE)
- return false;
- }
- pr_warning("3c505: send_pcb_slow timed out\n");
- return true;
-}
-
-static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte)
-{
- unsigned int timeout;
- outb_command(byte, base_addr);
- for (timeout = 0; timeout < 40000; timeout++) {
- if (inb_status(base_addr) & HCRE)
- return false;
- }
- pr_warning("3c505: send_pcb_fast timed out\n");
- return true;
-}
-
-/* Check to see if the receiver needs restarting, and kick it if so */
-static inline void prime_rx(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- while (adapter->rx_active < ELP_RX_PCBS && netif_running(dev)) {
- if (!start_receive(dev, &adapter->itx_pcb))
- break;
- }
-}
-
-/*****************************************************************
- *
- * send_pcb
- * Send a PCB to the adapter.
- *
- * output byte to command reg --<--+
- * wait until HCRE is non zero |
- * loop until all bytes sent -->--+
- * set HSF1 and HSF2 to 1
- * output pcb length
- * wait until ASF give ACK or NAK
- * set HSF1 and HSF2 to 0
- *
- *****************************************************************/
-
-/* This can be quite slow -- the adapter is allowed to take up to 40ms
- * to respond to the initial interrupt.
- *
- * We run initially with interrupts turned on, but with a semaphore set
- * so that nobody tries to re-enter this code. Once the first byte has
- * gone through, we turn interrupts off and then send the others (the
- * timeout is reduced to 500us).
- */
-
-static bool send_pcb(struct net_device *dev, pcb_struct * pcb)
-{
- int i;
- unsigned long timeout;
- elp_device *adapter = netdev_priv(dev);
- unsigned long flags;
-
- check_3c505_dma(dev);
-
- if (adapter->dmaing && adapter->current_dma.direction == 0)
- return false;
-
- /* Avoid contention */
- if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) {
- if (elp_debug >= 3) {
- pr_debug("%s: send_pcb entered while threaded\n", dev->name);
- }
- return false;
- }
- /*
- * load each byte into the command register and
- * wait for the HCRE bit to indicate the adapter
- * had read the byte
- */
- set_hsf(dev, 0);
-
- if (send_pcb_slow(dev->base_addr, pcb->command))
- goto abort;
-
- spin_lock_irqsave(&adapter->lock, flags);
-
- if (send_pcb_fast(dev->base_addr, pcb->length))
- goto sti_abort;
-
- for (i = 0; i < pcb->length; i++) {
- if (send_pcb_fast(dev->base_addr, pcb->data.raw[i]))
- goto sti_abort;
- }
-
- outb_control(adapter->hcr_val | 3, dev); /* signal end of PCB */
- outb_command(2 + pcb->length, dev->base_addr);
-
- /* now wait for the acknowledgement */
- spin_unlock_irqrestore(&adapter->lock, flags);
-
- for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) {
- switch (GET_ASF(dev->base_addr)) {
- case ASF_PCB_ACK:
- adapter->send_pcb_semaphore = 0;
- return true;
-
- case ASF_PCB_NAK:
-#ifdef ELP_DEBUG
- pr_debug("%s: send_pcb got NAK\n", dev->name);
-#endif
- goto abort;
- }
- }
-
- if (elp_debug >= 1)
- pr_debug("%s: timeout waiting for PCB acknowledge (status %02x)\n",
- dev->name, inb_status(dev->base_addr));
- goto abort;
-
- sti_abort:
- spin_unlock_irqrestore(&adapter->lock, flags);
- abort:
- adapter->send_pcb_semaphore = 0;
- return false;
-}
-
-
-/*****************************************************************
- *
- * receive_pcb
- * Read a PCB from the adapter
- *
- * wait for ACRF to be non-zero ---<---+
- * input a byte |
- * if ASF1 and ASF2 were not both one |
- * before byte was read, loop --->---+
- * set HSF1 and HSF2 for ack
- *
- *****************************************************************/
-
-static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
-{
- int i, j;
- int total_length;
- int stat;
- unsigned long timeout;
- unsigned long flags;
-
- elp_device *adapter = netdev_priv(dev);
-
- set_hsf(dev, 0);
-
- /* get the command code */
- timeout = jiffies + 2*HZ/100;
- while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- return false;
- }
- pcb->command = inb_command(dev->base_addr);
-
- /* read the data length */
- timeout = jiffies + 3*HZ/100;
- while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- pr_info("%s: status %02x\n", dev->name, stat);
- return false;
- }
- pcb->length = inb_command(dev->base_addr);
-
- if (pcb->length > MAX_PCB_DATA) {
- INVALID_PCB_MSG(pcb->length);
- adapter_reset(dev);
- return false;
- }
- /* read the data */
- spin_lock_irqsave(&adapter->lock, flags);
- for (i = 0; i < MAX_PCB_DATA; i++) {
- for (j = 0; j < 20000; j++) {
- stat = get_status(dev->base_addr);
- if (stat & ACRF)
- break;
- }
- pcb->data.raw[i] = inb_command(dev->base_addr);
- if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000)
- break;
- }
- spin_unlock_irqrestore(&adapter->lock, flags);
- if (i >= MAX_PCB_DATA) {
- INVALID_PCB_MSG(i);
- return false;
- }
- if (j >= 20000) {
- TIMEOUT_MSG(__LINE__);
- return false;
- }
- /* the last "data" byte was really the length! */
- total_length = pcb->data.raw[i];
-
- /* safety check total length vs data length */
- if (total_length != (pcb->length + 2)) {
- if (elp_debug >= 2)
- pr_warning("%s: mangled PCB received\n", dev->name);
- set_hsf(dev, HSF_PCB_NAK);
- return false;
- }
-
- if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) {
- if (test_and_set_bit(0, (void *) &adapter->busy)) {
- if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) {
- set_hsf(dev, HSF_PCB_NAK);
- pr_warning("%s: PCB rejected, transfer in progress and backlog full\n", dev->name);
- pcb->command = 0;
- return true;
- } else {
- pcb->command = 0xff;
- }
- }
- }
- set_hsf(dev, HSF_PCB_ACK);
- return true;
-}
-
-/******************************************************
- *
- * queue a receive command on the adapter so we will get an
- * interrupt when a packet is received.
- *
- ******************************************************/
-
-static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb)
-{
- bool status;
- elp_device *adapter = netdev_priv(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: restarting receiver\n", dev->name);
- tx_pcb->command = CMD_RECEIVE_PACKET;
- tx_pcb->length = sizeof(struct Rcv_pkt);
- tx_pcb->data.rcv_pkt.buf_seg
- = tx_pcb->data.rcv_pkt.buf_ofs = 0; /* Unused */
- tx_pcb->data.rcv_pkt.buf_len = 1600;
- tx_pcb->data.rcv_pkt.timeout = 0; /* set timeout to zero */
- status = send_pcb(dev, tx_pcb);
- if (status)
- adapter->rx_active++;
- return status;
-}
-
-/******************************************************
- *
- * extract a packet from the adapter
- * this routine is only called from within the interrupt
- * service routine, so no cli/sti calls are needed
- * note that the length is always assumed to be even
- *
- ******************************************************/
-
-static void receive_packet(struct net_device *dev, int len)
-{
- int rlen;
- elp_device *adapter = netdev_priv(dev);
- void *target;
- struct sk_buff *skb;
- unsigned long flags;
-
- rlen = (len + 1) & ~1;
- skb = netdev_alloc_skb(dev, rlen + 2);
-
- if (!skb) {
- pr_warning("%s: memory squeeze, dropping packet\n", dev->name);
- target = adapter->dma_buffer;
- adapter->current_dma.target = NULL;
- /* FIXME: stats */
- return;
- }
-
- skb_reserve(skb, 2);
- target = skb_put(skb, rlen);
- if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) {
- adapter->current_dma.target = target;
- target = adapter->dma_buffer;
- } else {
- adapter->current_dma.target = NULL;
- }
-
- /* if this happens, we die */
- if (test_and_set_bit(0, (void *) &adapter->dmaing))
- pr_err("%s: rx blocked, DMA in progress, dir %d\n",
- dev->name, adapter->current_dma.direction);
-
- adapter->current_dma.direction = 0;
- adapter->current_dma.length = rlen;
- adapter->current_dma.skb = skb;
- adapter->current_dma.start_time = jiffies;
-
- outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev);
-
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- clear_dma_ff(dev->dma);
- set_dma_mode(dev->dma, 0x04); /* dma read */
- set_dma_addr(dev->dma, isa_virt_to_bus(target));
- set_dma_count(dev->dma, rlen);
- enable_dma(dev->dma);
- release_dma_lock(flags);
-
- if (elp_debug >= 3) {
- pr_debug("%s: rx DMA transfer started\n", dev->name);
- }
-
- if (adapter->rx_active)
- adapter->rx_active--;
-
- if (!adapter->busy)
- pr_warning("%s: receive_packet called, busy not set.\n", dev->name);
-}
-
-/******************************************************
- *
- * interrupt handler
- *
- ******************************************************/
-
-static irqreturn_t elp_interrupt(int irq, void *dev_id)
-{
- int len;
- int dlen;
- int icount = 0;
- struct net_device *dev = dev_id;
- elp_device *adapter = netdev_priv(dev);
- unsigned long timeout;
-
- spin_lock(&adapter->lock);
-
- do {
- /*
- * has a DMA transfer finished?
- */
- if (inb_status(dev->base_addr) & DONE) {
- if (!adapter->dmaing)
- pr_warning("%s: phantom DMA completed\n", dev->name);
-
- if (elp_debug >= 3)
- pr_debug("%s: %s DMA complete, status %02x\n", dev->name,
- adapter->current_dma.direction ? "tx" : "rx",
- inb_status(dev->base_addr));
-
- outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
- if (adapter->current_dma.direction) {
- dev_kfree_skb_irq(adapter->current_dma.skb);
- } else {
- struct sk_buff *skb = adapter->current_dma.skb;
- if (skb) {
- if (adapter->current_dma.target) {
- /* have already done the skb_put() */
- memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length);
- }
- skb->protocol = eth_type_trans(skb,dev);
- dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
- }
- }
- adapter->dmaing = 0;
- if (adapter->rx_backlog.in != adapter->rx_backlog.out) {
- int t = adapter->rx_backlog.length[adapter->rx_backlog.out];
- adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out);
- if (elp_debug >= 2)
- pr_debug("%s: receiving backlogged packet (%d)\n", dev->name, t);
- receive_packet(dev, t);
- } else {
- adapter->busy = 0;
- }
- } else {
- /* has one timed out? */
- check_3c505_dma(dev);
- }
-
- /*
- * receive a PCB from the adapter
- */
- timeout = jiffies + 3*HZ/100;
- while ((inb_status(dev->base_addr) & ACRF) != 0 && time_before(jiffies, timeout)) {
- if (receive_pcb(dev, &adapter->irx_pcb)) {
- switch (adapter->irx_pcb.command)
- {
- case 0:
- break;
- /*
- * received a packet - this must be handled fast
- */
- case 0xff:
- case CMD_RECEIVE_PACKET_COMPLETE:
- /* if the device isn't open, don't pass packets up the stack */
- if (!netif_running(dev))
- break;
- len = adapter->irx_pcb.data.rcv_resp.pkt_len;
- dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
- if (adapter->irx_pcb.data.rcv_resp.timeout != 0) {
- pr_err("%s: interrupt - packet not received correctly\n", dev->name);
- } else {
- if (elp_debug >= 3) {
- pr_debug("%s: interrupt - packet received of length %i (%i)\n",
- dev->name, len, dlen);
- }
- if (adapter->irx_pcb.command == 0xff) {
- if (elp_debug >= 2)
- pr_debug("%s: adding packet to backlog (len = %d)\n",
- dev->name, dlen);
- adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen;
- adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in);
- } else {
- receive_packet(dev, dlen);
- }
- if (elp_debug >= 3)
- pr_debug("%s: packet received\n", dev->name);
- }
- break;
-
- /*
- * 82586 configured correctly
- */
- case CMD_CONFIGURE_82586_RESPONSE:
- adapter->got[CMD_CONFIGURE_82586] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: interrupt - configure response received\n", dev->name);
- break;
-
- /*
- * Adapter memory configuration
- */
- case CMD_CONFIGURE_ADAPTER_RESPONSE:
- adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: Adapter memory configuration %s.\n", dev->name,
- adapter->irx_pcb.data.failed ? "failed" : "succeeded");
- break;
-
- /*
- * Multicast list loading
- */
- case CMD_LOAD_MULTICAST_RESPONSE:
- adapter->got[CMD_LOAD_MULTICAST_LIST] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: Multicast address list loading %s.\n", dev->name,
- adapter->irx_pcb.data.failed ? "failed" : "succeeded");
- break;
-
- /*
- * Station address setting
- */
- case CMD_SET_ADDRESS_RESPONSE:
- adapter->got[CMD_SET_STATION_ADDRESS] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: Ethernet address setting %s.\n", dev->name,
- adapter->irx_pcb.data.failed ? "failed" : "succeeded");
- break;
-
-
- /*
- * received board statistics
- */
- case CMD_NETWORK_STATISTICS_RESPONSE:
- dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
- dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
- dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
- dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
- dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
- dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
- adapter->got[CMD_NETWORK_STATISTICS] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: interrupt - statistics response received\n", dev->name);
- break;
-
- /*
- * sent a packet
- */
- case CMD_TRANSMIT_PACKET_COMPLETE:
- if (elp_debug >= 3)
- pr_debug("%s: interrupt - packet sent\n", dev->name);
- if (!netif_running(dev))
- break;
- switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
- case 0xffff:
- dev->stats.tx_aborted_errors++;
- pr_info("%s: transmit timed out, network cable problem?\n", dev->name);
- break;
- case 0xfffe:
- dev->stats.tx_fifo_errors++;
- pr_info("%s: transmit timed out, FIFO underrun\n", dev->name);
- break;
- }
- netif_wake_queue(dev);
- break;
-
- /*
- * some unknown PCB
- */
- default:
- pr_debug("%s: unknown PCB received - %2.2x\n",
- dev->name, adapter->irx_pcb.command);
- break;
- }
- } else {
- pr_warning("%s: failed to read PCB on interrupt\n", dev->name);
- adapter_reset(dev);
- }
- }
-
- } while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE)));
-
- prime_rx(dev);
-
- /*
- * indicate no longer in interrupt routine
- */
- spin_unlock(&adapter->lock);
- return IRQ_HANDLED;
-}
-
-
-/******************************************************
- *
- * open the board
- *
- ******************************************************/
-
-static int elp_open(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- int retval;
-
- if (elp_debug >= 3)
- pr_debug("%s: request to open device\n", dev->name);
-
- /*
- * make sure we actually found the device
- */
- if (adapter == NULL) {
- pr_err("%s: Opening a non-existent physical device\n", dev->name);
- return -EAGAIN;
- }
- /*
- * disable interrupts on the board
- */
- outb_control(0, dev);
-
- /*
- * clear any pending interrupts
- */
- inb_command(dev->base_addr);
- adapter_reset(dev);
-
- /*
- * no receive PCBs active
- */
- adapter->rx_active = 0;
-
- adapter->busy = 0;
- adapter->send_pcb_semaphore = 0;
- adapter->rx_backlog.in = 0;
- adapter->rx_backlog.out = 0;
-
- spin_lock_init(&adapter->lock);
-
- /*
- * install our interrupt service routine
- */
- if ((retval = request_irq(dev->irq, elp_interrupt, 0, dev->name, dev))) {
- pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq);
- return retval;
- }
- if ((retval = request_dma(dev->dma, dev->name))) {
- free_irq(dev->irq, dev);
- pr_err("%s: could not allocate DMA%d channel\n", dev->name, dev->dma);
- return retval;
- }
- adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE);
- if (!adapter->dma_buffer) {
- pr_err("%s: could not allocate DMA buffer\n", dev->name);
- free_dma(dev->dma);
- free_irq(dev->irq, dev);
- return -ENOMEM;
- }
- adapter->dmaing = 0;
-
- /*
- * enable interrupts on the board
- */
- outb_control(CMDE, dev);
-
- /*
- * configure adapter memory: we need 10 multicast addresses, default==0
- */
- if (elp_debug >= 3)
- pr_debug("%s: sending 3c505 memory configuration command\n", dev->name);
- adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
- adapter->tx_pcb.data.memconf.cmd_q = 10;
- adapter->tx_pcb.data.memconf.rcv_q = 20;
- adapter->tx_pcb.data.memconf.mcast = 10;
- adapter->tx_pcb.data.memconf.frame = 20;
- adapter->tx_pcb.data.memconf.rcv_b = 20;
- adapter->tx_pcb.data.memconf.progs = 0;
- adapter->tx_pcb.length = sizeof(struct Memconf);
- adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send memory configuration command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- }
-
-
- /*
- * configure adapter to receive broadcast messages and wait for response
- */
- if (elp_debug >= 3)
- pr_debug("%s: sending 82586 configure command\n", dev->name);
- adapter->tx_pcb.command = CMD_CONFIGURE_82586;
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
- adapter->tx_pcb.length = 2;
- adapter->got[CMD_CONFIGURE_82586] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send 82586 configure command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- }
-
- /* enable burst-mode DMA */
- /* outb(0x1, dev->base_addr + PORT_AUXDMA); */
-
- /*
- * queue receive commands to provide buffering
- */
- prime_rx(dev);
- if (elp_debug >= 3)
- pr_debug("%s: %d receive PCBs active\n", dev->name, adapter->rx_active);
-
- /*
- * device is now officially open!
- */
-
- netif_start_queue(dev);
- return 0;
-}
-
-
-/******************************************************
- *
- * send a packet to the adapter
- *
- ******************************************************/
-
-static netdev_tx_t send_packet(struct net_device *dev, struct sk_buff *skb)
-{
- elp_device *adapter = netdev_priv(dev);
- unsigned long target;
- unsigned long flags;
-
- /*
- * make sure the length is even and no shorter than 60 bytes
- */
- unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);
-
- if (test_and_set_bit(0, (void *) &adapter->busy)) {
- if (elp_debug >= 2)
- pr_debug("%s: transmit blocked\n", dev->name);
- return false;
- }
-
- dev->stats.tx_bytes += nlen;
-
- /*
- * send the adapter a transmit packet command. Ignore segment and offset
- * and make sure the length is even
- */
- adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
- adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
- adapter->tx_pcb.data.xmit_pkt.buf_ofs
- = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0; /* Unused */
- adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;
-
- if (!send_pcb(dev, &adapter->tx_pcb)) {
- adapter->busy = 0;
- return false;
- }
- /* if this happens, we die */
- if (test_and_set_bit(0, (void *) &adapter->dmaing))
- pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);
-
- adapter->current_dma.direction = 1;
- adapter->current_dma.start_time = jiffies;
-
- if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
- skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
- memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
- target = isa_virt_to_bus(adapter->dma_buffer);
- }
- else {
- target = isa_virt_to_bus(skb->data);
- }
- adapter->current_dma.skb = skb;
-
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- clear_dma_ff(dev->dma);
- set_dma_mode(dev->dma, 0x48); /* dma memory -> io */
- set_dma_addr(dev->dma, target);
- set_dma_count(dev->dma, nlen);
- outb_control(adapter->hcr_val | DMAE | TCEN, dev);
- enable_dma(dev->dma);
- release_dma_lock(flags);
-
- if (elp_debug >= 3)
- pr_debug("%s: DMA transfer started\n", dev->name);
-
- return true;
-}
-
-/*
- * The upper layer thinks we timed out
- */
-
-static void elp_timeout(struct net_device *dev)
-{
- int stat;
-
- stat = inb_status(dev->base_addr);
- pr_warning("%s: transmit timed out, lost %s?\n", dev->name,
- (stat & ACRF) ? "interrupt" : "command");
- if (elp_debug >= 1)
- pr_debug("%s: status %#02x\n", dev->name, stat);
- dev->trans_start = jiffies; /* prevent tx timeout */
- dev->stats.tx_dropped++;
- netif_wake_queue(dev);
-}
-
-/******************************************************
- *
- * start the transmitter
- * return NETDEV_TX_OK if queued OK, else return NETDEV_TX_BUSY
- *
- ******************************************************/
-
-static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned long flags;
- elp_device *adapter = netdev_priv(dev);
-
- spin_lock_irqsave(&adapter->lock, flags);
- check_3c505_dma(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: request to send packet of length %d\n", dev->name, (int) skb->len);
-
- netif_stop_queue(dev);
-
- /*
- * send the packet at skb->data for skb->len
- */
- if (!send_packet(dev, skb)) {
- if (elp_debug >= 2) {
- pr_debug("%s: failed to transmit packet\n", dev->name);
- }
- spin_unlock_irqrestore(&adapter->lock, flags);
- return NETDEV_TX_BUSY;
- }
- if (elp_debug >= 3)
- pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len);
-
- prime_rx(dev);
- spin_unlock_irqrestore(&adapter->lock, flags);
- netif_start_queue(dev);
- return NETDEV_TX_OK;
-}
-
-/******************************************************
- *
- * return statistics on the board
- *
- ******************************************************/
-
-static struct net_device_stats *elp_get_stats(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: request for stats\n", dev->name);
-
-	/* If the device is closed, just return the latest stats we have;
-	   we cannot query the adapter without interrupts */
- if (!netif_running(dev))
- return &dev->stats;
-
- /* send a get statistics command to the board */
- adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
- adapter->tx_pcb.length = 0;
- adapter->got[CMD_NETWORK_STATISTICS] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send get statistics command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- return &dev->stats;
- }
- }
-
- /* statistics are now up to date */
- return &dev->stats;
-}
-
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
- return debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 level)
-{
- debug = level;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_msglevel = netdev_get_msglevel,
- .set_msglevel = netdev_set_msglevel,
-};
-
-/******************************************************
- *
- * close the board
- *
- ******************************************************/
-
-static int elp_close(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: request to close device\n", dev->name);
-
- netif_stop_queue(dev);
-
- /* Someone may request the device statistic information even when
- * the interface is closed. The following will update the statistics
- * structure in the driver, so we'll be able to give current statistics.
- */
- (void) elp_get_stats(dev);
-
- /*
- * disable interrupts on the board
- */
- outb_control(0, dev);
-
- /*
- * release the IRQ
- */
- free_irq(dev->irq, dev);
-
- free_dma(dev->dma);
- free_pages((unsigned long) adapter->dma_buffer, get_order(DMA_BUFFER_SIZE));
-
- return 0;
-}
-
-
-/************************************************************
- *
- * Set multicast list
- * num_addrs==0: clear mc_list
- * num_addrs==-1: set promiscuous mode
- * num_addrs>0: set mc_list
- *
- ************************************************************/
-
-static void elp_set_mc_list(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- int i;
- unsigned long flags;
-
- if (elp_debug >= 3)
- pr_debug("%s: request to set multicast list\n", dev->name);
-
- spin_lock_irqsave(&adapter->lock, flags);
-
- if (!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
- /* send a "load multicast list" command to the board, max 10 addrs/cmd */
- /* if num_addrs==0 the list will be cleared */
- adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
- adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
- i = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy(adapter->tx_pcb.data.multicast[i++],
- ha->addr, 6);
- adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send set_multicast command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- }
- }
- if (!netdev_mc_empty(dev))
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
- else /* num_addrs == 0 */
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
- } else
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC;
- /*
- * configure adapter to receive messages (as specified above)
- * and wait for response
- */
- if (elp_debug >= 3)
- pr_debug("%s: sending 82586 configure command\n", dev->name);
- adapter->tx_pcb.command = CMD_CONFIGURE_82586;
- adapter->tx_pcb.length = 2;
- adapter->got[CMD_CONFIGURE_82586] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- {
- spin_unlock_irqrestore(&adapter->lock, flags);
- pr_err("%s: couldn't send 82586 configure command\n", dev->name);
- }
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- spin_unlock_irqrestore(&adapter->lock, flags);
- while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- }
-}
-
-/************************************************************
- *
- * A couple of tests to see if there's 3C505 or not
- * Called only by elp_autodetect
- ************************************************************/
-
-static int __init elp_sense(struct net_device *dev)
-{
- int addr = dev->base_addr;
- const char *name = dev->name;
- byte orig_HSR;
-
- if (!request_region(addr, ELP_IO_EXTENT, "3c505"))
- return -ENODEV;
-
- orig_HSR = inb_status(addr);
-
- if (elp_debug > 0)
- pr_debug(search_msg, name, addr);
-
- if (orig_HSR == 0xff) {
- if (elp_debug > 0)
- pr_cont(notfound_msg, 1);
- goto out;
- }
-
- /* Wait for a while; the adapter may still be booting up */
- if (elp_debug > 0)
- pr_cont(stilllooking_msg);
-
- if (orig_HSR & DIR) {
- /* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */
- outb(0, dev->base_addr + PORT_CONTROL);
- msleep(300);
- if (inb_status(addr) & DIR) {
- if (elp_debug > 0)
- pr_cont(notfound_msg, 2);
- goto out;
- }
- } else {
- /* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */
- outb(DIR, dev->base_addr + PORT_CONTROL);
- msleep(300);
- if (!(inb_status(addr) & DIR)) {
- if (elp_debug > 0)
- pr_cont(notfound_msg, 3);
- goto out;
- }
- }
- /*
- * It certainly looks like a 3c505.
- */
- if (elp_debug > 0)
- pr_cont(found_msg);
-
- return 0;
-out:
- release_region(addr, ELP_IO_EXTENT);
- return -ENODEV;
-}
-
-/*************************************************************
- *
- * Search through addr_list[] and try to find a 3C505
- * Called only by eplus_probe
- *************************************************************/
-
-static int __init elp_autodetect(struct net_device *dev)
-{
- int idx = 0;
-
- /* if base address set, then only check that address
- otherwise, run through the table */
- if (dev->base_addr != 0) { /* dev->base_addr == 0 ==> plain autodetect */
- if (elp_sense(dev) == 0)
- return dev->base_addr;
- } else
- while ((dev->base_addr = addr_list[idx++])) {
- if (elp_sense(dev) == 0)
- return dev->base_addr;
- }
-
- /* could not find an adapter */
- if (elp_debug > 0)
- pr_debug(couldnot_msg, dev->name);
-
- return 0; /* Because of this, the layer above will return -ENODEV */
-}
-
-static const struct net_device_ops elp_netdev_ops = {
- .ndo_open = elp_open,
- .ndo_stop = elp_close,
- .ndo_get_stats = elp_get_stats,
- .ndo_start_xmit = elp_start_xmit,
- .ndo_tx_timeout = elp_timeout,
- .ndo_set_rx_mode = elp_set_mc_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/******************************************************
- *
- * probe for an Etherlink Plus board at the specified address
- *
- ******************************************************/
-
-/* There are three situations we need to be able to detect here:
-
- * a) the card is idle
- * b) the card is still booting up
- * c) the card is stuck in a strange state (some DOS drivers do this)
- *
- * In case (a), all is well. In case (b), we wait 10 seconds to see if the
- * card finishes booting, and carry on if so. In case (c), we do a hard reset,
- * loop round, and hope for the best.
- *
- * This is all very unpleasant, but hopefully avoids the problems with the old
- * probe code (which had a 15-second delay if the card was idle, and didn't
- * work at all if it was in a weird state).
- */
-
-static int __init elplus_setup(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- int i, tries, tries1, okay;
- unsigned long timeout;
- unsigned long cookie = 0;
- int err = -ENODEV;
-
- /*
- * setup adapter structure
- */
-
- dev->base_addr = elp_autodetect(dev);
- if (!dev->base_addr)
- return -ENODEV;
-
- adapter->send_pcb_semaphore = 0;
-
- for (tries1 = 0; tries1 < 3; tries1++) {
- outb_control((adapter->hcr_val | CMDE) & ~DIR, dev);
- /* First try to write just one byte, to see if the card is
- * responding at all normally.
- */
- timeout = jiffies + 5*HZ/100;
- okay = 0;
- while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE));
- if ((inb_status(dev->base_addr) & HCRE)) {
- outb_command(0, dev->base_addr); /* send a spurious byte */
- timeout = jiffies + 5*HZ/100;
- while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE));
- if (inb_status(dev->base_addr) & HCRE)
- okay = 1;
- }
- if (!okay) {
- /* Nope, it's ignoring the command register. This means that
- * either it's still booting up, or it's died.
- */
- pr_err("%s: command register wouldn't drain, ", dev->name);
- if ((inb_status(dev->base_addr) & 7) == 3) {
- /* If the adapter status is 3, it *could* still be booting.
- * Give it the benefit of the doubt for 10 seconds.
- */
- pr_cont("assuming 3c505 still starting\n");
- timeout = jiffies + 10*HZ;
- while (time_before(jiffies, timeout) && (inb_status(dev->base_addr) & 7));
- if (inb_status(dev->base_addr) & 7) {
- pr_err("%s: 3c505 failed to start\n", dev->name);
- } else {
- okay = 1; /* It started */
- }
- } else {
- /* Otherwise, it must just be in a strange
- * state. We probably need to kick it.
- */
- pr_cont("3c505 is sulking\n");
- }
- }
- for (tries = 0; tries < 5 && okay; tries++) {
-
- /*
- * Try to set the Ethernet address, to make sure that the board
- * is working.
- */
- adapter->tx_pcb.command = CMD_STATION_ADDRESS;
- adapter->tx_pcb.length = 0;
- cookie = probe_irq_on();
- if (!send_pcb(dev, &adapter->tx_pcb)) {
- pr_err("%s: could not send first PCB\n", dev->name);
- probe_irq_off(cookie);
- continue;
- }
- if (!receive_pcb(dev, &adapter->rx_pcb)) {
- pr_err("%s: could not read first PCB\n", dev->name);
- probe_irq_off(cookie);
- continue;
- }
- if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) ||
- (adapter->rx_pcb.length != 6)) {
- pr_err("%s: first PCB wrong (%d, %d)\n", dev->name,
- adapter->rx_pcb.command, adapter->rx_pcb.length);
- probe_irq_off(cookie);
- continue;
- }
- goto okay;
- }
- /* It's broken. Do a hard reset to re-initialise the board,
- * and try again.
- */
- pr_info("%s: resetting adapter\n", dev->name);
- outb_control(adapter->hcr_val | FLSH | ATTN, dev);
- outb_control(adapter->hcr_val & ~(FLSH | ATTN), dev);
- }
- pr_err("%s: failed to initialise 3c505\n", dev->name);
- goto out;
-
- okay:
- if (dev->irq) { /* Is there a preset IRQ? */
- int rpt = probe_irq_off(cookie);
- if (dev->irq != rpt) {
- pr_warning("%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt);
- }
- /* if dev->irq == probe_irq_off(cookie), all is well */
- } else /* No preset IRQ; just use what we can detect */
- dev->irq = probe_irq_off(cookie);
- switch (dev->irq) { /* Legal, sane? */
- case 0:
- pr_err("%s: IRQ probe failed: check 3c505 jumpers.\n",
- dev->name);
- goto out;
- case 1:
- case 6:
- case 8:
- case 13:
- pr_err("%s: Impossible IRQ %d reported by probe_irq_off().\n",
- dev->name, dev->irq);
- goto out;
- }
- /*
- * Now we have the IRQ number so we can disable the interrupts from
- * the board until the board is opened.
- */
- outb_control(adapter->hcr_val & ~CMDE, dev);
-
- /*
- * copy Ethernet address into structure
- */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = adapter->rx_pcb.data.eth_addr[i];
-
- /* find a DMA channel */
- if (!dev->dma) {
- if (dev->mem_start) {
- dev->dma = dev->mem_start & 7;
- }
- else {
- pr_warning("%s: warning, DMA channel not specified, using default\n", dev->name);
- dev->dma = ELP_DMA;
- }
- }
-
- /*
- * print remainder of startup message
- */
- pr_info("%s: 3c505 at %#lx, irq %d, dma %d, addr %pM, ",
- dev->name, dev->base_addr, dev->irq, dev->dma, dev->dev_addr);
- /*
- * read more information from the adapter
- */
-
- adapter->tx_pcb.command = CMD_ADAPTER_INFO;
- adapter->tx_pcb.length = 0;
- if (!send_pcb(dev, &adapter->tx_pcb) ||
- !receive_pcb(dev, &adapter->rx_pcb) ||
- (adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) ||
- (adapter->rx_pcb.length != 10)) {
- pr_cont("not responding to second PCB\n");
- }
- pr_cont("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers,
- adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz);
-
- /*
- * reconfigure the adapter memory to better suit our purposes
- */
- adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
- adapter->tx_pcb.length = 12;
- adapter->tx_pcb.data.memconf.cmd_q = 8;
- adapter->tx_pcb.data.memconf.rcv_q = 8;
- adapter->tx_pcb.data.memconf.mcast = 10;
- adapter->tx_pcb.data.memconf.frame = 10;
- adapter->tx_pcb.data.memconf.rcv_b = 10;
- adapter->tx_pcb.data.memconf.progs = 0;
- if (!send_pcb(dev, &adapter->tx_pcb) ||
- !receive_pcb(dev, &adapter->rx_pcb) ||
- (adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) ||
- (adapter->rx_pcb.length != 2)) {
- pr_err("%s: could not configure adapter memory\n", dev->name);
- }
- if (adapter->rx_pcb.data.configure) {
- pr_err("%s: adapter configuration failed\n", dev->name);
- }
-
- dev->netdev_ops = &elp_netdev_ops;
- dev->watchdog_timeo = 10*HZ;
- dev->ethtool_ops = &netdev_ethtool_ops; /* local */
-
- dev->mem_start = dev->mem_end = 0;
-
- err = register_netdev(dev);
- if (err)
- goto out;
-
- return 0;
-out:
- release_region(dev->base_addr, ELP_IO_EXTENT);
- return err;
-}
-
-#ifndef MODULE
-struct net_device * __init elplus_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(elp_device));
- int err;
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = elplus_setup(dev);
- if (err) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
- return dev;
-}
-
-#else
-static struct net_device *dev_3c505[ELP_MAX_CARDS];
-static int io[ELP_MAX_CARDS];
-static int irq[ELP_MAX_CARDS];
-static int dma[ELP_MAX_CARDS];
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(dma, int, NULL, 0);
-MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)");
-MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)");
-
-int __init init_module(void)
-{
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) {
- struct net_device *dev = alloc_etherdev(sizeof(elp_device));
- if (!dev)
- break;
-
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (dma[this_dev]) {
- dev->dma = dma[this_dev];
- } else {
- dev->dma = ELP_DMA;
- pr_warning("3c505.c: warning, using default DMA channel,\n");
- }
- if (io[this_dev] == 0) {
- if (this_dev) {
- free_netdev(dev);
- break;
- }
- pr_notice("3c505.c: module autoprobe not recommended, give io=xx.\n");
- }
- if (elplus_setup(dev) != 0) {
- pr_warning("3c505.c: Failed to register card at 0x%x.\n", io[this_dev]);
- free_netdev(dev);
- break;
- }
- dev_3c505[this_dev] = dev;
- found++;
- }
- if (!found)
- return -ENODEV;
- return 0;
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) {
- struct net_device *dev = dev_3c505[this_dev];
- if (dev) {
- unregister_netdev(dev);
- release_region(dev->base_addr, ELP_IO_EXTENT);
- free_netdev(dev);
- }
- }
-}
-
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/3c505.h b/drivers/net/ethernet/i825xx/3c505.h
deleted file mode 100644
index 04df2a9002b6..000000000000
--- a/drivers/net/ethernet/i825xx/3c505.h
+++ /dev/null
@@ -1,292 +0,0 @@
-/*****************************************************************
- *
- * defines for 3Com Etherlink Plus adapter
- *
- *****************************************************************/
-
-#define ELP_DMA 6
-#define ELP_RX_PCBS 4
-#define ELP_MAX_CARDS 4
-
-/*
- * I/O register offsets
- */
-#define PORT_COMMAND 0x00 /* read/write, 8-bit */
-#define PORT_STATUS 0x02 /* read only, 8-bit */
-#define PORT_AUXDMA 0x02 /* write only, 8-bit */
-#define PORT_DATA 0x04 /* read/write, 16-bit */
-#define PORT_CONTROL 0x06 /* read/write, 8-bit */
-
-#define ELP_IO_EXTENT 0x10 /* size of used IO registers */
-
-/*
- * host control registers bits
- */
-#define ATTN 0x80 /* attention */
-#define FLSH 0x40 /* flush data register */
-#define DMAE 0x20 /* DMA enable */
-#define DIR 0x10 /* direction */
-#define TCEN 0x08 /* terminal count interrupt enable */
-#define CMDE 0x04 /* command register interrupt enable */
-#define HSF2 0x02 /* host status flag 2 */
-#define HSF1 0x01 /* host status flag 1 */
-
-/*
- * combinations of HSF flags used for PCB transmission
- */
-#define HSF_PCB_ACK HSF1
-#define HSF_PCB_NAK HSF2
-#define HSF_PCB_END (HSF2|HSF1)
-#define HSF_PCB_MASK (HSF2|HSF1)
-
-/*
- * host status register bits
- */
-#define HRDY 0x80 /* data register ready */
-#define HCRE 0x40 /* command register empty */
-#define ACRF 0x20 /* adapter command register full */
-/* #define DIR 0x10 direction - same as in control register */
-#define DONE 0x08 /* DMA done */
-#define ASF3 0x04 /* adapter status flag 3 */
-#define ASF2 0x02 /* adapter status flag 2 */
-#define ASF1 0x01 /* adapter status flag 1 */
-
-/*
- * combinations of ASF flags used for PCB reception
- */
-#define ASF_PCB_ACK ASF1
-#define ASF_PCB_NAK ASF2
-#define ASF_PCB_END (ASF2|ASF1)
-#define ASF_PCB_MASK (ASF2|ASF1)
-
-/*
- * host aux DMA register bits
- */
-#define DMA_BRST 0x01 /* DMA burst */
-
-/*
- * maximum amount of data allowed in a PCB
- */
-#define MAX_PCB_DATA 62
-
-/*****************************************************************
- *
- * timeout value
- * this is a rough value used for loops to stop them from
- * locking up the whole machine in the case of failure or
- * error conditions
- *
- *****************************************************************/
-
-#define TIMEOUT 300
-
-/*****************************************************************
- *
- * PCB commands
- *
- *****************************************************************/
-
-enum {
- /*
- * host PCB commands
- */
- CMD_CONFIGURE_ADAPTER_MEMORY = 0x01,
- CMD_CONFIGURE_82586 = 0x02,
- CMD_STATION_ADDRESS = 0x03,
- CMD_DMA_DOWNLOAD = 0x04,
- CMD_DMA_UPLOAD = 0x05,
- CMD_PIO_DOWNLOAD = 0x06,
- CMD_PIO_UPLOAD = 0x07,
- CMD_RECEIVE_PACKET = 0x08,
- CMD_TRANSMIT_PACKET = 0x09,
- CMD_NETWORK_STATISTICS = 0x0a,
- CMD_LOAD_MULTICAST_LIST = 0x0b,
- CMD_CLEAR_PROGRAM = 0x0c,
- CMD_DOWNLOAD_PROGRAM = 0x0d,
- CMD_EXECUTE_PROGRAM = 0x0e,
- CMD_SELF_TEST = 0x0f,
- CMD_SET_STATION_ADDRESS = 0x10,
- CMD_ADAPTER_INFO = 0x11,
- NUM_TRANSMIT_CMDS,
-
- /*
- * adapter PCB commands
- */
- CMD_CONFIGURE_ADAPTER_RESPONSE = 0x31,
- CMD_CONFIGURE_82586_RESPONSE = 0x32,
- CMD_ADDRESS_RESPONSE = 0x33,
- CMD_DOWNLOAD_DATA_REQUEST = 0x34,
- CMD_UPLOAD_DATA_REQUEST = 0x35,
- CMD_RECEIVE_PACKET_COMPLETE = 0x38,
- CMD_TRANSMIT_PACKET_COMPLETE = 0x39,
- CMD_NETWORK_STATISTICS_RESPONSE = 0x3a,
- CMD_LOAD_MULTICAST_RESPONSE = 0x3b,
- CMD_CLEAR_PROGRAM_RESPONSE = 0x3c,
- CMD_DOWNLOAD_PROGRAM_RESPONSE = 0x3d,
- CMD_EXECUTE_RESPONSE = 0x3e,
- CMD_SELF_TEST_RESPONSE = 0x3f,
- CMD_SET_ADDRESS_RESPONSE = 0x40,
- CMD_ADAPTER_INFO_RESPONSE = 0x41
-};
-
-/* Definitions for the PCB data structure */
-
-/* Data units */
-typedef unsigned char byte;
-typedef unsigned short int word;
-typedef unsigned long int dword;
-
-/* Data structures */
-struct Memconf {
- word cmd_q,
- rcv_q,
- mcast,
- frame,
- rcv_b,
- progs;
-};
-
-struct Rcv_pkt {
- word buf_ofs,
- buf_seg,
- buf_len,
- timeout;
-};
-
-struct Xmit_pkt {
- word buf_ofs,
- buf_seg,
- pkt_len;
-};
-
-struct Rcv_resp {
- word buf_ofs,
- buf_seg,
- buf_len,
- pkt_len,
- timeout,
- status;
- dword timetag;
-};
-
-struct Xmit_resp {
- word buf_ofs,
- buf_seg,
- c_stat,
- status;
-};
-
-
-struct Netstat {
- dword tot_recv,
- tot_xmit;
- word err_CRC,
- err_align,
- err_res,
- err_ovrrun;
-};
-
-
-struct Selftest {
- word error;
- union {
- word ROM_cksum;
- struct {
- word ofs, seg;
- } RAM;
- word i82586;
- } failure;
-};
-
-struct Info {
- byte minor_vers,
- major_vers;
- word ROM_cksum,
- RAM_sz,
- free_ofs,
- free_seg;
-};
-
-struct Memdump {
- word size,
- off,
- seg;
-};
-
-/*
-Primary Command Block. The most important data structure. All communication
-between the host and the adapter is done with these. (Except for the actual
-Ethernet data, which has different packaging.)
-*/
-typedef struct {
- byte command;
- byte length;
- union {
- struct Memconf memconf;
- word configure;
- struct Rcv_pkt rcv_pkt;
- struct Xmit_pkt xmit_pkt;
- byte multicast[10][6];
- byte eth_addr[6];
- byte failed;
- struct Rcv_resp rcv_resp;
- struct Xmit_resp xmit_resp;
- struct Netstat netstat;
- struct Selftest selftest;
- struct Info info;
- struct Memdump memdump;
- byte raw[62];
- } data;
-} pcb_struct;
-
-/* These defines for 'configure' */
-#define RECV_STATION 0x00
-#define RECV_BROAD 0x01
-#define RECV_MULTI 0x02
-#define RECV_PROMISC 0x04
-#define NO_LOOPBACK 0x00
-#define INT_LOOPBACK 0x08
-#define EXT_LOOPBACK 0x10
-
-/*****************************************************************
- *
- * structure to hold context information for adapter
- *
- *****************************************************************/
-
-#define DMA_BUFFER_SIZE 1600
-#define BACKLOG_SIZE 4
-
-typedef struct {
- volatile short got[NUM_TRANSMIT_CMDS]; /* flags for
- command completion */
- pcb_struct tx_pcb; /* PCB for foreground sending */
- pcb_struct rx_pcb; /* PCB for foreground receiving */
- pcb_struct itx_pcb; /* PCB for background sending */
- pcb_struct irx_pcb; /* PCB for background receiving */
-
- void *dma_buffer;
-
- struct {
- unsigned int length[BACKLOG_SIZE];
- unsigned int in;
- unsigned int out;
- } rx_backlog;
-
- struct {
- unsigned int direction;
- unsigned int length;
- struct sk_buff *skb;
- void *target;
- unsigned long start_time;
- } current_dma;
-
- /* flags */
- unsigned long send_pcb_semaphore;
- unsigned long dmaing;
- unsigned long busy;
-
- unsigned int rx_active; /* number of receive PCBs */
- volatile unsigned char hcr_val; /* what we think the HCR contains */
- spinlock_t lock; /* Interrupt v tx lock */
-} elp_device;
diff --git a/drivers/net/ethernet/i825xx/3c507.c b/drivers/net/ethernet/i825xx/3c507.c
deleted file mode 100644
index e8984b059905..000000000000
--- a/drivers/net/ethernet/i825xx/3c507.c
+++ /dev/null
@@ -1,938 +0,0 @@
-/* 3c507.c: An EtherLink16 device driver for Linux. */
-/*
- Written 1993,1994 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
-
- Thanks go to jennings@Montrouge.SMR.slb.com ( Patrick Jennings)
- and jrs@world.std.com (Rick Sladkey) for testing and bugfixes.
- Mark Salazar <leslie@access.digex.net> made the changes for cards with
- only 16K packet buffers.
-
- Things remaining to do:
- Verify that the tx and rx buffers don't have fencepost errors.
- Move the theory of operation and memory map documentation.
- The statistics need to be updated correctly.
-*/
-
-#define DRV_NAME "3c507"
-#define DRV_VERSION "1.10a"
-#define DRV_RELDATE "11/17/2001"
-
-static const char version[] =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n";
-
-/*
- Sources:
- This driver wouldn't have been written with the availability of the
- This driver wouldn't have been written without the availability of the
- that filled in the gaping holes of the Intel documentation. Three cheers
- for Russ Nelson.
-
- Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough
- info that the casual reader might think that it documents the i82586 :-<.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-/* use 0 for production, 1 for verification, 2..7 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 1
-#endif
-static unsigned int net_debug = NET_DEBUG;
-#define debug net_debug
-
-
-/*
- Details of the i82586.
-
- You'll really need the databook to understand the details of this part,
- but the outline is that the i82586 has two separate processing units.
- Both are started from a list of three configuration tables, of which only
- the last, the System Control Block (SCB), is used after reset-time. The SCB
- has the following fields:
- Status word
- Command word
- Tx/Command block addr.
- Rx block addr.
- The command word accepts the following controls for the Tx and Rx units:
- */
-
-#define CUC_START 0x0100
-#define CUC_RESUME 0x0200
-#define CUC_SUSPEND 0x0300
-#define RX_START 0x0010
-#define RX_RESUME 0x0020
-#define RX_SUSPEND 0x0030
-
-/* The Rx unit uses a list of frame descriptors and a list of data buffer
- descriptors. We use full-sized (1518 byte) data buffers, so there is
- a one-to-one pairing of frame descriptors to buffer descriptors.
-
- The Tx ("command") unit executes a list of commands that look like:
- Status word Written by the 82586 when the command is done.
- Command word Command in lower 3 bits, post-command action in upper 3
- Link word The address of the next command.
- Parameters (as needed).
-
- Some definitions related to the Command Word are:
- */
-#define CMD_EOL 0x8000 /* The last command of the list, stop. */
-#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
-#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
-
-enum commands {
- CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
- CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
-
-/* Information that needs to be kept for each board. */
-struct net_local {
- int last_restart;
- ushort rx_head;
- ushort rx_tail;
- ushort tx_head;
- ushort tx_cmd_link;
- ushort tx_reap;
- ushort tx_pkts_in_ring;
- spinlock_t lock;
- void __iomem *base;
-};
-
-/*
- Details of the EtherLink16 Implementation
- The 3c507 is a generic shared-memory i82586 implementation.
- The host can map 16K, 32K, 48K, or 64K of the 64K memory into
- 0x0[CD][08]0000, or all 64K into 0xF[02468]0000.
- */
-
-/* Offsets from the base I/O address. */
-#define SA_DATA 0 /* Station address data, or 3Com signature. */
-#define MISC_CTRL 6 /* Switch the SA_DATA banks, and bus config bits. */
-#define RESET_IRQ 10 /* Reset the latched IRQ line. */
-#define SIGNAL_CA 11 /* Frob the 82586 Channel Attention line. */
-#define ROM_CONFIG 13
-#define MEM_CONFIG 14
-#define IRQ_CONFIG 15
-#define EL16_IO_EXTENT 16
-
-/* The ID port is used at boot-time to locate the ethercard. */
-#define ID_PORT 0x100
-
-/* Offsets to registers in the mailbox (SCB). */
-#define iSCB_STATUS 0x8
-#define iSCB_CMD 0xA
-#define iSCB_CBL 0xC /* Command BLock offset. */
-#define iSCB_RFA 0xE /* Rx Frame Area offset. */
-
-/* Since the 3c507 maps the shared memory window so that the last byte is
- at 82586 address FFFF, the first byte is at 82586 address 0, 16K, 32K, or
- 48K corresponding to window sizes of 64K, 48K, 32K and 16K respectively.
- We can account for this by setting the 'SCB Base' entry in the ISCP table
- below for all the 16 bit offset addresses, and also adding the 'SCB Base'
- value to all 24 bit physical addresses (in the SCP table and the TX and RX
- Buffer Descriptors).
- -Mark
- */
-#define SCB_BASE ((unsigned)64*1024 - (dev->mem_end - dev->mem_start))
-
-/*
- What follows in 'init_words[]' is the "program" that is downloaded to the
- 82586 memory. It's mostly tables and command blocks, and starts at the
- reset address 0xfffff6. This is designed to be similar to the EtherExpress,
- thus the unusual location of the SCB at 0x0008.
-
- Even with the additional "don't care" values, doing it this way takes less
- program space than initializing the individual tables, and I feel it's much
- cleaner.
-
- The databook is particularly useless for the first two structures; I had
- to use the Crynwr driver as an example.
-
- The memory setup is as follows:
- */
-
-#define CONFIG_CMD 0x0018
-#define SET_SA_CMD 0x0024
-#define SA_OFFSET 0x002A
-#define IDLELOOP 0x30
-#define TDR_CMD 0x38
-#define TDR_TIME 0x3C
-#define DUMP_CMD 0x40
-#define DIAG_CMD 0x48
-#define SET_MC_CMD 0x4E
-#define DUMP_DATA 0x56 /* A 170 byte buffer for dump and Set-MC into. */
-
-#define TX_BUF_START 0x0100
-#define NUM_TX_BUFS 5
-#define TX_BUF_SIZE (1518+14+20+16) /* packet+header+TBD */
-
-#define RX_BUF_START 0x2000
-#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
-#define RX_BUF_END (dev->mem_end - dev->mem_start)
-
-#define TX_TIMEOUT (HZ/20)
-
-/*
- That's it: only 86 bytes to set up the beast, including every extra
- command available. The 170 byte buffer at DUMP_DATA is shared between the
- Dump command (called only by the diagnostic program) and the SetMulticastList
- command.
-
- To complete the memory setup you only have to write the station address at
- SA_OFFSET and create the Tx & Rx buffer lists.
-
- The Tx command chain and buffer list is setup as follows:
- A Tx command table, with the data buffer pointing to...
- A Tx data buffer descriptor. The packet is in a single buffer, rather than
- chaining together several smaller buffers.
- A NoOp command, which initially points to itself,
- And the packet data.
-
- A transmit is done by filling in the Tx command table and data buffer,
- re-writing the NoOp command, and finally changing the offset of the last
- command to point to the current Tx command. When the Tx command is finished,
- it jumps to the NoOp, when it loops until the next Tx command changes the
- "link offset" in the NoOp. This way the 82586 never has to go through the
- slow restart sequence.
-
- The Rx buffer list is set up in the obvious ring structure. We have enough
- memory (and low enough interrupt latency) that we can avoid the complicated
- Rx buffer linked lists by always associating a full-size Rx data buffer with
- each Rx data frame.
-
- I currently use four transmit buffers starting at TX_BUF_START (0x0100), and
- use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers.
-
- */
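
As a rough illustration of the chaining step described above, queuing a new Tx command amounts to rewriting the link word of the previously queued NoOp so the 82586 walks straight into the fresh command instead of going through its slow restart sequence. A simplified sketch of that single step, using the same offsets as hardware_send_packet() further down (tx_block is the shared-memory offset of the command just written):

	/* make the old NoOp's link word point at the new Tx command ... */
	writew(tx_block, lp->base + lp->tx_cmd_link);
	/* ... and remember where this command's own NoOp link word lives */
	lp->tx_cmd_link = tx_block + 20;
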
-
-static unsigned short init_words[] = {
- /* System Configuration Pointer (SCP). */
- 0x0000, /* Set bus size to 16 bits. */
- 0,0, /* pad words. */
- 0x0000,0x0000, /* ISCP phys addr, set in init_82586_mem(). */
-
- /* Intermediate System Configuration Pointer (ISCP). */
- 0x0001, /* Status word that's cleared when init is done. */
- 0x0008,0,0, /* SCB offset, (skip, skip) */
-
- /* System Control Block (SCB). */
- 0,0xf000|RX_START|CUC_START, /* SCB status and cmd. */
- CONFIG_CMD, /* Command list pointer, points to Configure. */
- RX_BUF_START, /* Rx block list. */
- 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */
-
- /* 0x0018: Configure command. Change to put MAC data with packet. */
- 0, CmdConfigure, /* Status, command. */
- SET_SA_CMD, /* Next command is Set Station Addr. */
- 0x0804, /* "4" bytes of config data, 8 byte FIFO. */
- 0x2e40, /* Magic values, including MAC data location. */
- 0, /* Unused pad word. */
-
- /* 0x0024: Setup station address command. */
- 0, CmdSASetup,
- SET_MC_CMD, /* Next command. */
- 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */
-
- /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */
- 0, CmdNOp, IDLELOOP, 0 /* pad */,
-
- /* 0x0038: An unused Time-Domain Reflectometer command. */
- 0, CmdTDR, IDLELOOP, 0,
-
- /* 0x0040: An unused Dump State command. */
- 0, CmdDump, IDLELOOP, DUMP_DATA,
-
- /* 0x0048: An unused Diagnose command. */
- 0, CmdDiagnose, IDLELOOP,
-
- /* 0x004E: An empty set-multicast-list command. */
- 0, CmdMulticastList, IDLELOOP, 0,
-};
-
-/* Index to functions, as function prototypes. */
-
-static int el16_probe1(struct net_device *dev, int ioaddr);
-static int el16_open(struct net_device *dev);
-static netdev_tx_t el16_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t el16_interrupt(int irq, void *dev_id);
-static void el16_rx(struct net_device *dev);
-static int el16_close(struct net_device *dev);
-static void el16_tx_timeout (struct net_device *dev);
-
-static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad);
-static void init_82586_mem(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
-static void init_rx_bufs(struct net_device *);
-
-static int io = 0x300;
-static int irq;
-static int mem_start;
-
-
-/* Check for a network adaptor of this type, and return '0' iff one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, (detachable devices only) allocate space for the
- device and return success.
- */
-
-struct net_device * __init el16_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- static const unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
- const unsigned *port;
- int err = -ENODEV;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- mem_start = dev->mem_start & 15;
- }
-
- if (io > 0x1ff) /* Check a single specified location. */
- err = el16_probe1(dev, io);
- else if (io != 0)
- err = -ENXIO; /* Don't probe at all. */
- else {
- for (port = ports; *port; port++) {
- err = el16_probe1(dev, *port);
- if (!err)
- break;
- }
- }
-
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- free_irq(dev->irq, dev);
- iounmap(((struct net_local *)netdev_priv(dev))->base);
- release_region(dev->base_addr, EL16_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops netdev_ops = {
- .ndo_open = el16_open,
- .ndo_stop = el16_close,
- .ndo_start_xmit = el16_send_packet,
- .ndo_tx_timeout = el16_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init el16_probe1(struct net_device *dev, int ioaddr)
-{
- static unsigned char init_ID_done;
- int i, irq, irqval, retval;
- struct net_local *lp;
-
- if (init_ID_done == 0) {
- ushort lrs_state = 0xff;
- /* Send the ID sequence to the ID_PORT to enable the board(s). */
- outb(0x00, ID_PORT);
- for(i = 0; i < 255; i++) {
- outb(lrs_state, ID_PORT);
- lrs_state <<= 1;
- if (lrs_state & 0x100)
- lrs_state ^= 0xe7;
- }
- outb(0x00, ID_PORT);
- init_ID_done = 1;
- }
-
- if (!request_region(ioaddr, EL16_IO_EXTENT, DRV_NAME))
- return -ENODEV;
-
- if ((inb(ioaddr) != '*') || (inb(ioaddr + 1) != '3') ||
- (inb(ioaddr + 2) != 'C') || (inb(ioaddr + 3) != 'O')) {
- retval = -ENODEV;
- goto out;
- }
-
- pr_info("%s: 3c507 at %#x,", dev->name, ioaddr);
-
- /* We should make a few more checks here, like the first three octets of
- the S.A. for the manufacturer's code. */
-
- irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
-
- irqval = request_irq(irq, el16_interrupt, 0, DRV_NAME, dev);
- if (irqval) {
- pr_cont("\n");
- pr_err("3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval);
- retval = -EAGAIN;
- goto out;
- }
-
- /* We've committed to using the board, and can start filling in *dev. */
- dev->base_addr = ioaddr;
-
- outb(0x01, ioaddr + MISC_CTRL);
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
- pr_cont(" %pM", dev->dev_addr);
-
- if (mem_start)
- net_debug = mem_start & 7;
-
-#ifdef MEM_BASE
- dev->mem_start = MEM_BASE;
- dev->mem_end = dev->mem_start + 0x10000;
-#else
- {
- int base;
- int size;
- char mem_config = inb(ioaddr + MEM_CONFIG);
- if (mem_config & 0x20) {
- size = 64*1024;
- base = 0xf00000 + (mem_config & 0x08 ? 0x080000
- : ((mem_config & 3) << 17));
- } else {
- size = ((mem_config & 3) + 1) << 14;
- base = 0x0c0000 + ( (mem_config & 0x18) << 12);
- }
- dev->mem_start = base;
- dev->mem_end = base + size;
- }
-#endif
-
- dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0;
- dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
-
- pr_cont(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq,
- dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1);
-
- if (net_debug)
- pr_debug("%s", version);
-
- lp = netdev_priv(dev);
- spin_lock_init(&lp->lock);
- lp->base = ioremap(dev->mem_start, RX_BUF_END);
- if (!lp->base) {
- pr_err("3c507: unable to remap memory\n");
- retval = -EAGAIN;
- goto out1;
- }
-
- dev->netdev_ops = &netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->ethtool_ops = &netdev_ethtool_ops;
- dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, EL16_IO_EXTENT);
- return retval;
-}
-
-static int el16_open(struct net_device *dev)
-{
- /* Initialize the 82586 memory and start it. */
- init_82586_mem(dev);
-
- netif_start_queue(dev);
- return 0;
-}
-
-
-static void el16_tx_timeout (struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- void __iomem *shmem = lp->base;
-
- if (net_debug > 1)
- pr_debug("%s: transmit timed out, %s? ", dev->name,
- readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" :
- "network cable problem");
- /* Try to restart the adaptor. */
- if (lp->last_restart == dev->stats.tx_packets) {
- if (net_debug > 1)
- pr_cont("Resetting board.\n");
- /* Completely reset the adaptor. */
- init_82586_mem (dev);
- lp->tx_pkts_in_ring = 0;
- } else {
- /* Issue the channel attention signal and hope it "gets better". */
- if (net_debug > 1)
- pr_cont("Kicking board.\n");
- writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD);
- outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
- lp->last_restart = dev->stats.tx_packets;
- }
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue (dev);
-}
-
-
-static netdev_tx_t el16_send_packet (struct sk_buff *skb,
- struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- unsigned char *buf = skb->data;
-
- netif_stop_queue (dev);
-
- spin_lock_irqsave (&lp->lock, flags);
-
- dev->stats.tx_bytes += length;
- /* Disable the 82586's input to the interrupt line. */
- outb (0x80, ioaddr + MISC_CTRL);
-
- hardware_send_packet (dev, buf, skb->len, length - skb->len);
-
- /* Enable the 82586 interrupt input. */
- outb (0x84, ioaddr + MISC_CTRL);
-
- spin_unlock_irqrestore (&lp->lock, flags);
-
- dev_kfree_skb (skb);
-
- /* You might need to clean up and record Tx statistics here. */
-
- return NETDEV_TX_OK;
-}
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-static irqreturn_t el16_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr, status, boguscount = 0;
- ushort ack_cmd = 0;
- void __iomem *shmem;
-
- if (dev == NULL) {
- pr_err("net_interrupt(): irq %d for unknown device.\n", irq);
- return IRQ_NONE;
- }
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
- shmem = lp->base;
-
- spin_lock(&lp->lock);
-
- status = readw(shmem+iSCB_STATUS);
-
- if (net_debug > 4) {
- pr_debug("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status);
- }
-
- /* Disable the 82586's input to the interrupt line. */
- outb(0x80, ioaddr + MISC_CTRL);
-
- /* Reap the Tx packet buffers. */
- while (lp->tx_pkts_in_ring) {
- unsigned short tx_status = readw(shmem+lp->tx_reap);
- if (!(tx_status & 0x8000)) {
- if (net_debug > 5)
- pr_debug("Tx command incomplete (%#x).\n", lp->tx_reap);
- break;
- }
- /* Tx unsuccessful or some interesting status bit set. */
- if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) {
- dev->stats.tx_errors++;
- if (tx_status & 0x0600) dev->stats.tx_carrier_errors++;
- if (tx_status & 0x0100) dev->stats.tx_fifo_errors++;
- if (!(tx_status & 0x0040)) dev->stats.tx_heartbeat_errors++;
- if (tx_status & 0x0020) dev->stats.tx_aborted_errors++;
- dev->stats.collisions += tx_status & 0xf;
- }
- dev->stats.tx_packets++;
- if (net_debug > 5)
- pr_debug("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
- lp->tx_reap += TX_BUF_SIZE;
- if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE)
- lp->tx_reap = TX_BUF_START;
-
- lp->tx_pkts_in_ring--;
- /* There is always more space in the Tx ring buffer now. */
- netif_wake_queue(dev);
-
- if (++boguscount > 10)
- break;
- }
-
- if (status & 0x4000) { /* Packet received. */
- if (net_debug > 5)
- pr_debug("Received packet, rx_head %04x.\n", lp->rx_head);
- el16_rx(dev);
- }
-
- /* Acknowledge the interrupt sources. */
- ack_cmd = status & 0xf000;
-
- if ((status & 0x0700) != 0x0200 && netif_running(dev)) {
- if (net_debug)
- pr_debug("%s: Command unit stopped, status %04x, restarting.\n",
- dev->name, status);
- /* If this ever occurs we should really re-write the idle loop, reset
- the Tx list, and do a complete restart of the command unit.
- For now we rely on the Tx timeout if the resume doesn't work. */
- ack_cmd |= CUC_RESUME;
- }
-
- if ((status & 0x0070) != 0x0040 && netif_running(dev)) {
- /* The Rx unit is not ready, it must be hung. Restart the receiver by
- initializing the rx buffers, and issuing an Rx start command. */
- if (net_debug)
- pr_debug("%s: Rx unit stopped, status %04x, restarting.\n",
- dev->name, status);
- init_rx_bufs(dev);
- writew(RX_BUF_START,shmem+iSCB_RFA);
- ack_cmd |= RX_START;
- }
-
- writew(ack_cmd,shmem+iSCB_CMD);
- outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
-
- /* Clear the latched interrupt. */
- outb(0, ioaddr + RESET_IRQ);
-
- /* Enable the 82586's interrupt input. */
- outb(0x84, ioaddr + MISC_CTRL);
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-static int el16_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- void __iomem *shmem = lp->base;
-
- netif_stop_queue(dev);
-
- /* Flush the Tx and disable Rx. */
- writew(RX_SUSPEND | CUC_SUSPEND,shmem+iSCB_CMD);
- outb(0, ioaddr + SIGNAL_CA);
-
- /* Disable the 82586's input to the interrupt line. */
- outb(0x80, ioaddr + MISC_CTRL);
-
- /* We always physically use the IRQ line, so we don't do free_irq(). */
-
- /* Update the statistics here. */
-
- return 0;
-}
-
-/* Initialize the Rx-block list. */
-static void init_rx_bufs(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- void __iomem *write_ptr;
- unsigned short SCB_base = SCB_BASE;
-
- int cur_rxbuf = lp->rx_head = RX_BUF_START;
-
- /* Initialize each Rx frame + data buffer. */
- do { /* While there is room for one more. */
-
- write_ptr = lp->base + cur_rxbuf;
-
- writew(0x0000,write_ptr); /* Status */
- writew(0x0000,write_ptr+=2); /* Command */
- writew(cur_rxbuf + RX_BUF_SIZE,write_ptr+=2); /* Link */
- writew(cur_rxbuf + 22,write_ptr+=2); /* Buffer offset */
- writew(0x0000,write_ptr+=2); /* Pad for dest addr. */
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2); /* Pad for source addr. */
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2); /* Pad for protocol. */
-
- writew(0x0000,write_ptr+=2); /* Buffer: Actual count */
- writew(-1,write_ptr+=2); /* Buffer: Next (none). */
- writew(cur_rxbuf + 0x20 + SCB_base,write_ptr+=2);/* Buffer: Address low */
- writew(0x0000,write_ptr+=2);
- /* Finally, the number of bytes in the buffer. */
- writew(0x8000 + RX_BUF_SIZE-0x20,write_ptr+=2);
-
- lp->rx_tail = cur_rxbuf;
- cur_rxbuf += RX_BUF_SIZE;
- } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE);
-
- /* Terminate the list by setting the EOL bit, and wrap the pointer to make
- the list a ring. */
- write_ptr = lp->base + lp->rx_tail + 2;
- writew(0xC000,write_ptr); /* Command, mark as last. */
- writew(lp->rx_head,write_ptr+2); /* Link */
-}
-
-static void init_82586_mem(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- void __iomem *shmem = lp->base;
-
- /* Enable loopback to protect the wire while starting up,
- and hold the 586 in reset during the memory initialization. */
- outb(0x20, ioaddr + MISC_CTRL);
-
- /* Fix the ISCP address and base. */
- init_words[3] = SCB_BASE;
- init_words[7] = SCB_BASE;
-
- /* Write the words at 0xfff6 (address-aliased to 0xfffff6). */
- memcpy_toio(lp->base + RX_BUF_END - 10, init_words, 10);
-
- /* Write the words at 0x0000. */
- memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
-
- /* Fill in the station address. */
- memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
-
- /* The Tx-block list is written as needed. We just set up the values. */
- lp->tx_cmd_link = IDLELOOP + 4;
- lp->tx_head = lp->tx_reap = TX_BUF_START;
-
- init_rx_bufs(dev);
-
- /* Start the 586 by releasing the reset line, but leave loopback. */
- outb(0xA0, ioaddr + MISC_CTRL);
-
- /* This was time consuming to track down: you need to give two channel
- attention signals to reliably start up the i82586. */
- outb(0, ioaddr + SIGNAL_CA);
-
- {
- int boguscnt = 50;
- while (readw(shmem+iSCB_STATUS) == 0)
- if (--boguscnt == 0) {
- pr_warning("%s: i82586 initialization timed out with status %04x, cmd %04x.\n",
- dev->name, readw(shmem+iSCB_STATUS), readw(shmem+iSCB_CMD));
- break;
- }
- /* Issue channel-attn -- the 82586 won't start. */
- outb(0, ioaddr + SIGNAL_CA);
- }
-
- /* Disable loopback and enable interrupts. */
- outb(0x84, ioaddr + MISC_CTRL);
- if (net_debug > 4)
- pr_debug("%s: Initialized 82586, status %04x.\n", dev->name,
- readw(shmem+iSCB_STATUS));
-}
-
-static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad)
-{
- struct net_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- ushort tx_block = lp->tx_head;
- void __iomem *write_ptr = lp->base + tx_block;
- static char padding[ETH_ZLEN];
-
- /* Set the write pointer to the Tx block, and put out the header. */
- writew(0x0000,write_ptr); /* Tx status */
- writew(CMD_INTR|CmdTx,write_ptr+=2); /* Tx command */
- writew(tx_block+16,write_ptr+=2); /* Next command is a NoOp. */
- writew(tx_block+8,write_ptr+=2); /* Data Buffer offset. */
-
- /* Output the data buffer descriptor. */
- writew((pad + length) | 0x8000,write_ptr+=2); /* Byte count parameter. */
- writew(-1,write_ptr+=2); /* No next data buffer. */
- writew(tx_block+22+SCB_BASE,write_ptr+=2); /* Buffer follows the NoOp command. */
- writew(0x0000,write_ptr+=2); /* Buffer address high bits (always zero). */
-
- /* Output the Loop-back NoOp command. */
- writew(0x0000,write_ptr+=2); /* Tx status */
- writew(CmdNOp,write_ptr+=2); /* Tx command */
- writew(tx_block+16,write_ptr+=2); /* Next is myself. */
-
- /* Output the packet at the write pointer. */
- memcpy_toio(write_ptr+2, buf, length);
- if (pad)
- memcpy_toio(write_ptr+length+2, padding, pad);
-
- /* Set the old command link pointing to this send packet. */
- writew(tx_block,lp->base + lp->tx_cmd_link);
- lp->tx_cmd_link = tx_block + 20;
-
- /* Set the next free tx region. */
- lp->tx_head = tx_block + TX_BUF_SIZE;
- if (lp->tx_head > RX_BUF_START - TX_BUF_SIZE)
- lp->tx_head = TX_BUF_START;
-
- if (net_debug > 4) {
- pr_debug("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n",
- dev->name, ioaddr, length, tx_block, lp->tx_head);
- }
-
- /* Grimly block further packets if there has been insufficient reaping. */
- if (++lp->tx_pkts_in_ring < NUM_TX_BUFS)
- netif_wake_queue(dev);
-}
-
-static void el16_rx(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- void __iomem *shmem = lp->base;
- ushort rx_head = lp->rx_head;
- ushort rx_tail = lp->rx_tail;
- ushort boguscount = 10;
- short frame_status;
-
- while ((frame_status = readw(shmem+rx_head)) < 0) { /* Command complete */
- void __iomem *read_frame = lp->base + rx_head;
- ushort rfd_cmd = readw(read_frame+2);
- ushort next_rx_frame = readw(read_frame+4);
- ushort data_buffer_addr = readw(read_frame+6);
- void __iomem *data_frame = lp->base + data_buffer_addr;
- ushort pkt_len = readw(data_frame);
-
- if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22 ||
- (pkt_len & 0xC000) != 0xC000) {
- pr_err("%s: Rx frame at %#x corrupted, "
- "status %04x cmd %04x next %04x "
- "data-buf @%04x %04x.\n",
- dev->name, rx_head, frame_status, rfd_cmd,
- next_rx_frame, data_buffer_addr, pkt_len);
- } else if ((frame_status & 0x2000) == 0) {
- /* Frame Rxed, but with error. */
- dev->stats.rx_errors++;
- if (frame_status & 0x0800) dev->stats.rx_crc_errors++;
- if (frame_status & 0x0400) dev->stats.rx_frame_errors++;
- if (frame_status & 0x0200) dev->stats.rx_fifo_errors++;
- if (frame_status & 0x0100) dev->stats.rx_over_errors++;
- if (frame_status & 0x0080) dev->stats.rx_length_errors++;
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- pkt_len &= 0x3fff;
- skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
- pr_err("%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
- break;
- }
-
- skb_reserve(skb,2);
-
- /* 'skb->data' points to the start of sk_buff data area. */
- memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len);
-
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
-
- /* Clear the status word and set End-of-List on the rx frame. */
- writew(0,read_frame);
- writew(0xC000,read_frame+2);
- /* Clear the end-of-list on the prev. RFD. */
- writew(0x0000,lp->base + rx_tail + 2);
-
- rx_tail = rx_head;
- rx_head = next_rx_frame;
- if (--boguscount == 0)
- break;
- }
-
- lp->rx_head = rx_head;
- lp->rx_tail = rx_tail;
-}
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
- return debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 level)
-{
- debug = level;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_msglevel = netdev_get_msglevel,
- .set_msglevel = netdev_set_msglevel,
-};
-
-#ifdef MODULE
-static struct net_device *dev_3c507;
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "EtherLink16 I/O base address");
-MODULE_PARM_DESC(irq, "(ignored)");
-
-int __init init_module(void)
-{
- if (io == 0)
- pr_notice("3c507: You should not use auto-probing with insmod!\n");
- dev_3c507 = el16_probe(-1);
- return IS_ERR(dev_3c507) ? PTR_ERR(dev_3c507) : 0;
-}
-
-void __exit
-cleanup_module(void)
-{
- struct net_device *dev = dev_3c507;
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- iounmap(((struct net_local *)netdev_priv(dev))->base);
- release_region(dev->base_addr, EL16_IO_EXTENT);
- free_netdev(dev);
-}
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 6aa927af382c..1c54e229e3cc 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -95,9 +95,6 @@ static char version[] __initdata =
#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
#define ENABLE_BVME6000_NET
#endif
-#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
-#define ENABLE_APRICOT
-#endif
#ifdef ENABLE_MVME16x_NET
#include <asm/mvme16xhw.h>
@@ -120,8 +117,15 @@ static char version[] __initdata =
#define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define ISCP_BUSY 0x00010000
-#define MACH_IS_APRICOT 0
#else
+#error 82596.c: unknown architecture
+#endif
+
+/*
+ * These were the intel versions, left here for reference. There
+ * are currently no x86 users of this legacy i82596 chip.
+ */
+#if 0
#define WSWAPrfd(x) ((struct i596_rfd *)((long)x))
#define WSWAPrbd(x) ((struct i596_rbd *)((long)x))
#define WSWAPiscp(x) ((struct i596_iscp *)((long)x))
@@ -130,7 +134,6 @@ static char version[] __initdata =
#define WSWAPtbd(x) ((struct i596_tbd *)((long)x))
#define WSWAPchar(x) ((char *)((long)x))
#define ISCP_BUSY 0x0001
-#define MACH_IS_APRICOT 1
#endif
/*
@@ -383,11 +386,6 @@ static inline void CA(struct net_device *dev)
i = *(volatile u32 *) (dev->base_addr);
}
#endif
-#ifdef ENABLE_APRICOT
- if (MACH_IS_APRICOT) {
- outw(0, (short) (dev->base_addr) + 4);
- }
-#endif
}
@@ -617,9 +615,6 @@ static void rebuild_rx_bufs(struct net_device *dev)
static int init_i596_mem(struct net_device *dev)
{
struct i596_private *lp = dev->ml_priv;
-#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT)
- short ioaddr = dev->base_addr;
-#endif
unsigned long flags;
MPU_PORT(dev, PORT_RESET, NULL);
@@ -653,18 +648,6 @@ static int init_i596_mem(struct net_device *dev)
MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
-#elif defined(ENABLE_APRICOT)
-
- {
- u32 scp = virt_to_bus(&lp->scp);
-
- /* change the scp address */
- outw(0, ioaddr);
- outw(0, ioaddr);
- outb(4, ioaddr + 0xf);
- outw(scp | 2, ioaddr);
- outw(scp >> 16, ioaddr);
- }
#endif
lp->last_cmd = jiffies;
@@ -677,10 +660,6 @@ static int init_i596_mem(struct net_device *dev)
if (MACH_IS_BVME6000)
lp->scp.sysbus = 0x0000004c;
#endif
-#ifdef ENABLE_APRICOT
- if (MACH_IS_APRICOT)
- lp->scp.sysbus = 0x00440000;
-#endif
lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
@@ -698,10 +677,6 @@ static int init_i596_mem(struct net_device *dev)
DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
-#if defined(ENABLE_APRICOT)
- (void) inb(ioaddr + 0x10);
- outb(4, ioaddr + 0xf);
-#endif
CA(dev);
if (wait_istat(dev,lp,1000,"initialization timed out"))
@@ -1203,43 +1178,6 @@ struct net_device * __init i82596_probe(int unit)
goto found;
}
#endif
-#ifdef ENABLE_APRICOT
- {
- int checksum = 0;
- int ioaddr = 0x300;
-
- /* this is easy the ethernet interface can only be at 0x300 */
- /* first check nothing is already registered here */
-
- if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) {
- printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
- err = -EBUSY;
- goto out;
- }
-
- dev->base_addr = ioaddr;
-
- for (i = 0; i < 8; i++) {
- eth_addr[i] = inb(ioaddr + 8 + i);
- checksum += eth_addr[i];
- }
-
- /* checksum is a multiple of 0x100, got this wrong first time;
- some machines have 0x100, some 0x200. The DOS driver doesn't
- even bother with the checksum.
- Some other boards trip the checksum.. but then appear as
- ether address 0. Trap these - AC */
-
- if ((checksum % 0x100) ||
- (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
- err = -ENODEV;
- goto out1;
- }
-
- dev->irq = 10;
- goto found;
- }
-#endif
err = -ENODEV;
goto out;
@@ -1296,9 +1234,6 @@ out2:
#endif
free_page ((u32)(dev->mem_start));
out1:
-#ifdef ENABLE_APRICOT
- release_region(dev->base_addr, I596_TOTAL_SIZE);
-#endif
out:
free_netdev(dev);
return ERR_PTR(err);
@@ -1455,10 +1390,6 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
*ethirq = 3;
}
#endif
-#ifdef ENABLE_APRICOT
- (void) inb(ioaddr + 0x10);
- outb(4, ioaddr + 0xf);
-#endif
CA(dev);
DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
@@ -1589,11 +1520,6 @@ static void set_multicast_list(struct net_device *dev)
#ifdef MODULE
static struct net_device *dev_82596;
-#ifdef ENABLE_APRICOT
-module_param(irq, int, 0);
-MODULE_PARM_DESC(irq, "Apricot IRQ number");
-#endif
-
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");
@@ -1620,10 +1546,6 @@ void __exit cleanup_module(void)
IOMAP_FULL_CACHING);
#endif
free_page ((u32)(dev_82596->mem_start));
-#ifdef ENABLE_APRICOT
- /* If we don't do this, we can't re-insmod it later. */
- release_region(dev_82596->base_addr, I596_TOTAL_SIZE);
-#endif
free_netdev(dev_82596);
}
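
One note on the first hunk above, which leaves the old Intel/Apricot definitions behind under #if 0: the surviving WSWAP* macros simply exchange the two 16-bit halves of a 32-bit value before a pointer is handed to the 82596. A tiny worked example with a made-up value (purely illustrative, not taken from the driver):

	u32 cpu_val  = 0x12345678;
	u32 chip_val = ((u32)cpu_val << 16) | ((u32)cpu_val >> 16);
	/* chip_val is now 0x56781234 */
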
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
index 959faf7388e2..9521e68aa3b3 100644
--- a/drivers/net/ethernet/i825xx/Kconfig
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -5,9 +5,7 @@
config NET_VENDOR_I825XX
bool "Intel (82586/82593/82596) devices"
default y
- depends on NET_VENDOR_INTEL && (ISA || ISA_DMA_API || ARM || \
- ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
- GSC || BVME6000 || MVME16x || EXPERIMENTAL)
+ depends on NET_VENDOR_INTEL
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -20,29 +18,6 @@ config NET_VENDOR_I825XX
if NET_VENDOR_I825XX
-config ELPLUS
- tristate "3c505 \"EtherLink Plus\" support"
- depends on ISA && ISA_DMA_API
- ---help---
- Information about this network (Ethernet) card can be found in
- <file:Documentation/networking/3c505.txt>. If you have a card of
- this type, say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c505.
-
-config EL16
- tristate "3c507 \"EtherLink 16\" support (EXPERIMENTAL)"
- depends on ISA && EXPERIMENTAL
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c507.
-
config ARM_ETHER1
tristate "Acorn Ether1 support"
depends on ARM && ARCH_ACORN
@@ -50,17 +25,6 @@ config ARM_ETHER1
If you have an Acorn system with one of these (AKA25) network cards,
you should say Y to this option if you wish to use it with Linux.
-config APRICOT
- tristate "Apricot Xen-II on board Ethernet"
- depends on ISA
- ---help---
- If you have a network (Ethernet) controller of this type, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called apricot.
-
config BVME6000_NET
tristate "BVME6000 Ethernet support"
depends on BVME6000
@@ -70,33 +34,6 @@ config BVME6000_NET
in your kernel.
To compile this driver as a module, choose M here.
-config EEXPRESS
- tristate "EtherExpress 16 support"
- depends on ISA
- ---help---
- If you have an EtherExpress16 network (Ethernet) card, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that the Intel
- EtherExpress16 card used to be regarded as a very poor choice
- because the driver was very unreliable. We now have a new driver
- that should do better.
-
- To compile this driver as a module, choose M here. The module
- will be called eexpress.
-
-config EEXPRESS_PRO
- tristate "EtherExpressPro support/EtherExpress 10 (i82595) support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y. This
- driver supports Intel i82595{FX,TX} based boards. Note however
- that the EtherExpress PRO/100 Ethernet card has its own separate
- driver. Please read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called eepro.
-
config LASI_82596
tristate "Lasi ethernet"
depends on GSC
@@ -104,14 +41,6 @@ config LASI_82596
Say Y here to support the builtin Intel 82596 ethernet controller
found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet.
-config LP486E
- tristate "LP486E on board Ethernet"
- depends on ISA
- ---help---
- Say Y here to support the 82596-based on-board Ethernet controller
- for the Panther motherboard, which is one of the two shipped in the
- Intel Professional Workstation.
-
config MVME16x_NET
tristate "MVME16x Ethernet support"
depends on MVME16x
@@ -121,17 +50,6 @@ config MVME16x_NET
driver for this chip in your kernel.
To compile this driver as a module, choose M here.
-config NI52
- tristate "NI5210 support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ni52.
-
config SNI_82596
tristate "SNI RM ethernet"
depends on SNI_RM
@@ -148,14 +66,4 @@ config SUN3_82586
that this driver does not support 82586-based adapters on additional
VME boards.
-config ZNET
- tristate "Zenith Z-Note support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && ISA_DMA_API && X86
- ---help---
- The Zenith Z-Note notebook computer has a built-in network
- (Ethernet) card, and this is the Linux driver for it. Note that the
- IBM Thinkpad 300 is compatible with the Z-Note and is also supported
- by this driver. Read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
endif # NET_VENDOR_I825XX
diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile
index 6adff85e8ecc..8c8dcd29c40d 100644
--- a/drivers/net/ethernet/i825xx/Makefile
+++ b/drivers/net/ethernet/i825xx/Makefile
@@ -3,15 +3,7 @@
#
obj-$(CONFIG_ARM_ETHER1) += ether1.o
-obj-$(CONFIG_EEXPRESS) += eexpress.o
-obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
-obj-$(CONFIG_ELPLUS) += 3c505.o
-obj-$(CONFIG_EL16) += 3c507.o
-obj-$(CONFIG_LP486E) += lp486e.o
-obj-$(CONFIG_NI52) += ni52.o
obj-$(CONFIG_SUN3_82586) += sun3_82586.o
-obj-$(CONFIG_ZNET) += znet.o
-obj-$(CONFIG_APRICOT) += 82596.o
obj-$(CONFIG_LASI_82596) += lasi_82596.o
obj-$(CONFIG_SNI_82596) += sni_82596.o
obj-$(CONFIG_MVME16x_NET) += 82596.o
diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c
deleted file mode 100644
index 7f49fd54c521..000000000000
--- a/drivers/net/ethernet/i825xx/eepro.c
+++ /dev/null
@@ -1,1822 +0,0 @@
-/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */
-/*
- Written 1994, 1995,1996 by Bao C. Ha.
-
- Copyright (C) 1994, 1995,1996 by Bao C. Ha.
-
- This software may be used and distributed
- according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- The author may be reached at bao.ha@srs.gov
- or 418 Hastings Place, Martinez, GA 30907.
-
- Things remaining to do:
- Better record keeping of errors.
- Eliminate transmit interrupt to reduce overhead.
- Implement "concurrent processing". I won't be doing it!
-
- Bugs:
-
- If you have a problem of not detecting the 82595 during a
- reboot (warm reset), disabling the FLASH memory should fix it.
- This is a compatibility hardware problem.
-
- Versions:
- 0.13b basic ethtool support (aris, 09/13/2004)
- 0.13a in memory shortage, drop packets also in board
- (Michael Westermann <mw@microdata-pos.de>, 07/30/2002)
- 0.13 irq sharing, rewrote probe function, fixed a nasty bug in
- hardware_send_packet and a major cleanup (aris, 11/08/2001)
- 0.12d fixing a problem with single card detected as eight eth devices
- fixing a problem with sudden drop in card performance
- (chris (asdn@go2.pl), 10/29/2001)
- 0.12c fixing some problems with old cards (aris, 01/08/2001)
- 0.12b misc fixes (aris, 06/26/2000)
- 0.12a port of version 0.12a of 2.2.x kernels to 2.3.x
- (aris (aris@conectiva.com.br), 05/19/2000)
- 0.11e some tweaks about multiple cards support (PdP, jul/aug 1999)
- 0.11d added __initdata, __init stuff; call spin_lock_init
- in eepro_probe1. Replaced "eepro" by dev->name. Augmented
- the code protected by spin_lock in interrupt routine
- (PdP, 12/12/1998)
- 0.11c minor cleanup (PdP, RMC, 09/12/1998)
- 0.11b Pascal Dupuis (dupuis@lei.ucl.ac.be): works as a module
- under 2.1.xx. Debug messages are flagged as KERN_DEBUG to
- avoid console flooding. Added locking at critical parts. Now
- the dawn thing is SMP safe.
- 0.11a Attempt to get 2.1.xx support up (RMC)
- 0.11 Brian Candler added support for multiple cards. Tested as
- a module, no idea if it works when compiled into kernel.
-
- 0.10e Rick Bressler notified me that ifconfig up;ifconfig down fails
- because the irq is lost somewhere. Fixed that by moving
- request_irq and free_irq to eepro_open and eepro_close respectively.
- 0.10d Ugh! Now Wakeup works. Was seriously broken in my first attempt.
- I'll need to find a way to specify an ioport other than
- the default one in the PnP case. PnP definitively sucks.
- And, yes, this is not the only reason.
- 0.10c PnP Wakeup Test for 595FX. uncomment #define PnPWakeup;
- to use.
- 0.10b Should work now with (some) Pro/10+. At least for
- me (and my two cards) it does. _No_ guarantee for
- function with non-Pro/10+ cards! (don't have any)
- (RMC, 9/11/96)
-
- 0.10 Added support for the Etherexpress Pro/10+. The
- IRQ map was changed significantly from the old
- pro/10. The new interrupt map was provided by
- Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu).
- (BCH, 9/3/96)
-
- 0.09 Fixed a race condition in the transmit algorithm,
- which causes crashes under heavy load with fast
- pentium computers. The performance should also
- improve a bit. The size of RX buffer, and hence
- TX buffer, can also be changed via lilo or insmod.
- (BCH, 7/31/96)
-
- 0.08 Implement 32-bit I/O for the 82595TX and 82595FX
- based lan cards. Disable full-duplex mode if TPE
- is not used. (BCH, 4/8/96)
-
- 0.07a Fix a stat report which counts every packet as a
- heart-beat failure. (BCH, 6/3/95)
-
- 0.07 Modified to support all other 82595-based lan cards.
- The IRQ vector of the EtherExpress Pro will be set
- according to the value saved in the EEPROM. For other
- cards, I will do autoirq_request() to grab the next
- available interrupt vector. (BCH, 3/17/95)
-
- 0.06a,b Interim released. Minor changes in the comments and
- print out format. (BCH, 3/9/95 and 3/14/95)
-
- 0.06 First stable release that I am comfortable with. (BCH,
- 3/2/95)
-
- 0.05 Complete testing of multicast. (BCH, 2/23/95)
-
- 0.04 Adding multicast support. (BCH, 2/14/95)
-
- 0.03 First widely alpha release for public testing.
- (BCH, 2/14/95)
-
-*/
-
-static const char version[] =
- "eepro.c: v0.13b 09/13/2004 aris@cathedrallabs.org\n";
-
-#include <linux/module.h>
-
-/*
- Sources:
-
- This driver wouldn't have been written without the availability
- of the Crynwr's Lan595 driver source code. It helps me to
- familiarize with the 82595 chipset while waiting for the Intel
- documentation. I also learned how to detect the 82595 using
- the packet driver's technique.
-
- This driver is written by cutting and pasting the skeleton.c driver
- provided by Donald Becker. I also borrowed the EEPROM routine from
- Donald Becker's 82586 driver.
-
- Datasheet for the Intel 82595 (including the TX and FX version). It
- provides just enough info that the casual reader might think that it
- documents the i82595.
-
- The User Manual for the 82595. It provides a lot of the missing
- information.
-
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/ethtool.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#define DRV_NAME "eepro"
-#define DRV_VERSION "0.13c"
-
-#define compat_dev_kfree_skb( skb, mode ) dev_kfree_skb( (skb) )
-/* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */
-#define SLOW_DOWN inb(0x80)
-/* udelay(2) */
-#define compat_init_data __initdata
-enum iftype { AUI=0, BNC=1, TPE=2 };
-
-/* First, a few definitions that the brave might change. */
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int eepro_portlist[] compat_init_data =
- { 0x300, 0x210, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0};
-/* note: 0x300 is default, the 595FX supports ALL IO Ports
- from 0x000 to 0x3F0, some of which are reserved in PCs */
-
-/* To try the (not-really PnP Wakeup: */
-/*
-#define PnPWakeup
-*/
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 0
-#endif
-static unsigned int net_debug = NET_DEBUG;
-
-/* The number of low I/O ports used by the ethercard. */
-#define EEPRO_IO_EXTENT 16
-
-/* Different 82595 chips */
-#define LAN595 0
-#define LAN595TX 1
-#define LAN595FX 2
-#define LAN595FX_10ISA 3
-
-/* Information that needs to be kept for each board. */
-struct eepro_local {
- unsigned rx_start;
- unsigned tx_start; /* start of the transmit chain */
- int tx_last; /* pointer to last packet in the transmit chain */
- unsigned tx_end; /* end of the transmit chain (plus 1) */
- int eepro; /* 1 for the EtherExpress Pro/10,
- 2 for the EtherExpress Pro/10+,
- 3 for the EtherExpress 10 (blue cards),
- 0 for other 82595-based lan cards. */
- int version; /* a flag to indicate if this is a TX or FX
- version of the 82595 chip. */
- int stepping;
-
- spinlock_t lock; /* Serializing lock */
-
- unsigned rcv_ram; /* pre-calculated space for rx */
- unsigned xmt_ram; /* pre-calculated space for tx */
- unsigned char xmt_bar;
- unsigned char xmt_lower_limit_reg;
- unsigned char xmt_upper_limit_reg;
- short xmt_lower_limit;
- short xmt_upper_limit;
- short rcv_lower_limit;
- short rcv_upper_limit;
- unsigned char eeprom_reg;
- unsigned short word[8];
-};
-
-/* The station (ethernet) address prefix, used for IDing the board. */
-#define SA_ADDR0 0x00 /* Etherexpress Pro/10 */
-#define SA_ADDR1 0xaa
-#define SA_ADDR2 0x00
-
-#define GetBit(x,y) ((x & (1<<y))>>y)
-
-/* EEPROM Word 0: */
-#define ee_PnP 0 /* Plug 'n Play enable bit */
-#define ee_Word1 1 /* Word 1? */
-#define ee_BusWidth 2 /* 8/16 bit */
-#define ee_FlashAddr 3 /* Flash Address */
-#define ee_FlashMask 0x7 /* Mask */
-#define ee_AutoIO 6 /* */
-#define ee_reserved0 7 /* =0! */
-#define ee_Flash 8 /* Flash there? */
-#define ee_AutoNeg 9 /* Auto Negotiation enabled? */
-#define ee_IO0 10 /* IO Address LSB */
-#define ee_IO0Mask 0x /*...*/
-#define ee_IO1 15 /* IO MSB */
-
-/* EEPROM Word 1: */
-#define ee_IntSel 0 /* Interrupt */
-#define ee_IntMask 0x7
-#define ee_LI 3 /* Link Integrity 0= enabled */
-#define ee_PC 4 /* Polarity Correction 0= enabled */
-#define ee_TPE_AUI 5 /* PortSelection 1=TPE */
-#define ee_Jabber 6 /* Jabber prevention 0= enabled */
-#define ee_AutoPort 7 /* Auto Port Selection 1= Disabled */
-#define ee_SMOUT 8 /* SMout Pin Control 0= Input */
-#define ee_PROM 9 /* Flash EPROM / PROM 0=Flash */
-#define ee_reserved1 10 /* .. 12 =0! */
-#define ee_AltReady 13 /* Alternate Ready, 0=normal */
-#define ee_reserved2 14 /* =0! */
-#define ee_Duplex 15
-
-/* Word2,3,4: */
-#define ee_IA5 0 /*bit start for individual Addr Byte 5 */
-#define ee_IA4 8 /*bit start for individual Addr Byte 4 */
-#define ee_IA3 0 /*bit start for individual Addr Byte 3 */
-#define ee_IA2 8 /*bit start for individual Addr Byte 2 */
-#define ee_IA1 0 /*bit start for individual Addr Byte 1 */
-#define ee_IA0 8 /*bit start for individual Addr Byte 0 */
-
-/* Word 5: */
-#define ee_BNC_TPE 0 /* 0=TPE */
-#define ee_BootType 1 /* 00=None, 01=IPX, 10=ODI, 11=NDIS */
-#define ee_BootTypeMask 0x3
-#define ee_NumConn 3 /* Number of Connections 0= One or Two */
-#define ee_FlashSock 4 /* Presence of Flash Socket 0= Present */
-#define ee_PortTPE 5
-#define ee_PortBNC 6
-#define ee_PortAUI 7
-#define ee_PowerMgt 10 /* 0= disabled */
-#define ee_CP 13 /* Concurrent Processing */
-#define ee_CPMask 0x7
-
-/* Word 6: */
-#define ee_Stepping 0 /* Stepping info */
-#define ee_StepMask 0x0F
-#define ee_BoardID 4 /* Manufacturer Board ID, reserved */
-#define ee_BoardMask 0x0FFF
-
-/* Word 7: */
-#define ee_INT_TO_IRQ 0 /* int to IRQ Mapping = 0x1EB8 for Pro/10+ */
-#define ee_FX_INT2IRQ 0x1EB8 /* the _only_ mapping allowed for FX chips */
-
-/*..*/
-#define ee_SIZE 0x40 /* total EEprom Size */
-#define ee_Checksum 0xBABA /* initial and final value for adding checksum */
-
-
-/* Card identification via EEprom: */
-#define ee_addr_vendor 0x10 /* Word offset for EISA Vendor ID */
-#define ee_addr_id 0x11 /* Word offset for Card ID */
-#define ee_addr_SN 0x12 /* Serial Number */
-#define ee_addr_CRC_8 0x14 /* CRC over last three Bytes */
-
-
-#define ee_vendor_intel0 0x25 /* Vendor ID Intel */
-#define ee_vendor_intel1 0xD4
-#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */
-#define ee_id_eepro10p1 0x31
-
-#define TX_TIMEOUT ((4*HZ)/10)
-
-/* Index to functions, as function prototypes. */
-
-static int eepro_probe1(struct net_device *dev, int autoprobe);
-static int eepro_open(struct net_device *dev);
-static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t eepro_interrupt(int irq, void *dev_id);
-static void eepro_rx(struct net_device *dev);
-static void eepro_transmit_interrupt(struct net_device *dev);
-static int eepro_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static void eepro_tx_timeout (struct net_device *dev);
-
-static int read_eeprom(int ioaddr, int location, struct net_device *dev);
-static int hardware_send_packet(struct net_device *dev, void *buf, short length);
-static int eepro_grab_irq(struct net_device *dev);
-
-/*
- Details of the i82595.
-
-You will need either the datasheet or the user manual to understand what
-is going on here. The 82595 is very different from the 82586, 82593.
-
-The receive algorithm in eepro_rx() is just an implementation of the
-RCV ring structure that the Intel 82595 imposes at the hardware level.
-The receive buffer is set at 24K, and the transmit buffer is 8K. I
-am assuming that the total buffer memory is 32K, which is true for the
-Intel EtherExpress Pro/10. If it is less than that on a generic card,
-the driver will be broken.
-
-The transmit algorithm in the hardware_send_packet() is similar to the
-one in the eepro_rx(). The transmit buffer is a ring linked list.
-I just queue the next available packet to the end of the list. In my
-system, the 82595 is so fast that the list seems to always contain a
-single packet. In other systems with faster computers and more congested
-network traffic, the ring linked list should improve performance by
-allowing up to 8K worth of packets to be queued.
-
-The sizes of the receive and transmit buffers can now be changed via lilo
-or insmod. Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0"
-where rx-buffer is in KB units. Modules use the parameter mem, which is
-also in KB units, for example "insmod io=io-address irq=0 mem=rx-buffer."
-The receive buffer has to be more than 3K and less than 29K. Otherwise,
-it is reset to the default of 24K, and, hence, 8K for the transmit
-buffer (transmit-buffer = 32K - receive-buffer).
-
-*/
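
A minimal sketch of the buffer split this comment describes, assuming the 3K/29K bounds and the 24K default stated above (the driver's own version of this calculation is eepro_recalc() further down; rcv_ram and xmt_ram mirror the fields of struct eepro_local):

	/* rcv_ram is the receive-buffer size requested via lilo/insmod, in bytes */
	if (rcv_ram <= 3*1024 || rcv_ram >= 29*1024)
		rcv_ram = RCV_DEFAULT_RAM;	/* fall back to the 24K default */
	xmt_ram = RAM_SIZE - rcv_ram;		/* transmit gets the rest of the 32K */
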
-#define RAM_SIZE 0x8000
-
-#define RCV_HEADER 8
-#define RCV_DEFAULT_RAM 0x6000
-
-#define XMT_HEADER 8
-#define XMT_DEFAULT_RAM (RAM_SIZE - RCV_DEFAULT_RAM)
-
-#define XMT_START_PRO RCV_DEFAULT_RAM
-#define XMT_START_10 0x0000
-#define RCV_START_PRO 0x0000
-#define RCV_START_10 XMT_DEFAULT_RAM
-
-#define RCV_DONE 0x0008
-#define RX_OK 0x2000
-#define RX_ERROR 0x0d81
-
-#define TX_DONE_BIT 0x0080
-#define TX_OK 0x2000
-#define CHAIN_BIT 0x8000
-#define XMT_STATUS 0x02
-#define XMT_CHAIN 0x04
-#define XMT_COUNT 0x06
-
-#define BANK0_SELECT 0x00
-#define BANK1_SELECT 0x40
-#define BANK2_SELECT 0x80
-
-/* Bank 0 registers */
-#define COMMAND_REG 0x00 /* Register 0 */
-#define MC_SETUP 0x03
-#define XMT_CMD 0x04
-#define DIAGNOSE_CMD 0x07
-#define RCV_ENABLE_CMD 0x08
-#define RCV_DISABLE_CMD 0x0a
-#define STOP_RCV_CMD 0x0b
-#define RESET_CMD 0x0e
-#define POWER_DOWN_CMD 0x18
-#define RESUME_XMT_CMD 0x1c
-#define SEL_RESET_CMD 0x1e
-#define STATUS_REG 0x01 /* Register 1 */
-#define RX_INT 0x02
-#define TX_INT 0x04
-#define EXEC_STATUS 0x30
-#define ID_REG 0x02 /* Register 2 */
-#define R_ROBIN_BITS 0xc0 /* round robin counter */
-#define ID_REG_MASK 0x2c
-#define ID_REG_SIG 0x24
-#define AUTO_ENABLE 0x10
-#define INT_MASK_REG 0x03 /* Register 3 */
-#define RX_STOP_MASK 0x01
-#define RX_MASK 0x02
-#define TX_MASK 0x04
-#define EXEC_MASK 0x08
-#define ALL_MASK 0x0f
-#define IO_32_BIT 0x10
-#define RCV_BAR 0x04 /* The following are word (16-bit) registers */
-#define RCV_STOP 0x06
-
-#define XMT_BAR_PRO 0x0a
-#define XMT_BAR_10 0x0b
-
-#define HOST_ADDRESS_REG 0x0c
-#define IO_PORT 0x0e
-#define IO_PORT_32_BIT 0x0c
-
-/* Bank 1 registers */
-#define REG1 0x01
-#define WORD_WIDTH 0x02
-#define INT_ENABLE 0x80
-#define INT_NO_REG 0x02
-#define RCV_LOWER_LIMIT_REG 0x08
-#define RCV_UPPER_LIMIT_REG 0x09
-
-#define XMT_LOWER_LIMIT_REG_PRO 0x0a
-#define XMT_UPPER_LIMIT_REG_PRO 0x0b
-#define XMT_LOWER_LIMIT_REG_10 0x0b
-#define XMT_UPPER_LIMIT_REG_10 0x0a
-
-/* Bank 2 registers */
-#define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */
-#define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */
-#define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */
-#define REG2 0x02
-#define PRMSC_Mode 0x01
-#define Multi_IA 0x20
-#define REG3 0x03
-#define TPE_BIT 0x04
-#define BNC_BIT 0x20
-#define REG13 0x0d
-#define FDX 0x00
-#define A_N_ENABLE 0x02
-
-#define I_ADD_REG0 0x04
-#define I_ADD_REG1 0x05
-#define I_ADD_REG2 0x06
-#define I_ADD_REG3 0x07
-#define I_ADD_REG4 0x08
-#define I_ADD_REG5 0x09
-
-#define EEPROM_REG_PRO 0x0a
-#define EEPROM_REG_10 0x0b
-
-#define EESK 0x01
-#define EECS 0x02
-#define EEDI 0x04
-#define EEDO 0x08
-
-/* do a full reset */
-#define eepro_reset(ioaddr) outb(RESET_CMD, ioaddr)
-
-/* do a nice reset */
-#define eepro_sel_reset(ioaddr) { \
- outb(SEL_RESET_CMD, ioaddr); \
- SLOW_DOWN; \
- SLOW_DOWN; \
- }
-
-/* disable all interrupts */
-#define eepro_dis_int(ioaddr) outb(ALL_MASK, ioaddr + INT_MASK_REG)
-
-/* clear all interrupts */
-#define eepro_clear_int(ioaddr) outb(ALL_MASK, ioaddr + STATUS_REG)
-
-/* enable tx/rx */
-#define eepro_en_int(ioaddr) outb(ALL_MASK & ~(RX_MASK | TX_MASK), \
- ioaddr + INT_MASK_REG)
-
-/* enable exec event interrupt */
-#define eepro_en_intexec(ioaddr) outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG)
-
-/* enable rx */
-#define eepro_en_rx(ioaddr) outb(RCV_ENABLE_CMD, ioaddr)
-
-/* disable rx */
-#define eepro_dis_rx(ioaddr) outb(RCV_DISABLE_CMD, ioaddr)
-
-/* switch bank */
-#define eepro_sw2bank0(ioaddr) outb(BANK0_SELECT, ioaddr)
-#define eepro_sw2bank1(ioaddr) outb(BANK1_SELECT, ioaddr)
-#define eepro_sw2bank2(ioaddr) outb(BANK2_SELECT, ioaddr)
-
-/* enable interrupt line */
-#define eepro_en_intline(ioaddr) outb(inb(ioaddr + REG1) | INT_ENABLE,\
- ioaddr + REG1)
-
-/* disable interrupt line */
-#define eepro_dis_intline(ioaddr) outb(inb(ioaddr + REG1) & 0x7f, \
- ioaddr + REG1);
-
-/* set diagnose flag */
-#define eepro_diag(ioaddr) outb(DIAGNOSE_CMD, ioaddr)
-
-/* ack for rx int */
-#define eepro_ack_rx(ioaddr) outb (RX_INT, ioaddr + STATUS_REG)
-
-/* ack for tx int */
-#define eepro_ack_tx(ioaddr) outb (TX_INT, ioaddr + STATUS_REG)
-
-/* a complete sel reset */
-#define eepro_complete_selreset(ioaddr) { \
- dev->stats.tx_errors++;\
- eepro_sel_reset(ioaddr);\
- lp->tx_end = \
- lp->xmt_lower_limit;\
- lp->tx_start = lp->tx_end;\
- lp->tx_last = 0;\
- dev->trans_start = jiffies;\
- netif_wake_queue(dev);\
- eepro_en_rx(ioaddr);\
- }
-
-/* Check for a network adaptor of this type, and return '0' if one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, allocate space for the device and return success
- (detachable devices only).
- */
-static int __init do_eepro_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
-#ifdef PnPWakeup
- /* XXXX for multiple cards should this only be run once? */
-
- /* Wakeup: */
- #define WakeupPort 0x279
- #define WakeupSeq {0x6A, 0xB5, 0xDA, 0xED, 0xF6, 0xFB, 0x7D, 0xBE,\
- 0xDF, 0x6F, 0x37, 0x1B, 0x0D, 0x86, 0xC3, 0x61,\
- 0xB0, 0x58, 0x2C, 0x16, 0x8B, 0x45, 0xA2, 0xD1,\
- 0xE8, 0x74, 0x3A, 0x9D, 0xCE, 0xE7, 0x73, 0x43}
-
- {
- unsigned short int WS[32]=WakeupSeq;
-
- if (request_region(WakeupPort, 2, "eepro wakeup")) {
- if (net_debug>5)
- printk(KERN_DEBUG "Waking UP\n");
-
- outb_p(0,WakeupPort);
- outb_p(0,WakeupPort);
- for (i=0; i<32; i++) {
- outb_p(WS[i],WakeupPort);
- if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]);
- }
-
- release_region(WakeupPort, 2);
- } else
- printk(KERN_WARNING "PnP wakeup region busy!\n");
- }
-#endif
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return eepro_probe1(dev, 0);
-
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; eepro_portlist[i]; i++) {
- dev->base_addr = eepro_portlist[i];
- dev->irq = irq;
- if (eepro_probe1(dev, 1) == 0)
- return 0;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init eepro_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct eepro_local));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_eepro_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static void __init printEEPROMInfo(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned short Word;
- int i,j;
-
- j = ee_Checksum;
- for (i = 0; i < 8; i++)
- j += lp->word[i];
- for ( ; i < ee_SIZE; i++)
- j += read_eeprom(ioaddr, i, dev);
-
- printk(KERN_DEBUG "Checksum: %#x\n",j&0xffff);
-
- Word = lp->word[0];
- printk(KERN_DEBUG "Word0:\n");
- printk(KERN_DEBUG " Plug 'n Pray: %d\n",GetBit(Word,ee_PnP));
- printk(KERN_DEBUG " Buswidth: %d\n",(GetBit(Word,ee_BusWidth)+1)*8 );
- printk(KERN_DEBUG " AutoNegotiation: %d\n",GetBit(Word,ee_AutoNeg));
- printk(KERN_DEBUG " IO Address: %#x\n", (Word>>ee_IO0)<<4);
-
- if (net_debug>4) {
- Word = lp->word[1];
- printk(KERN_DEBUG "Word1:\n");
- printk(KERN_DEBUG " INT: %d\n", Word & ee_IntMask);
- printk(KERN_DEBUG " LI: %d\n", GetBit(Word,ee_LI));
- printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC));
- printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI));
- printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber));
- printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort));
- printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex));
- }
-
- Word = lp->word[5];
- printk(KERN_DEBUG "Word5:\n");
- printk(KERN_DEBUG " BNC: %d\n",GetBit(Word,ee_BNC_TPE));
- printk(KERN_DEBUG " NumConnectors: %d\n",GetBit(Word,ee_NumConn));
- printk(KERN_DEBUG " Has ");
- if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE ");
- if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC ");
- if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI ");
- printk(KERN_DEBUG "port(s)\n");
-
- Word = lp->word[6];
- printk(KERN_DEBUG "Word6:\n");
- printk(KERN_DEBUG " Stepping: %d\n",Word & ee_StepMask);
- printk(KERN_DEBUG " BoardID: %d\n",Word>>ee_BoardID);
-
- Word = lp->word[7];
- printk(KERN_DEBUG "Word7:\n");
- printk(KERN_DEBUG " INT to IRQ:\n");
-
- for (i=0, j=0; i<15; i++)
- if (GetBit(Word,i)) printk(KERN_DEBUG " INT%d -> IRQ %d;",j++,i);
-
- printk(KERN_DEBUG "\n");
-}
-
-/* function to recalculate the buffer limits based on rcv_ram */
-static void eepro_recalc (struct net_device *dev)
-{
- struct eepro_local * lp;
-
- lp = netdev_priv(dev);
- lp->xmt_ram = RAM_SIZE - lp->rcv_ram;
-
- if (lp->eepro == LAN595FX_10ISA) {
- lp->xmt_lower_limit = XMT_START_10;
- lp->xmt_upper_limit = (lp->xmt_ram - 2);
- lp->rcv_lower_limit = lp->xmt_ram;
- lp->rcv_upper_limit = (RAM_SIZE - 2);
- }
- else {
- lp->rcv_lower_limit = RCV_START_PRO;
- lp->rcv_upper_limit = (lp->rcv_ram - 2);
- lp->xmt_lower_limit = lp->rcv_ram;
- lp->xmt_upper_limit = (RAM_SIZE - 2);
- }
-}
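The split computed by eepro_recalc() can be checked in isolation. Below is a minimal standalone sketch of the same arithmetic for the Pro-style layout, using made-up sizes (a hypothetical 32 KB adapter RAM with 24 KB given to receive, and an assumed receive start of 0); the names DEMO_* and the sketch itself are illustrative only, and the driver's real RAM_SIZE, RCV_START_PRO and RCV_DEFAULT_RAM constants are defined elsewhere in this file.

#include <assert.h>
#include <stdio.h>

/* Hypothetical sizes for illustration only; the driver's real
 * constants (RAM_SIZE, RCV_DEFAULT_RAM, RCV_START_PRO) live elsewhere. */
#define DEMO_RAM_SIZE   0x8000  /* assumed 32 KB of on-board buffer RAM */
#define DEMO_RCV_RAM    0x6000  /* assumed 24 KB handed to the receiver */
#define DEMO_RCV_START  0x0000  /* assumed start of the receive area    */

int main(void)
{
    unsigned xmt_ram = DEMO_RAM_SIZE - DEMO_RCV_RAM;

    /* Pro-style layout: receive area first, transmit area above it,
     * each upper limit stopping two bytes short of the next region. */
    unsigned rcv_lower = DEMO_RCV_START;
    unsigned rcv_upper = DEMO_RCV_RAM - 2;
    unsigned xmt_lower = DEMO_RCV_RAM;
    unsigned xmt_upper = DEMO_RAM_SIZE - 2;

    assert(xmt_ram == 0x2000);          /* 8 KB left for transmit */
    printf("rcv: %#x..%#x  xmt: %#x..%#x\n",
           rcv_lower, rcv_upper, xmt_lower, xmt_upper);
    return 0;
}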
-
-/* prints boot-time info */
-static void __init eepro_print_info (struct net_device *dev)
-{
- struct eepro_local * lp = netdev_priv(dev);
- int i;
- const char * ifmap[] = {"AUI", "10Base2", "10BaseT"};
-
- i = inb(dev->base_addr + ID_REG);
- printk(KERN_DEBUG " id: %#x ",i);
- printk(" io: %#x ", (unsigned)dev->base_addr);
-
- switch (lp->eepro) {
- case LAN595FX_10ISA:
- printk("%s: Intel EtherExpress 10 ISA\n at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- case LAN595FX:
- printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- case LAN595TX:
- printk("%s: Intel EtherExpress Pro/10 ISA at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- case LAN595:
- printk("%s: Intel 82595-based lan card at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- }
-
- printk(" %pM", dev->dev_addr);
-
- if (net_debug > 3)
- printk(KERN_DEBUG ", %dK RCV buffer",
- (int)(lp->rcv_ram)/1024);
-
- if (dev->irq > 2)
- printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]);
- else
- printk(", %s.\n", ifmap[dev->if_port]);
-
- if (net_debug > 3) {
- i = lp->word[5];
- if (i & 0x2000) /* bit 13 of EEPROM word 5 */
- printk(KERN_DEBUG "%s: Concurrent Processing is "
- "enabled but not used!\n", dev->name);
- }
-
- /* Check the station address for the manufacturer's code */
- if (net_debug>3)
- printEEPROMInfo(dev);
-}
-
-static const struct ethtool_ops eepro_ethtool_ops;
-
-static const struct net_device_ops eepro_netdev_ops = {
- .ndo_open = eepro_open,
- .ndo_stop = eepro_close,
- .ndo_start_xmit = eepro_send_packet,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_tx_timeout = eepro_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* This is the real probe routine. Linux has a history of friendly device
- probes on the ISA bus. A good device probe avoids doing writes, and
- verifies that the correct device exists and functions. */
-
-static int __init eepro_probe1(struct net_device *dev, int autoprobe)
-{
- unsigned short station_addr[3], id, counter;
- int i;
- struct eepro_local *lp;
- int ioaddr = dev->base_addr;
- int err;
-
- /* Grab the region so we can find another board if autoIRQ fails. */
- if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
- if (!autoprobe)
- printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n",
- ioaddr);
- return -EBUSY;
- }
-
- /* Now, we are going to check for the signature of the
- ID_REG (register 2 of bank 0) */
-
- id = inb(ioaddr + ID_REG);
-
- if ((id & ID_REG_MASK) != ID_REG_SIG)
- goto exit;
-
- /* We seem to have the 82595 signature, let's
- play with its counter (last 2 bits of
- register 2 of bank 0) to be sure. */
-
- counter = id & R_ROBIN_BITS;
-
- if ((inb(ioaddr + ID_REG) & R_ROBIN_BITS) != (counter + 0x40))
- goto exit;
-
- lp = netdev_priv(dev);
- memset(lp, 0, sizeof(struct eepro_local));
- lp->xmt_bar = XMT_BAR_PRO;
- lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_PRO;
- lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_PRO;
- lp->eeprom_reg = EEPROM_REG_PRO;
- spin_lock_init(&lp->lock);
-
- /* Now, get the ethernet hardware address from
- the EEPROM */
- station_addr[0] = read_eeprom(ioaddr, 2, dev);
-
- /* FIXME - find another way to know that we've found
- * an Etherexpress 10
- */
- if (station_addr[0] == 0x0000 || station_addr[0] == 0xffff) {
- lp->eepro = LAN595FX_10ISA;
- lp->eeprom_reg = EEPROM_REG_10;
- lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_10;
- lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_10;
- lp->xmt_bar = XMT_BAR_10;
- station_addr[0] = read_eeprom(ioaddr, 2, dev);
- }
-
- /* get all words at once. will be used here and for ethtool */
- for (i = 0; i < 8; i++) {
- lp->word[i] = read_eeprom(ioaddr, i, dev);
- }
- station_addr[1] = lp->word[3];
- station_addr[2] = lp->word[4];
-
- if (!lp->eepro) {
- if (lp->word[7] == ee_FX_INT2IRQ)
- lp->eepro = 2;
- else if (station_addr[2] == SA_ADDR1)
- lp->eepro = 1;
- }
-
- /* Fill in the 'dev' fields. */
- for (i=0; i < 6; i++)
- dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
-
- /* RX buffer must be more than 3K and less than 29K */
- if (dev->mem_end < 3072 || dev->mem_end > 29696)
- lp->rcv_ram = RCV_DEFAULT_RAM;
-
- /* calculate {xmt,rcv}_{lower,upper}_limit */
- eepro_recalc(dev);
-
- if (GetBit(lp->word[5], ee_BNC_TPE))
- dev->if_port = BNC;
- else
- dev->if_port = TPE;
-
- if (dev->irq < 2 && lp->eepro != 0) {
- /* Mask off INT number */
- int count = lp->word[1] & 7;
- unsigned irqMask = lp->word[7];
-
- while (count--)
- irqMask &= irqMask - 1;
-
- count = ffs(irqMask);
-
- if (count)
- dev->irq = count - 1;
-
- if (dev->irq < 2) {
- printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n");
- goto exit;
- } else if (dev->irq == 2) {
- dev->irq = 9;
- }
- }
-
- dev->netdev_ops = &eepro_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->ethtool_ops = &eepro_ethtool_ops;
-
- /* print boot time info */
- eepro_print_info(dev);
-
- /* reset 82595 */
- eepro_reset(ioaddr);
-
- err = register_netdev(dev);
- if (err)
- goto err;
- return 0;
-exit:
- err = -ENODEV;
-err:
- release_region(dev->base_addr, EEPRO_IO_EXTENT);
- return err;
-}
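The IRQ selection in eepro_probe1() above clears the lowest set bit of the EEPROM's INT-to-IRQ mask once per configured INT line (the classic `x &= x - 1` idiom) and then takes ffs() of what remains. A small self-contained sketch of that selection rule follows; pick_irq() and the example mask are hypothetical and purely for illustration.

#include <assert.h>
#include <strings.h>   /* ffs() */

/* Pick the n-th set bit (counting from the lowest) out of an
 * "INT line -> usable IRQ" bitmask, the way eepro_probe1() does. */
static int pick_irq(unsigned mask, int skip)
{
    while (skip--)
        mask &= mask - 1;          /* clear the lowest set bit */
    return mask ? ffs(mask) - 1 : -1;
}

int main(void)
{
    /* Made-up example mask: IRQs 3, 5, 9, 10 and 11 are wired up. */
    unsigned mask = (1u << 3) | (1u << 5) | (1u << 9) | (1u << 10) | (1u << 11);

    assert(pick_irq(mask, 0) == 3);   /* INT0 -> IRQ 3        */
    assert(pick_irq(mask, 2) == 9);   /* INT2 -> IRQ 9        */
    assert(pick_irq(mask, 5) == -1);  /* no sixth line wired  */
    return 0;
}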
-
-/* Open/initialize the board. This is called (in the current kernel)
- sometime after booting when the 'ifconfig' program is run.
-
- This routine should set everything up anew at each open, even
- registers that "should" only need to be set once at boot, so that
- there is a non-reboot way to recover if something goes wrong.
- */
-
-static const char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
-static const char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
-static int eepro_grab_irq(struct net_device *dev)
-{
- static const int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
- const int *irqp = irqlist;
- int temp_reg, ioaddr = dev->base_addr;
-
- eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
-
- /* Enable the interrupt line. */
- eepro_en_intline(ioaddr);
-
- /* be CAREFUL, BANK 0 now */
- eepro_sw2bank0(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- /* Let EXEC event to interrupt */
- eepro_en_intexec(ioaddr);
-
- do {
- eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
-
- temp_reg = inb(ioaddr + INT_NO_REG);
- outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);
-
- eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
-
- if (request_irq (*irqp, NULL, IRQF_SHARED, "bogus", dev) != EBUSY) {
- unsigned long irq_mask;
- /* Twinkle the interrupt, and check if it's seen */
- irq_mask = probe_irq_on();
-
- eepro_diag(ioaddr); /* RESET the 82595 */
- mdelay(20);
-
- if (*irqp == probe_irq_off(irq_mask)) /* It's a good IRQ line */
- break;
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
- }
- } while (*++irqp);
-
- eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */
-
- /* Disable the physical interrupt line. */
- eepro_dis_intline(ioaddr);
-
- eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
-
- /* Mask all the interrupts. */
- eepro_dis_int(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- return dev->irq;
-}
-
-static int eepro_open(struct net_device *dev)
-{
- unsigned short temp_reg, old8, old9;
- int irqMask;
- int i, ioaddr = dev->base_addr;
- struct eepro_local *lp = netdev_priv(dev);
-
- if (net_debug > 3)
- printk(KERN_DEBUG "%s: entering eepro_open routine.\n", dev->name);
-
- irqMask = lp->word[7];
-
- if (lp->eepro == LAN595FX_10ISA) {
- if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 3;\n");
- }
- else if (irqMask == ee_FX_INT2IRQ) /* INT to IRQ Mask */
- {
- lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */
- if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 2;\n");
- }
-
- else if ((dev->dev_addr[0] == SA_ADDR0 &&
- dev->dev_addr[1] == SA_ADDR1 &&
- dev->dev_addr[2] == SA_ADDR2))
- {
- lp->eepro = 1;
- if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 1;\n");
- } /* Yes, an Intel EtherExpress Pro/10 */
-
- else lp->eepro = 0; /* No, it is a generic 82595 lan card */
-
- /* Get the interrupt vector for the 82595 */
- if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
- printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
- return -EAGAIN;
- }
-
- if (request_irq(dev->irq , eepro_interrupt, 0, dev->name, dev)) {
- printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
- return -EAGAIN;
- }
-
- /* Initialize the 82595. */
-
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- temp_reg = inb(ioaddr + lp->eeprom_reg);
-
- lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */
-
- if (net_debug > 3)
- printk(KERN_DEBUG "The stepping of the 82595 is %d\n", lp->stepping);
-
- if (temp_reg & 0x10) /* Check the TurnOff Enable bit */
- outb(temp_reg & 0xef, ioaddr + lp->eeprom_reg);
- for (i=0; i < 6; i++)
- outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i);
-
- temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */
- outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */
- | RCV_Discard_BadFrame, ioaddr + REG1);
-
- temp_reg = inb(ioaddr + REG2); /* Match broadcast */
- outb(temp_reg | 0x14, ioaddr + REG2);
-
- temp_reg = inb(ioaddr + REG3);
- outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */
-
- /* Set the receiving mode */
- eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
-
- /* Set the interrupt vector */
- temp_reg = inb(ioaddr + INT_NO_REG);
- if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA)
- outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG);
- else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
-
-
- temp_reg = inb(ioaddr + INT_NO_REG);
- if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA)
- outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG);
- else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
-
- if (net_debug > 3)
- printk(KERN_DEBUG "eepro_open: content of INT Reg is %x\n", temp_reg);
-
-
- /* Initialize the RCV and XMT upper and lower limits */
- outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG);
- outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG);
- outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg);
- outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg);
-
- /* Enable the interrupt line. */
- eepro_en_intline(ioaddr);
-
- /* Switch back to Bank 0 */
- eepro_sw2bank0(ioaddr);
-
- /* Let RX and TX events to interrupt */
- eepro_en_int(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- /* Initialize RCV */
- outw(lp->rcv_lower_limit, ioaddr + RCV_BAR);
- lp->rx_start = lp->rcv_lower_limit;
- outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP);
-
- /* Initialize XMT */
- outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar);
- lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
- lp->tx_last = 0;
-
- /* Check for the i82595TX and i82595FX */
- old8 = inb(ioaddr + 8);
- outb(~old8, ioaddr + 8);
-
- if ((temp_reg = inb(ioaddr + 8)) == old8) {
- if (net_debug > 3)
- printk(KERN_DEBUG "i82595 detected!\n");
- lp->version = LAN595;
- }
- else {
- lp->version = LAN595TX;
- outb(old8, ioaddr + 8);
- old9 = inb(ioaddr + 9);
-
- if (irqMask==ee_FX_INT2IRQ) {
- if (net_debug > 3) {
- printk(KERN_DEBUG "IrqMask: %#x\n",irqMask);
- printk(KERN_DEBUG "i82595FX detected!\n");
- }
- lp->version = LAN595FX;
- outb(old9, ioaddr + 9);
- if (dev->if_port != TPE) { /* Hopefully, this will fix the
- problem of using Pentiums and
- pro/10 w/ BNC. */
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- temp_reg = inb(ioaddr + REG13);
- /* disable the full duplex mode since it is not
- applicable with the 10Base2 cable. */
- outb(temp_reg & ~(FDX | A_N_ENABLE), REG13);
- eepro_sw2bank0(ioaddr); /* be CAREFUL, BANK 0 now */
- }
- }
- else if (net_debug > 3) {
- printk(KERN_DEBUG "temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff));
- printk(KERN_DEBUG "i82595TX detected!\n");
- }
- }
-
- eepro_sel_reset(ioaddr);
-
- netif_start_queue(dev);
-
- if (net_debug > 3)
- printk(KERN_DEBUG "%s: exiting eepro_open routine.\n", dev->name);
-
- /* enabling rx */
- eepro_en_rx(ioaddr);
-
- return 0;
-}
-
-static void eepro_tx_timeout (struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- /* if (net_debug > 1) */
- printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name,
- "network cable problem");
- /* This is not a duplicate. One message for the console,
- one for the log file */
- printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name,
- "network cable problem");
- eepro_complete_selreset(ioaddr);
-}
-
-
-static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- unsigned long flags;
- int ioaddr = dev->base_addr;
- short length = skb->len;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name);
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
- netif_stop_queue (dev);
-
- eepro_dis_int(ioaddr);
- spin_lock_irqsave(&lp->lock, flags);
-
- {
- unsigned char *buf = skb->data;
-
- if (hardware_send_packet(dev, buf, length))
- /* we won't wake queue here because we're out of space */
- dev->stats.tx_dropped++;
- else {
- dev->stats.tx_bytes+=skb->len;
- netif_wake_queue(dev);
- }
-
- }
-
- dev_kfree_skb (skb);
-
- /* You might need to clean up and record Tx statistics here. */
- /* dev->stats.tx_aborted_errors++; */
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name);
-
- eepro_en_int(ioaddr);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return NETDEV_TX_OK;
-}
-
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-
-static irqreturn_t
-eepro_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct eepro_local *lp;
- int ioaddr, status, boguscount = 20;
- int handled = 0;
-
- lp = netdev_priv(dev);
-
- spin_lock(&lp->lock);
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering eepro_interrupt routine.\n", dev->name);
-
- ioaddr = dev->base_addr;
-
- while (((status = inb(ioaddr + STATUS_REG)) & (RX_INT|TX_INT)) && (boguscount--))
- {
- handled = 1;
- if (status & RX_INT) {
- if (net_debug > 4)
- printk(KERN_DEBUG "%s: packet received interrupt.\n", dev->name);
-
- eepro_dis_int(ioaddr);
-
- /* Get the received packets */
- eepro_ack_rx(ioaddr);
- eepro_rx(dev);
-
- eepro_en_int(ioaddr);
- }
- if (status & TX_INT) {
- if (net_debug > 4)
- printk(KERN_DEBUG "%s: packet transmit interrupt.\n", dev->name);
-
-
- eepro_dis_int(ioaddr);
-
- /* Process the status of transmitted packets */
- eepro_ack_tx(ioaddr);
- eepro_transmit_interrupt(dev);
-
- eepro_en_int(ioaddr);
- }
- }
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting eepro_interrupt routine.\n", dev->name);
-
- spin_unlock(&lp->lock);
- return IRQ_RETVAL(handled);
-}
-
-static int eepro_close(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- short temp_reg;
-
- netif_stop_queue(dev);
-
- eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */
-
- /* Disable the physical interrupt line. */
- temp_reg = inb(ioaddr + REG1);
- outb(temp_reg & 0x7f, ioaddr + REG1);
-
- eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
-
- /* Flush the Tx and disable Rx. */
- outb(STOP_RCV_CMD, ioaddr);
- lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
- lp->tx_last = 0;
-
- /* Mask all the interrupts. */
- eepro_dis_int(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- /* Reset the 82595 */
- eepro_reset(ioaddr);
-
- /* release the interrupt */
- free_irq(dev->irq, dev);
-
- /* Update the statistics here. What statistics? */
-
- return 0;
-}
-
-/* Set or clear the multicast filter for this adaptor.
- */
-static void
-set_multicast_list(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- unsigned short mode;
- struct netdev_hw_addr *ha;
- int mc_count = netdev_mc_count(dev);
-
- if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
- {
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- mode = inb(ioaddr + REG2);
- outb(mode | PRMSC_Mode, ioaddr + REG2);
- mode = inb(ioaddr + REG3);
- outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
- eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
- }
-
- else if (mc_count == 0)
- {
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- mode = inb(ioaddr + REG2);
- outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */
- mode = inb(ioaddr + REG3);
- outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
- eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
- }
-
- else
- {
- unsigned short status, *eaddrs;
- int i, boguscount = 0;
-
- /* Disable RX and TX interrupts. Necessary to avoid
- corruption of the HOST_ADDRESS_REG by interrupt
- service routines. */
- eepro_dis_int(ioaddr);
-
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- mode = inb(ioaddr + REG2);
- outb(mode | Multi_IA, ioaddr + REG2);
- mode = inb(ioaddr + REG3);
- outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
- eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
- outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
- outw(MC_SETUP, ioaddr + IO_PORT);
- outw(0, ioaddr + IO_PORT);
- outw(0, ioaddr + IO_PORT);
- outw(6 * (mc_count + 1), ioaddr + IO_PORT);
-
- netdev_for_each_mc_addr(ha, dev) {
- eaddrs = (unsigned short *) ha->addr;
- outw(*eaddrs++, ioaddr + IO_PORT);
- outw(*eaddrs++, ioaddr + IO_PORT);
- outw(*eaddrs++, ioaddr + IO_PORT);
- }
-
- eaddrs = (unsigned short *) dev->dev_addr;
- outw(eaddrs[0], ioaddr + IO_PORT);
- outw(eaddrs[1], ioaddr + IO_PORT);
- outw(eaddrs[2], ioaddr + IO_PORT);
- outw(lp->tx_end, ioaddr + lp->xmt_bar);
- outb(MC_SETUP, ioaddr);
-
- /* Update the transmit queue */
- i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1);
-
- if (lp->tx_start != lp->tx_end)
- {
- /* update the next address and the chain bit in the
- last packet */
- outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
- outw(i, ioaddr + IO_PORT);
- outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
- status = inw(ioaddr + IO_PORT);
- outw(status | CHAIN_BIT, ioaddr + IO_PORT);
- lp->tx_end = i ;
- }
- else {
- lp->tx_start = lp->tx_end = i ;
- }
-
- /* Acknowledge that the MC setup is done */
- do { /* We should be doing this in the eepro_interrupt()! */
- SLOW_DOWN;
- SLOW_DOWN;
- if (inb(ioaddr + STATUS_REG) & 0x08)
- {
- i = inb(ioaddr);
- outb(0x08, ioaddr + STATUS_REG);
-
- if (i & 0x20) { /* command ABORTed */
- printk(KERN_NOTICE "%s: multicast setup failed.\n",
- dev->name);
- break;
- } else if ((i & 0x0f) == 0x03) { /* MC-Done */
- printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n",
- dev->name, mc_count,
- mc_count > 1 ? "es":"");
- break;
- }
- }
- } while (++boguscount < 100);
-
- /* Re-enable RX and TX interrupts */
- eepro_en_int(ioaddr);
- }
- if (lp->eepro == LAN595FX_10ISA) {
- eepro_complete_selreset(ioaddr);
- }
- else
- eepro_en_rx(ioaddr);
-}
-
-/* The horrible routine to read a word from the serial EEPROM. */
-/* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */
-
-/* The delay between EEPROM clock transitions. */
-#define eeprom_delay() { udelay(40); }
-#define EE_READ_CMD (6 << 6)
-
-static int
-read_eeprom(int ioaddr, int location, struct net_device *dev)
-{
- int i;
- unsigned short retval = 0;
- struct eepro_local *lp = netdev_priv(dev);
- short ee_addr = ioaddr + lp->eeprom_reg;
- int read_cmd = location | EE_READ_CMD;
- short ctrl_val = EECS ;
-
- /* XXXX - black magic */
- eepro_sw2bank1(ioaddr);
- outb(0x00, ioaddr + STATUS_REG);
- /* XXXX - black magic */
-
- eepro_sw2bank2(ioaddr);
- outb(ctrl_val, ee_addr);
-
- /* Shift the read command bits out. */
- for (i = 8; i >= 0; i--) {
- short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
- : ctrl_val;
- outb(outval, ee_addr);
- outb(outval | EESK, ee_addr); /* EEPROM clock tick. */
- eeprom_delay();
- outb(outval, ee_addr); /* Finish EEPROM a clock tick. */
- eeprom_delay();
- }
- outb(ctrl_val, ee_addr);
-
- for (i = 16; i > 0; i--) {
- outb(ctrl_val | EESK, ee_addr); eeprom_delay();
- retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
- outb(ctrl_val, ee_addr); eeprom_delay();
- }
-
- /* Terminate the EEPROM access. */
- ctrl_val &= ~EECS;
- outb(ctrl_val | EESK, ee_addr);
- eeprom_delay();
- outb(ctrl_val, ee_addr);
- eeprom_delay();
- eepro_sw2bank0(ioaddr);
- return retval;
-}
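read_eeprom() above shifts out a 9-bit command word MSB-first: EE_READ_CMD (6 << 6) supplies the start bit and READ opcode in bits 8..6, and the low six bits select one of the 16-bit EEPROM words. A tiny sketch of how that command word is assembled; ee_read_cmd_word() is a hypothetical helper for illustration only and performs no hardware access.

#include <assert.h>

#define EE_READ_CMD (6 << 6)    /* start bit + READ opcode in bits 8..6 */

/* Build the 9-bit word that read_eeprom() clocks out MSB-first. */
static unsigned ee_read_cmd_word(unsigned location)
{
    return EE_READ_CMD | (location & 0x3f);   /* 6-bit word address */
}

int main(void)
{
    assert(ee_read_cmd_word(0) == 0x180);   /* word 0: opcode only     */
    assert(ee_read_cmd_word(2) == 0x182);   /* word 2: first MAC word  */
    assert(ee_read_cmd_word(7) == 0x187);   /* word 7: INT-to-IRQ mask */
    return 0;
}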
-
-static int
-hardware_send_packet(struct net_device *dev, void *buf, short length)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- unsigned status, tx_available, last, end;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name);
-
- /* determine how much of the transmit buffer space is available */
- if (lp->tx_end > lp->tx_start)
- tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start);
- else if (lp->tx_end < lp->tx_start)
- tx_available = lp->tx_start - lp->tx_end;
- else tx_available = lp->xmt_ram;
-
- if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) {
- /* No space available ??? */
- return 1;
- }
-
- last = lp->tx_end;
- end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
-
- if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */
- if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) {
- /* Arrrr!!!, must keep the xmt header together;
- several days were lost chasing this one down. */
- last = lp->xmt_lower_limit;
- end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
- }
- else end = lp->xmt_lower_limit + (end -
- lp->xmt_upper_limit + 2);
- }
-
- outw(last, ioaddr + HOST_ADDRESS_REG);
- outw(XMT_CMD, ioaddr + IO_PORT);
- outw(0, ioaddr + IO_PORT);
- outw(end, ioaddr + IO_PORT);
- outw(length, ioaddr + IO_PORT);
-
- if (lp->version == LAN595)
- outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1);
- else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
- unsigned short temp = inb(ioaddr + INT_MASK_REG);
- outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
- outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2);
- outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
- }
-
- /* A dummy read to flush the DRAM write pipeline */
- status = inw(ioaddr + IO_PORT);
-
- if (lp->tx_start == lp->tx_end) {
- outw(last, ioaddr + lp->xmt_bar);
- outb(XMT_CMD, ioaddr);
- lp->tx_start = last; /* I don't like to change tx_start here */
- }
- else {
- /* update the next address and the chain bit in the
- last packet */
-
- if (lp->tx_end != last) {
- outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
- outw(last, ioaddr + IO_PORT);
- }
-
- outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
- status = inw(ioaddr + IO_PORT);
- outw(status | CHAIN_BIT, ioaddr + IO_PORT);
-
- /* Continue the transmit command */
- outb(RESUME_XMT_CMD, ioaddr);
- }
-
- lp->tx_last = last;
- lp->tx_end = end;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name);
-
- return 0;
-}
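The free-space rule at the top of hardware_send_packet() treats the transmit area as a ring: if the used region does not wrap, free space is the ring size minus the used span; if it wraps, free space is the gap between end and start; an equal start and end means the ring is empty. A minimal sketch of that rule with made-up bounds; tx_free() is a hypothetical helper, not part of the driver.

#include <assert.h>

/* Free bytes in a circular transmit area of size ram, given the
 * current start (oldest pending frame) and end (next free byte). */
static unsigned tx_free(unsigned ram, unsigned start, unsigned end)
{
    if (end > start)
        return ram - (end - start);   /* used area does not wrap */
    if (end < start)
        return start - end;           /* used area wraps the top */
    return ram;                       /* empty ring              */
}

int main(void)
{
    /* Made-up 8 KB transmit area. */
    assert(tx_free(0x2000, 0x100,  0x100) == 0x2000);  /* empty     */
    assert(tx_free(0x2000, 0x100,  0x900) == 0x1800);  /* 2 KB used */
    assert(tx_free(0x2000, 0x1f00, 0x300) == 0x1c00);  /* wrapped   */
    return 0;
}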
-
-static void
-eepro_rx(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- short boguscount = 20;
- short rcv_car = lp->rx_start;
- unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering eepro_rx routine.\n", dev->name);
-
- /* Set the read pointer to the start of the RCV */
- outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
-
- rcv_event = inw(ioaddr + IO_PORT);
-
- while (rcv_event == RCV_DONE) {
-
- rcv_status = inw(ioaddr + IO_PORT);
- rcv_next_frame = inw(ioaddr + IO_PORT);
- rcv_size = inw(ioaddr + IO_PORT);
-
- if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {
-
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- dev->stats.rx_bytes+=rcv_size;
- rcv_size &= 0x3fff;
- skb = netdev_alloc_skb(dev, rcv_size + 5);
- if (skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
- lp->rx_start = rcv_next_frame;
- outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
-
- break;
- }
- skb_reserve(skb,2);
-
- if (lp->version == LAN595)
- insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1);
- else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
- unsigned short temp = inb(ioaddr + INT_MASK_REG);
- outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
- insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size),
- (rcv_size + 3) >> 2);
- outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
- }
-
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- }
-
- else { /* Not sure we will ever reach here,
- I set the 595 to discard bad received frames */
- dev->stats.rx_errors++;
-
- if (rcv_status & 0x0100)
- dev->stats.rx_over_errors++;
-
- else if (rcv_status & 0x0400)
- dev->stats.rx_frame_errors++;
-
- else if (rcv_status & 0x0800)
- dev->stats.rx_crc_errors++;
-
- printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
- dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
- }
-
- if (rcv_status & 0x1000)
- dev->stats.rx_length_errors++;
-
- rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
- lp->rx_start = rcv_next_frame;
-
- if (--boguscount == 0)
- break;
-
- outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
- rcv_event = inw(ioaddr + IO_PORT);
-
- }
- if (rcv_car == 0)
- rcv_car = lp->rcv_upper_limit | 0xff;
-
- outw(rcv_car - 1, ioaddr + RCV_STOP);
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting eepro_rx routine.\n", dev->name);
-}
-
-static void
-eepro_transmit_interrupt(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- short boguscount = 25;
- short xmt_status;
-
- while ((lp->tx_start != lp->tx_end) && boguscount--) {
-
- outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
- xmt_status = inw(ioaddr+IO_PORT);
-
- if (!(xmt_status & TX_DONE_BIT))
- break;
-
- xmt_status = inw(ioaddr+IO_PORT);
- lp->tx_start = inw(ioaddr+IO_PORT);
-
- netif_wake_queue (dev);
-
- if (xmt_status & TX_OK)
- dev->stats.tx_packets++;
- else {
- dev->stats.tx_errors++;
- if (xmt_status & 0x0400) {
- dev->stats.tx_carrier_errors++;
- printk(KERN_DEBUG "%s: carrier error\n",
- dev->name);
- printk(KERN_DEBUG "%s: XMT status = %#x\n",
- dev->name, xmt_status);
- }
- else {
- printk(KERN_DEBUG "%s: XMT status = %#x\n",
- dev->name, xmt_status);
- printk(KERN_DEBUG "%s: XMT status = %#x\n",
- dev->name, xmt_status);
- }
- }
- if (xmt_status & 0x000f) {
- dev->stats.collisions += (xmt_status & 0x000f);
- }
-
- if ((xmt_status & 0x0040) == 0x0) {
- dev->stats.tx_heartbeat_errors++;
- }
- }
-}
-
-static int eepro_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct eepro_local *lp = netdev_priv(dev);
-
- cmd->supported = SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_Autoneg;
- cmd->advertising = ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_Autoneg;
-
- if (GetBit(lp->word[5], ee_PortTPE)) {
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_TP;
- }
- if (GetBit(lp->word[5], ee_PortBNC)) {
- cmd->supported |= SUPPORTED_BNC;
- cmd->advertising |= ADVERTISED_BNC;
- }
- if (GetBit(lp->word[5], ee_PortAUI)) {
- cmd->supported |= SUPPORTED_AUI;
- cmd->advertising |= ADVERTISED_AUI;
- }
-
- ethtool_cmd_speed_set(cmd, SPEED_10);
-
- if (dev->if_port == TPE && lp->word[1] & ee_Duplex) {
- cmd->duplex = DUPLEX_FULL;
- }
- else {
- cmd->duplex = DUPLEX_HALF;
- }
-
- cmd->port = dev->if_port;
- cmd->phy_address = dev->base_addr;
- cmd->transceiver = XCVR_INTERNAL;
-
- if (lp->word[0] & ee_AutoNeg) {
- cmd->autoneg = 1;
- }
-
- return 0;
-}
-
-static void eepro_ethtool_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
- "ISA 0x%lx", dev->base_addr);
-}
-
-static const struct ethtool_ops eepro_ethtool_ops = {
- .get_settings = eepro_ethtool_get_settings,
- .get_drvinfo = eepro_ethtool_get_drvinfo,
-};
-
-#ifdef MODULE
-
-#define MAX_EEPRO 8
-static struct net_device *dev_eepro[MAX_EEPRO];
-
-static int io[MAX_EEPRO] = {
- [0 ... MAX_EEPRO-1] = -1
-};
-static int irq[MAX_EEPRO];
-static int mem[MAX_EEPRO] = { /* Size of the rx buffer in KB */
- [0 ... MAX_EEPRO-1] = RCV_DEFAULT_RAM/1024
-};
-static int autodetect;
-
-static int n_eepro;
-/* For linux 2.1.xx */
-
-MODULE_AUTHOR("Pascal Dupuis and others");
-MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver");
-MODULE_LICENSE("GPL");
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-module_param(autodetect, int, 0);
-MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
-MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
-MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int i;
- if (io[0] == -1 && autodetect == 0) {
- printk(KERN_WARNING "eepro_init_module: Probing is very dangerous on ISA boards!\n");
- printk(KERN_WARNING "eepro_init_module: Please add \"autodetect=1\" to force probe\n");
- return -ENODEV;
- }
- else if (autodetect) {
- /* if autodetect is set then we must force detection */
- for (i = 0; i < MAX_EEPRO; i++) {
- io[i] = 0;
- }
-
- printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n");
- }
-
- for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) {
- dev = alloc_etherdev(sizeof(struct eepro_local));
- if (!dev)
- break;
-
- dev->mem_end = mem[i];
- dev->base_addr = io[i];
- dev->irq = irq[i];
-
- if (do_eepro_probe(dev) == 0) {
- dev_eepro[n_eepro++] = dev;
- continue;
- }
- free_netdev(dev);
- break;
- }
-
- if (n_eepro)
- printk(KERN_INFO "%s", version);
-
- return n_eepro ? 0 : -ENODEV;
-}
-
-void __exit
-cleanup_module(void)
-{
- int i;
-
- for (i=0; i<n_eepro; i++) {
- struct net_device *dev = dev_eepro[i];
- unregister_netdev(dev);
- release_region(dev->base_addr, EEPRO_IO_EXTENT);
- free_netdev(dev);
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/eexpress.c b/drivers/net/ethernet/i825xx/eexpress.c
deleted file mode 100644
index 7a6a2f04c5b1..000000000000
--- a/drivers/net/ethernet/i825xx/eexpress.c
+++ /dev/null
@@ -1,1661 +0,0 @@
-/* Intel EtherExpress 16 device driver for Linux
- *
- * Written by John Sullivan, 1995
- * based on original code by Donald Becker, with changes by
- * Alan Cox and Pauline Middelink.
- *
- * Support for 8-bit mode by Zoltan Szilagyi <zoltans@cs.arizona.edu>
- *
- * Many modifications, and currently maintained, by
- * Philip Blundell <philb@gnu.org>
- * Added the Compaq LTE Alan Cox <alan@lxorguk.ukuu.org.uk>
- * Added MCA support Adam Fritzler (now deleted)
- *
- * Note - this driver is still experimental - it has problems on faster
- * machines. Someone needs to sit down and go through it line by line with
- * a databook...
- */
-
-/* The EtherExpress 16 is a fairly simple card, based on a shared-memory
- * design using the i82586 Ethernet coprocessor. It bears no relationship,
- * as far as I know, to the similarly-named "EtherExpress Pro" range.
- *
- * Historically, Linux support for these cards has been very bad. However,
- * things seem to be getting better slowly.
- */
-
-/* If your card is confused about what sort of interface it has (eg it
- * persistently reports "10baseT" when none is fitted), running 'SOFTSET /BART'
- * or 'SOFTSET /LISA' from DOS seems to help.
- */
-
-/* Here's the scoop on memory mapping.
- *
- * There are three ways to access EtherExpress card memory: either using the
- * shared-memory mapping, or using PIO through the dataport, or using PIO
- * through the "shadow memory" ports.
- *
- * The shadow memory system works by having the card map some of its memory
- * as follows:
- *
- * (the low five bits of the SMPTR are ignored)
- *
- * base+0x4000..400f memory at SMPTR+0..15
- * base+0x8000..800f memory at SMPTR+16..31
- * base+0xc000..c007 dubious stuff (memory at SMPTR+16..23 apparently)
- * base+0xc008..c00f memory at 0x0008..0x000f
- *
- * This last set (the one at c008) is particularly handy because the SCB
- * lives at 0x0008. So that set of ports gives us easy random access to data
- * in the SCB without having to mess around setting up pointers and the like.
- * We always use this method to access the SCB (via the scb_xx() functions).
- *
- * Dataport access works by aiming the appropriate (read or write) pointer
- * at the first address you're interested in, and then reading or writing from
- * the dataport. The pointers auto-increment after each transfer. We use
- * this for data transfer.
- *
- * We don't use the shared-memory system because it allegedly doesn't work on
- * all cards, and because it's a bit more prone to go wrong (it's one more
- * thing to configure...).
- */
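The window arithmetic described above is what the SHADOW() helper further down implements for the two 16-byte windows at base+0x4000 and base+0x8000 (the SCB itself is reached through the fixed window at base+0xc008). A standalone check of the same mapping; shadow_port() is a hypothetical copy of that arithmetic, for illustration only.

#include <assert.h>

/* Same arithmetic as the driver's SHADOW() helper below: map a 5-bit
 * offset from the shadow pointer onto the two 16-byte windows at
 * base+0x4000 and base+0x8000 (returned as offsets from base). */
static unsigned short shadow_port(short addr)
{
    addr &= 0x1f;                 /* only the low five bits matter    */
    if (addr > 0xf)
        addr += 0x3ff0;           /* second 16-byte window at +0x8000 */
    return addr + 0x4000;
}

int main(void)
{
    assert(shadow_port(0x00) == 0x4000);  /* SMPTR + 0  */
    assert(shadow_port(0x0f) == 0x400f);  /* SMPTR + 15 */
    assert(shadow_port(0x10) == 0x8000);  /* SMPTR + 16 */
    assert(shadow_port(0x1f) == 0x800f);  /* SMPTR + 31 */
    return 0;
}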
-
-/* Known bugs:
- *
- * - The card seems to want to give us two interrupts every time something
- * happens, where just one would be better.
- */
-
-/*
- *
- * Note by Zoltan Szilagyi 10-12-96:
- *
- * I've succeeded in eliminating the "CU wedged" messages, and hence the
- * lockups, which were only occurring with cards running in 8-bit mode ("force
- * 8-bit operation" in Intel's SoftSet utility). This version of the driver
- * sets the 82586 and the ASIC to 8-bit mode at startup; it also stops the
- * CU before submitting a packet for transmission, and then restarts it as soon
- * as the process of handing off the packet is complete. This is definitely an
- * unnecessary slowdown if the card is running in 16-bit mode; therefore one
- * should detect 16-bit vs 8-bit mode from the EEPROM settings and act
- * accordingly. In 8-bit mode with this bugfix I'm getting about 150 K/s for
- * ftp's, which is significantly better than I get in DOS, so the overhead of
- * stopping and restarting the CU with each transmit is not prohibitive in
- * practice.
- *
- * Update by David Woodhouse 11/5/99:
- *
- * I've seen "CU wedged" messages in 16-bit mode, on the Alpha architecture.
- * I assume that this is because 16-bit accesses are actually handled as two
- * 8-bit accesses.
- */
-
-#ifdef __alpha__
-#define LOCKUP16 1
-#endif
-#ifndef LOCKUP16
-#define LOCKUP16 0
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/string.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#ifndef NET_DEBUG
-#define NET_DEBUG 4
-#endif
-
-#include "eexpress.h"
-
-#define EEXP_IO_EXTENT 16
-
-/*
- * Private data declarations
- */
-
-struct net_local
-{
- unsigned long last_tx; /* jiffies when last transmit started */
- unsigned long init_time; /* jiffies when eexp_hw_init586 called */
- unsigned short rx_first; /* first rx buf, same as RX_BUF_START */
- unsigned short rx_last; /* last rx buf */
- unsigned short rx_ptr; /* first rx buf to look at */
- unsigned short tx_head; /* next free tx buf */
- unsigned short tx_reap; /* first in-use tx buf */
- unsigned short tx_tail; /* previous tx buf to tx_head */
- unsigned short tx_link; /* last known-executing tx buf */
- unsigned short last_tx_restart; /* set to tx_link when we
- restart the CU */
- unsigned char started;
- unsigned short rx_buf_start;
- unsigned short rx_buf_end;
- unsigned short num_tx_bufs;
- unsigned short num_rx_bufs;
- unsigned char width; /* 0 for 16bit, 1 for 8bit */
- unsigned char was_promisc;
- unsigned char old_mc_count;
- spinlock_t lock;
-};
-
-/* This is the code and data that is downloaded to the EtherExpress card's
- * memory at boot time.
- */
-
-static unsigned short start_code[] = {
-/* 0x0000 */
- 0x0001, /* ISCP: busy - cleared after reset */
- 0x0008,0x0000,0x0000, /* offset,address (lo,hi) of SCB */
-
- 0x0000,0x0000, /* SCB: status, commands */
- 0x0000,0x0000, /* links to first command block,
- first receive descriptor */
- 0x0000,0x0000, /* CRC error, alignment error counts */
- 0x0000,0x0000, /* out of resources, overrun error counts */
-
- 0x0000,0x0000, /* pad */
- 0x0000,0x0000,
-
-/* 0x20 -- start of 82586 CU program */
-#define CONF_LINK 0x20
- 0x0000,Cmd_Config,
- 0x0032, /* link to next command */
- 0x080c, /* 12 bytes follow : fifo threshold=8 */
- 0x2e40, /* don't rx bad frames
- * SRDY/ARDY => ext. sync. : preamble len=8
- * take addresses from data buffers
- * 6 bytes/address
- */
- 0x6000, /* default backoff method & priority
- * interframe spacing = 0x60 */
- 0xf200, /* slot time=0x200
- * max collision retry = 0xf */
-#define CONF_PROMISC 0x2e
- 0x0000, /* no HDLC : normal CRC : enable broadcast
- * disable promiscuous/multicast modes */
- 0x003c, /* minimum frame length = 60 octets */
-
- 0x0000,Cmd_SetAddr,
- 0x003e, /* link to next command */
-#define CONF_HWADDR 0x38
- 0x0000,0x0000,0x0000, /* hardware address placed here */
-
- 0x0000,Cmd_MCast,
- 0x0076, /* link to next command */
-#define CONF_NR_MULTICAST 0x44
- 0x0000, /* number of bytes in multicast address(es) */
-#define CONF_MULTICAST 0x46
- 0x0000, 0x0000, 0x0000, /* some addresses */
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
-
-#define CONF_DIAG_RESULT 0x76
- 0x0000, Cmd_Diag,
- 0x007c, /* link to next command */
-
- 0x0000,Cmd_TDR|Cmd_INT,
- 0x0084,
-#define CONF_TDR_RESULT 0x82
- 0x0000,
-
- 0x0000,Cmd_END|Cmd_Nop, /* end of configure sequence */
- 0x0084 /* dummy link */
-};
-
-/* maps irq number to EtherExpress magic value */
-static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 };
-
-/*
- * Prototypes for Linux interface
- */
-
-static int eexp_open(struct net_device *dev);
-static int eexp_close(struct net_device *dev);
-static void eexp_timeout(struct net_device *dev);
-static netdev_tx_t eexp_xmit(struct sk_buff *buf,
- struct net_device *dev);
-
-static irqreturn_t eexp_irq(int irq, void *dev_addr);
-static void eexp_set_multicast(struct net_device *dev);
-
-/*
- * Prototypes for hardware access functions
- */
-
-static void eexp_hw_rx_pio(struct net_device *dev);
-static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
- unsigned short len);
-static int eexp_hw_probe(struct net_device *dev,unsigned short ioaddr);
-static unsigned short eexp_hw_readeeprom(unsigned short ioaddr,
- unsigned char location);
-
-static unsigned short eexp_hw_lasttxstat(struct net_device *dev);
-static void eexp_hw_txrestart(struct net_device *dev);
-
-static void eexp_hw_txinit (struct net_device *dev);
-static void eexp_hw_rxinit (struct net_device *dev);
-
-static void eexp_hw_init586 (struct net_device *dev);
-static void eexp_setup_filter (struct net_device *dev);
-
-static char *eexp_ifmap[]={"AUI", "BNC", "RJ45"};
-enum eexp_iftype {AUI=0, BNC=1, TPE=2};
-
-#define STARTED_RU 2
-#define STARTED_CU 1
-
-/*
- * Primitive hardware access functions.
- */
-
-static inline unsigned short scb_status(struct net_device *dev)
-{
- return inw(dev->base_addr + 0xc008);
-}
-
-static inline unsigned short scb_rdcmd(struct net_device *dev)
-{
- return inw(dev->base_addr + 0xc00a);
-}
-
-static inline void scb_command(struct net_device *dev, unsigned short cmd)
-{
- outw(cmd, dev->base_addr + 0xc00a);
-}
-
-static inline void scb_wrcbl(struct net_device *dev, unsigned short val)
-{
- outw(val, dev->base_addr + 0xc00c);
-}
-
-static inline void scb_wrrfa(struct net_device *dev, unsigned short val)
-{
- outw(val, dev->base_addr + 0xc00e);
-}
-
-static inline void set_loopback(struct net_device *dev)
-{
- outb(inb(dev->base_addr + Config) | 2, dev->base_addr + Config);
-}
-
-static inline void clear_loopback(struct net_device *dev)
-{
- outb(inb(dev->base_addr + Config) & ~2, dev->base_addr + Config);
-}
-
-static inline unsigned short int SHADOW(short int addr)
-{
- addr &= 0x1f;
- if (addr > 0xf) addr += 0x3ff0;
- return addr + 0x4000;
-}
-
-/*
- * Linux interface
- */
-
-/*
- * checks for presence of EtherExpress card
- */
-
-static int __init do_express_probe(struct net_device *dev)
-{
- unsigned short *port;
- static unsigned short ports[] = { 0x240,0x300,0x310,0x270,0x320,0x340,0 };
- unsigned short ioaddr = dev->base_addr;
- int dev_irq = dev->irq;
- int err;
-
- dev->if_port = 0xff; /* not set */
-
- if (ioaddr&0xfe00) {
- if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress"))
- return -EBUSY;
- err = eexp_hw_probe(dev,ioaddr);
- release_region(ioaddr, EEXP_IO_EXTENT);
- return err;
- } else if (ioaddr)
- return -ENXIO;
-
- for (port=&ports[0] ; *port ; port++ )
- {
- unsigned short sum = 0;
- int i;
- if (!request_region(*port, EEXP_IO_EXTENT, "EtherExpress"))
- continue;
- for ( i=0 ; i<4 ; i++ )
- {
- unsigned short t;
- t = inb(*port + ID_PORT);
- sum |= (t>>4) << ((t & 0x03)<<2);
- }
- if (sum==0xbaba && !eexp_hw_probe(dev,*port)) {
- release_region(*port, EEXP_IO_EXTENT);
- return 0;
- }
- release_region(*port, EEXP_IO_EXTENT);
- dev->irq = dev_irq;
- }
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init express_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_express_probe(dev);
- if (!err)
- return dev;
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-/*
- * open and initialize the adapter, ready for use
- */
-
-static int eexp_open(struct net_device *dev)
-{
- int ret;
- unsigned short ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
-
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: eexp_open()\n", dev->name);
-#endif
-
- if (!dev->irq || !irqrmap[dev->irq])
- return -ENXIO;
-
- ret = request_irq(dev->irq, eexp_irq, 0, dev->name, dev);
- if (ret)
- return ret;
-
- if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) {
- printk(KERN_WARNING "EtherExpress io port %x is busy.\n"
- , ioaddr);
- goto err_out1;
- }
- if (!request_region(ioaddr+0x4000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
- printk(KERN_WARNING "EtherExpress io port %x is busy.\n"
- , ioaddr+0x4000);
- goto err_out2;
- }
- if (!request_region(ioaddr+0x8000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
- printk(KERN_WARNING "EtherExpress io port %x is busy.\n"
- , ioaddr+0x8000);
- goto err_out3;
- }
- if (!request_region(ioaddr+0xc000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
- printk(KERN_WARNING "EtherExpress io port %x is busy.\n"
- , ioaddr+0xc000);
- goto err_out4;
- }
-
- if (lp->width) {
- printk("%s: forcing ASIC to 8-bit mode\n", dev->name);
- outb(inb(dev->base_addr+Config)&~4, dev->base_addr+Config);
- }
-
- eexp_hw_init586(dev);
- netif_start_queue(dev);
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: leaving eexp_open()\n", dev->name);
-#endif
- return 0;
-
- err_out4:
- release_region(ioaddr+0x8000, EEXP_IO_EXTENT);
- err_out3:
- release_region(ioaddr+0x4000, EEXP_IO_EXTENT);
- err_out2:
- release_region(ioaddr, EEXP_IO_EXTENT);
- err_out1:
- free_irq(dev->irq, dev);
- return -EBUSY;
-}
-
-/*
- * close and disable the interface, leaving the 586 in reset.
- */
-
-static int eexp_close(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
-
- int irq = dev->irq;
-
- netif_stop_queue(dev);
-
- outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ);
- lp->started = 0;
- scb_command(dev, SCB_CUsuspend|SCB_RUsuspend);
- outb(0,ioaddr+SIGNAL_CA);
- free_irq(irq,dev);
- outb(i586_RST,ioaddr+EEPROM_Ctrl);
- release_region(ioaddr, EEXP_IO_EXTENT);
- release_region(ioaddr+0x4000, 16);
- release_region(ioaddr+0x8000, 16);
- release_region(ioaddr+0xc000, 16);
-
- return 0;
-}
-
-/*
- * This gets called when a higher level thinks we are broken. Check that
- * nothing has become jammed in the CU.
- */
-
-static void unstick_cu(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
-
- if (lp->started)
- {
- if (time_after(jiffies, dev_trans_start(dev) + HZ/2))
- {
- if (lp->tx_link==lp->last_tx_restart)
- {
- unsigned short boguscount=200,rsst;
- printk(KERN_WARNING "%s: Retransmit timed out, status %04x, resetting...\n",
- dev->name, scb_status(dev));
- eexp_hw_txinit(dev);
- lp->last_tx_restart = 0;
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- while (!SCB_complete(rsst=scb_status(dev)))
- {
- if (!--boguscount)
- {
- boguscount=200;
- printk(KERN_WARNING "%s: Reset timed out status %04x, retrying...\n",
- dev->name,rsst);
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- }
- }
- netif_wake_queue(dev);
- }
- else
- {
- unsigned short status = scb_status(dev);
- if (SCB_CUdead(status))
- {
- unsigned short txstatus = eexp_hw_lasttxstat(dev);
- printk(KERN_WARNING "%s: Transmit timed out, CU not active status %04x %04x, restarting...\n",
- dev->name, status, txstatus);
- eexp_hw_txrestart(dev);
- }
- else
- {
- unsigned short txstatus = eexp_hw_lasttxstat(dev);
- if (netif_queue_stopped(dev) && !txstatus)
- {
- printk(KERN_WARNING "%s: CU wedged, status %04x %04x, resetting...\n",
- dev->name,status,txstatus);
- eexp_hw_init586(dev);
- netif_wake_queue(dev);
- }
- else
- {
- printk(KERN_WARNING "%s: transmit timed out\n", dev->name);
- }
- }
- }
- }
- }
- else
- {
- if (time_after(jiffies, lp->init_time + 10))
- {
- unsigned short status = scb_status(dev);
- printk(KERN_WARNING "%s: i82586 startup timed out, status %04x, resetting...\n",
- dev->name, status);
- eexp_hw_init586(dev);
- netif_wake_queue(dev);
- }
- }
-}
-
-static void eexp_timeout(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
-#ifdef CONFIG_SMP
- unsigned long flags;
-#endif
- int status;
-
- disable_irq(dev->irq);
-
- /*
- * Best would be to use synchronize_irq(); spin_lock() here
- * let's make it work first...
- */
-
-#ifdef CONFIG_SMP
- spin_lock_irqsave(&lp->lock, flags);
-#endif
-
- status = scb_status(dev);
- unstick_cu(dev);
- printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name,
- (SCB_complete(status)?"lost interrupt":
- "board on fire"));
- dev->stats.tx_errors++;
- lp->last_tx = jiffies;
- if (!SCB_complete(status)) {
- scb_command(dev, SCB_CUabort);
- outb(0,dev->base_addr+SIGNAL_CA);
- }
- netif_wake_queue(dev);
-#ifdef CONFIG_SMP
- spin_unlock_irqrestore(&lp->lock, flags);
-#endif
-}
-
-/*
- * Called to transmit a packet, or to allow us to right ourselves
- * if the kernel thinks we've died.
- */
-static netdev_tx_t eexp_xmit(struct sk_buff *buf, struct net_device *dev)
-{
- short length = buf->len;
-#ifdef CONFIG_SMP
- struct net_local *lp = netdev_priv(dev);
- unsigned long flags;
-#endif
-
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name);
-#endif
-
- if (buf->len < ETH_ZLEN) {
- if (skb_padto(buf, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
-
- disable_irq(dev->irq);
-
- /*
- * Best would be to use synchronize_irq(); spin_lock() here
- * let's make it work first...
- */
-
-#ifdef CONFIG_SMP
- spin_lock_irqsave(&lp->lock, flags);
-#endif
-
- {
- unsigned short *data = (unsigned short *)buf->data;
-
- dev->stats.tx_bytes += length;
-
- eexp_hw_tx_pio(dev,data,length);
- }
- dev_kfree_skb(buf);
-#ifdef CONFIG_SMP
- spin_unlock_irqrestore(&lp->lock, flags);
-#endif
- enable_irq(dev->irq);
- return NETDEV_TX_OK;
-}
-
-/*
- * Handle an EtherExpress interrupt
- * If we've finished initializing, start the RU and CU up.
- * If we've already started, reap tx buffers, handle any received packets,
- * check to make sure we've not become wedged.
- */
-
-static unsigned short eexp_start_irq(struct net_device *dev,
- unsigned short status)
-{
- unsigned short ack_cmd = SCB_ack(status);
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
- if ((dev->flags & IFF_UP) && !(lp->started & STARTED_CU)) {
- short diag_status, tdr_status;
- while (SCB_CUstat(status)==2)
- status = scb_status(dev);
-#if NET_DEBUG > 4
- printk("%s: CU went non-active (status %04x)\n",
- dev->name, status);
-#endif
-
- outw(CONF_DIAG_RESULT & ~31, ioaddr + SM_PTR);
- diag_status = inw(ioaddr + SHADOW(CONF_DIAG_RESULT));
- if (diag_status & 1<<11) {
- printk(KERN_WARNING "%s: 82586 failed self-test\n",
- dev->name);
- } else if (!(diag_status & 1<<13)) {
- printk(KERN_WARNING "%s: 82586 self-test failed to complete\n", dev->name);
- }
-
- outw(CONF_TDR_RESULT & ~31, ioaddr + SM_PTR);
- tdr_status = inw(ioaddr + SHADOW(CONF_TDR_RESULT));
- if (tdr_status & (TDR_SHORT|TDR_OPEN)) {
- printk(KERN_WARNING "%s: TDR reports cable %s at %d tick%s\n", dev->name, (tdr_status & TDR_SHORT)?"short":"broken", tdr_status & TDR_TIME, ((tdr_status & TDR_TIME) != 1) ? "s" : "");
- }
- else if (tdr_status & TDR_XCVRPROBLEM) {
- printk(KERN_WARNING "%s: TDR reports transceiver problem\n", dev->name);
- }
- else if (tdr_status & TDR_LINKOK) {
-#if NET_DEBUG > 4
- printk(KERN_DEBUG "%s: TDR reports link OK\n", dev->name);
-#endif
- } else {
- printk("%s: TDR is ga-ga (status %04x)\n", dev->name,
- tdr_status);
- }
-
- lp->started |= STARTED_CU;
- scb_wrcbl(dev, lp->tx_link);
- /* if the RU isn't running, start it now */
- if (!(lp->started & STARTED_RU)) {
- ack_cmd |= SCB_RUstart;
- scb_wrrfa(dev, lp->rx_buf_start);
- lp->rx_ptr = lp->rx_buf_start;
- lp->started |= STARTED_RU;
- }
- ack_cmd |= SCB_CUstart | 0x2000;
- }
-
- if ((dev->flags & IFF_UP) && !(lp->started & STARTED_RU) && SCB_RUstat(status)==4)
- lp->started|=STARTED_RU;
-
- return ack_cmd;
-}
-
-static void eexp_cmd_clear(struct net_device *dev)
-{
- unsigned long int oldtime = jiffies;
- while (scb_rdcmd(dev) && (time_before(jiffies, oldtime + 10)));
- if (scb_rdcmd(dev)) {
- printk("%s: command didn't clear\n", dev->name);
- }
-}
-
-static irqreturn_t eexp_irq(int dummy, void *dev_info)
-{
- struct net_device *dev = dev_info;
- struct net_local *lp;
- unsigned short ioaddr,status,ack_cmd;
- unsigned short old_read_ptr, old_write_ptr;
-
- lp = netdev_priv(dev);
- ioaddr = dev->base_addr;
-
- spin_lock(&lp->lock);
-
- old_read_ptr = inw(ioaddr+READ_PTR);
- old_write_ptr = inw(ioaddr+WRITE_PTR);
-
- outb(SIRQ_dis|irqrmap[dev->irq], ioaddr+SET_IRQ);
-
- status = scb_status(dev);
-
-#if NET_DEBUG > 4
- printk(KERN_DEBUG "%s: interrupt (status %x)\n", dev->name, status);
-#endif
-
- if (lp->started == (STARTED_CU | STARTED_RU)) {
-
- do {
- eexp_cmd_clear(dev);
-
- ack_cmd = SCB_ack(status);
- scb_command(dev, ack_cmd);
- outb(0,ioaddr+SIGNAL_CA);
-
- eexp_cmd_clear(dev);
-
- if (SCB_complete(status)) {
- if (!eexp_hw_lasttxstat(dev)) {
- printk("%s: tx interrupt but no status\n", dev->name);
- }
- }
-
- if (SCB_rxdframe(status))
- eexp_hw_rx_pio(dev);
-
- status = scb_status(dev);
- } while (status & 0xc000);
-
- if (SCB_RUdead(status))
- {
- printk(KERN_WARNING "%s: RU stopped: status %04x\n",
- dev->name,status);
-#if 0
- printk(KERN_WARNING "%s: cur_rfd=%04x, cur_rbd=%04x\n", dev->name, lp->cur_rfd, lp->cur_rbd);
- outw(lp->cur_rfd, ioaddr+READ_PTR);
- printk(KERN_WARNING "%s: [%04x]\n", dev->name, inw(ioaddr+DATAPORT));
- outw(lp->cur_rfd+6, ioaddr+READ_PTR);
- printk(KERN_WARNING "%s: rbd is %04x\n", dev->name, rbd= inw(ioaddr+DATAPORT));
- outw(rbd, ioaddr+READ_PTR);
- printk(KERN_WARNING "%s: [%04x %04x] ", dev->name, inw(ioaddr+DATAPORT), inw(ioaddr+DATAPORT));
- outw(rbd+8, ioaddr+READ_PTR);
- printk("[%04x]\n", inw(ioaddr+DATAPORT));
-#endif
- dev->stats.rx_errors++;
-#if 1
- eexp_hw_rxinit(dev);
-#else
- lp->cur_rfd = lp->first_rfd;
-#endif
- scb_wrrfa(dev, lp->rx_buf_start);
- scb_command(dev, SCB_RUstart);
- outb(0,ioaddr+SIGNAL_CA);
- }
- } else {
- if (status & 0x8000)
- ack_cmd = eexp_start_irq(dev, status);
- else
- ack_cmd = SCB_ack(status);
- scb_command(dev, ack_cmd);
- outb(0,ioaddr+SIGNAL_CA);
- }
-
- eexp_cmd_clear(dev);
-
- outb(SIRQ_en|irqrmap[dev->irq], ioaddr+SET_IRQ);
-
-#if NET_DEBUG > 6
- printk("%s: leaving eexp_irq()\n", dev->name);
-#endif
- outw(old_read_ptr, ioaddr+READ_PTR);
- outw(old_write_ptr, ioaddr+WRITE_PTR);
-
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-/*
- * Hardware access functions
- */
-
-/*
- * Set the cable type to use.
- */
-
-static void eexp_hw_set_interface(struct net_device *dev)
-{
- unsigned char oldval = inb(dev->base_addr + 0x300e);
- oldval &= ~0x82;
- switch (dev->if_port) {
- case TPE:
- oldval |= 0x2;
- case BNC:
- oldval |= 0x80;
- break;
- }
- outb(oldval, dev->base_addr+0x300e);
- mdelay(20);
-}
-
-/*
- * Check all the receive buffers, and hand any received packets
- * to the upper levels. Basic sanity check on each frame
- * descriptor, though we don't bother trying to fix broken ones.
- */
-
-static void eexp_hw_rx_pio(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short rx_block = lp->rx_ptr;
- unsigned short boguscount = lp->num_rx_bufs;
- unsigned short ioaddr = dev->base_addr;
- unsigned short status;
-
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: eexp_hw_rx()\n", dev->name);
-#endif
-
- do {
- unsigned short rfd_cmd, rx_next, pbuf, pkt_len;
-
- outw(rx_block, ioaddr + READ_PTR);
- status = inw(ioaddr + DATAPORT);
-
- if (FD_Done(status))
- {
- rfd_cmd = inw(ioaddr + DATAPORT);
- rx_next = inw(ioaddr + DATAPORT);
- pbuf = inw(ioaddr + DATAPORT);
-
- outw(pbuf, ioaddr + READ_PTR);
- pkt_len = inw(ioaddr + DATAPORT);
-
- if (rfd_cmd!=0x0000)
- {
- printk(KERN_WARNING "%s: rfd_cmd not zero:0x%04x\n",
- dev->name, rfd_cmd);
- continue;
- }
- else if (pbuf!=rx_block+0x16)
- {
- printk(KERN_WARNING "%s: rfd and rbd out of sync 0x%04x 0x%04x\n",
- dev->name, rx_block+0x16, pbuf);
- continue;
- }
- else if ((pkt_len & 0xc000)!=0xc000)
- {
- printk(KERN_WARNING "%s: EOF or F not set on received buffer (%04x)\n",
- dev->name, pkt_len & 0xc000);
- continue;
- }
- else if (!FD_OK(status))
- {
- dev->stats.rx_errors++;
- if (FD_CRC(status))
- dev->stats.rx_crc_errors++;
- if (FD_Align(status))
- dev->stats.rx_frame_errors++;
- if (FD_Resrc(status))
- dev->stats.rx_fifo_errors++;
- if (FD_DMA(status))
- dev->stats.rx_over_errors++;
- if (FD_Short(status))
- dev->stats.rx_length_errors++;
- }
- else
- {
- struct sk_buff *skb;
- pkt_len &= 0x3fff;
- skb = netdev_alloc_skb(dev, pkt_len + 16);
- if (skb == NULL)
- {
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name);
- dev->stats.rx_dropped++;
- break;
- }
- skb_reserve(skb, 2);
- outw(pbuf+10, ioaddr+READ_PTR);
- insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1);
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- outw(rx_block, ioaddr+WRITE_PTR);
- outw(0, ioaddr+DATAPORT);
- outw(0, ioaddr+DATAPORT);
- rx_block = rx_next;
- }
- } while (FD_Done(status) && boguscount--);
- lp->rx_ptr = rx_block;
-}
-
-/*
- * Hand a packet to the card for transmission
- * If we get here, we MUST have already checked
- * to make sure there is room in the transmit
- * buffer region.
- */
-
-static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
- unsigned short len)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
-
- if (LOCKUP16 || lp->width) {
- /* Stop the CU so that there is no chance that it
- jumps off to a bogus address while we are writing the
- pointer to the next transmit packet in 8-bit mode --
- this eliminates the "CU wedged" errors in 8-bit mode.
- (Zoltan Szilagyi 10-12-96) */
- scb_command(dev, SCB_CUsuspend);
- outw(0xFFFF, ioaddr+SIGNAL_CA);
- }
-
- outw(lp->tx_head, ioaddr + WRITE_PTR);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
- outw(lp->tx_head+0x08, ioaddr + DATAPORT);
- outw(lp->tx_head+0x0e, ioaddr + DATAPORT);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(0x0000, ioaddr + DATAPORT);
- outw(lp->tx_head+0x08, ioaddr + DATAPORT);
-
- outw(0x8000|len, ioaddr + DATAPORT);
- outw(-1, ioaddr + DATAPORT);
- outw(lp->tx_head+0x16, ioaddr + DATAPORT);
- outw(0, ioaddr + DATAPORT);
-
- outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
-
- outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR);
- outw(lp->tx_head, ioaddr + DATAPORT);
-
- dev->trans_start = jiffies;
- lp->tx_tail = lp->tx_head;
- if (lp->tx_head==TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
- lp->tx_head = TX_BUF_START;
- else
- lp->tx_head += TX_BUF_SIZE;
- if (lp->tx_head != lp->tx_reap)
- netif_wake_queue(dev);
-
- if (LOCKUP16 || lp->width) {
- /* Restart the CU so that the packet can actually
- be transmitted. (Zoltan Szilagyi 10-12-96) */
- scb_command(dev, SCB_CUresume);
- outw(0xFFFF, ioaddr+SIGNAL_CA);
- }
-
- dev->stats.tx_packets++;
- lp->last_tx = jiffies;
-}
-
-static const struct net_device_ops eexp_netdev_ops = {
- .ndo_open = eexp_open,
- .ndo_stop = eexp_close,
- .ndo_start_xmit = eexp_xmit,
- .ndo_set_rx_mode = eexp_set_multicast,
- .ndo_tx_timeout = eexp_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/*
- * Sanity check the suspected EtherExpress card
- * Read hardware address, reset card, size memory and initialize buffer
- * memory pointers. These are held in netdev_priv(), in case someone has more
- * than one card in a machine.
- */
-
-static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
-{
- unsigned short hw_addr[3];
- unsigned char buswidth;
- unsigned int memory_size;
- int i;
- unsigned short xsum = 0;
- struct net_local *lp = netdev_priv(dev);
-
- printk("%s: EtherExpress 16 at %#x ",dev->name,ioaddr);
-
- outb(ASIC_RST, ioaddr+EEPROM_Ctrl);
- outb(0, ioaddr+EEPROM_Ctrl);
- udelay(500);
- outb(i586_RST, ioaddr+EEPROM_Ctrl);
-
- hw_addr[0] = eexp_hw_readeeprom(ioaddr,2);
- hw_addr[1] = eexp_hw_readeeprom(ioaddr,3);
- hw_addr[2] = eexp_hw_readeeprom(ioaddr,4);
-
- /* Standard Address or Compaq LTE Address */
- if (!((hw_addr[2]==0x00aa && ((hw_addr[1] & 0xff00)==0x0000)) ||
- (hw_addr[2]==0x0080 && ((hw_addr[1] & 0xff00)==0x5F00))))
- {
- printk(" rejected: invalid address %04x%04x%04x\n",
- hw_addr[2],hw_addr[1],hw_addr[0]);
- return -ENODEV;
- }
-
- /* Calculate the EEPROM checksum. Carry on anyway if it's bad,
- * though.
- */
- for (i = 0; i < 64; i++)
- xsum += eexp_hw_readeeprom(ioaddr, i);
- if (xsum != 0xbaba)
- printk(" (bad EEPROM xsum 0x%02x)", xsum);
-
- dev->base_addr = ioaddr;
- for ( i=0 ; i<6 ; i++ )
- dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i];
-
- {
- static const char irqmap[] = { 0, 9, 3, 4, 5, 10, 11, 0 };
- unsigned short setupval = eexp_hw_readeeprom(ioaddr,0);
-
- /* Use the IRQ from EEPROM if none was given */
- if (!dev->irq)
- dev->irq = irqmap[setupval>>13];
-
- if (dev->if_port == 0xff) {
- dev->if_port = !(setupval & 0x1000) ? AUI :
- eexp_hw_readeeprom(ioaddr,5) & 0x1 ? TPE : BNC;
- }
-
- buswidth = !((setupval & 0x400) >> 10);
- }
-
- memset(lp, 0, sizeof(struct net_local));
- spin_lock_init(&lp->lock);
-
- printk("(IRQ %d, %s connector, %d-bit bus", dev->irq,
- eexp_ifmap[dev->if_port], buswidth?8:16);
-
- if (!request_region(dev->base_addr + 0x300e, 1, "EtherExpress"))
- return -EBUSY;
-
- eexp_hw_set_interface(dev);
-
- release_region(dev->base_addr + 0x300e, 1);
-
- /* Find out how much RAM we have on the card */
- outw(0, dev->base_addr + WRITE_PTR);
- for (i = 0; i < 32768; i++)
- outw(0, dev->base_addr + DATAPORT);
-
- for (memory_size = 0; memory_size < 64; memory_size++)
- {
- outw(memory_size<<10, dev->base_addr + READ_PTR);
- if (inw(dev->base_addr+DATAPORT))
- break;
- outw(memory_size<<10, dev->base_addr + WRITE_PTR);
- outw(memory_size | 0x5000, dev->base_addr+DATAPORT);
- outw(memory_size<<10, dev->base_addr + READ_PTR);
- if (inw(dev->base_addr+DATAPORT) != (memory_size | 0x5000))
- break;
- }
-
- /* Sort out the number of buffers. We may have 16, 32, 48 or 64k
- * of RAM to play with.
- */
- lp->num_tx_bufs = 4;
- lp->rx_buf_end = 0x3ff6;
- switch (memory_size)
- {
- case 64:
- lp->rx_buf_end += 0x4000;
- case 48:
- lp->num_tx_bufs += 4;
- lp->rx_buf_end += 0x4000;
- case 32:
- lp->rx_buf_end += 0x4000;
- case 16:
- printk(", %dk RAM)\n", memory_size);
- break;
- default:
- printk(") bad memory size (%dk).\n", memory_size);
- return -ENODEV;
- break;
- }
-
- lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE);
- lp->width = buswidth;
-
- dev->netdev_ops = &eexp_netdev_ops;
- dev->watchdog_timeo = 2*HZ;
-
- return register_netdev(dev);
-}
-
-/*
- * Read a word from the EtherExpress on-board serial EEPROM.
- * The EEPROM contains 64 words of 16 bits.
- */
-static unsigned short __init eexp_hw_readeeprom(unsigned short ioaddr,
- unsigned char location)
-{
- unsigned short cmd = 0x180|(location&0x7f);
- unsigned short rval = 0,wval = EC_CS|i586_RST;
- int i;
-
- outb(EC_CS|i586_RST,ioaddr+EEPROM_Ctrl);
- for (i=0x100 ; i ; i>>=1 )
- {
- if (cmd&i)
- wval |= EC_Wr;
- else
- wval &= ~EC_Wr;
-
- outb(wval,ioaddr+EEPROM_Ctrl);
- outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- outb(wval,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- }
- wval &= ~EC_Wr;
- outb(wval,ioaddr+EEPROM_Ctrl);
- for (i=0x8000 ; i ; i>>=1 )
- {
- outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- if (inb(ioaddr+EEPROM_Ctrl)&EC_Rd)
- rval |= i;
- outb(wval,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- }
- wval &= ~EC_CS;
- outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- outb(wval,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- return rval;
-}
-
-/*
- * Reap tx buffers and return last transmit status.
- * if ==0 then either:
- * a) we're not transmitting anything, so why are we here?
- * b) we've died.
- * otherwise, Stat_Busy(return) means we've still got some packets
- * to transmit, Stat_Done(return) means our buffers should be empty
- * again
- */
-
-static unsigned short eexp_hw_lasttxstat(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short tx_block = lp->tx_reap;
- unsigned short status;
-
- if (!netif_queue_stopped(dev) && lp->tx_head==lp->tx_reap)
- return 0x0000;
-
- do
- {
- outw(tx_block & ~31, dev->base_addr + SM_PTR);
- status = inw(dev->base_addr + SHADOW(tx_block));
- if (!Stat_Done(status))
- {
- lp->tx_link = tx_block;
- return status;
- }
- else
- {
- lp->last_tx_restart = 0;
- dev->stats.collisions += Stat_NoColl(status);
- if (!Stat_OK(status))
- {
- char *whatsup = NULL;
- dev->stats.tx_errors++;
- if (Stat_Abort(status))
- dev->stats.tx_aborted_errors++;
- if (Stat_TNoCar(status)) {
- whatsup = "aborted, no carrier";
- dev->stats.tx_carrier_errors++;
- }
- if (Stat_TNoCTS(status)) {
- whatsup = "aborted, lost CTS";
- dev->stats.tx_carrier_errors++;
- }
- if (Stat_TNoDMA(status)) {
- whatsup = "FIFO underran";
- dev->stats.tx_fifo_errors++;
- }
- if (Stat_TXColl(status)) {
- whatsup = "aborted, too many collisions";
- dev->stats.tx_aborted_errors++;
- }
- if (whatsup)
- printk(KERN_INFO "%s: transmit %s\n",
- dev->name, whatsup);
- }
- else
- dev->stats.tx_packets++;
- }
- if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
- lp->tx_reap = tx_block = TX_BUF_START;
- else
- lp->tx_reap = tx_block += TX_BUF_SIZE;
- netif_wake_queue(dev);
- }
- while (lp->tx_reap != lp->tx_head);
-
- lp->tx_link = lp->tx_tail + 0x08;
-
- return status;
-}
-
-/*
- * This should never happen. It is called when some higher routine detects
- * that the CU has stopped, to try to restart it from the last packet we knew
- * we were working on, or from the idle loop if we had finished for the time being.
- */
-
-static void eexp_hw_txrestart(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
-
- lp->last_tx_restart = lp->tx_link;
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
-
- {
- unsigned short boguscount=50,failcount=5;
- while (!scb_status(dev))
- {
- if (!--boguscount)
- {
- if (--failcount)
- {
- printk(KERN_WARNING "%s: CU start timed out, status %04x, cmd %04x\n", dev->name, scb_status(dev), scb_rdcmd(dev));
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- boguscount = 100;
- }
- else
- {
- printk(KERN_WARNING "%s: Failed to restart CU, resetting board...\n",dev->name);
- eexp_hw_init586(dev);
- netif_wake_queue(dev);
- return;
- }
- }
- }
- }
-}
-
-/*
- * Writes down the list of transmit buffers into card memory. Each
- * entry consists of an 82586 transmit command, followed by a jump
- * pointing to itself. When we want to transmit a packet, we write
- * the data into the appropriate transmit buffer and then modify the
- * preceding jump to point at the new transmit command. This means that
- * the 586 command unit is continuously active.
- */
-
-static void eexp_hw_txinit(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short tx_block = TX_BUF_START;
- unsigned short curtbuf;
- unsigned short ioaddr = dev->base_addr;
-
- for ( curtbuf=0 ; curtbuf<lp->num_tx_bufs ; curtbuf++ )
- {
- outw(tx_block, ioaddr + WRITE_PTR);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
- outw(tx_block+0x08, ioaddr + DATAPORT);
- outw(tx_block+0x0e, ioaddr + DATAPORT);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(0x0000, ioaddr + DATAPORT);
- outw(tx_block+0x08, ioaddr + DATAPORT);
-
- outw(0x8000, ioaddr + DATAPORT);
- outw(-1, ioaddr + DATAPORT);
- outw(tx_block+0x16, ioaddr + DATAPORT);
- outw(0x0000, ioaddr + DATAPORT);
-
- tx_block += TX_BUF_SIZE;
- }
- lp->tx_head = TX_BUF_START;
- lp->tx_reap = TX_BUF_START;
- lp->tx_tail = tx_block - TX_BUF_SIZE;
- lp->tx_link = lp->tx_tail + 0x08;
- lp->rx_buf_start = tx_block;
-
-}
-
-/*
- * Write the circular list of receive buffer descriptors to card memory.
- * The end of the list isn't marked, which means that the 82586 receive
- * unit will loop until buffers become available (this avoids it giving us
- * "out of resources" messages).
- */
-
-static void eexp_hw_rxinit(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short rx_block = lp->rx_buf_start;
- unsigned short ioaddr = dev->base_addr;
-
- lp->num_rx_bufs = 0;
- lp->rx_first = lp->rx_ptr = rx_block;
- do
- {
- lp->num_rx_bufs++;
-
- outw(rx_block, ioaddr + WRITE_PTR);
-
- outw(0, ioaddr + DATAPORT); outw(0, ioaddr+DATAPORT);
- outw(rx_block + RX_BUF_SIZE, ioaddr+DATAPORT);
- outw(0xffff, ioaddr+DATAPORT);
-
- outw(0x0000, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
-
- outw(0x0000, ioaddr+DATAPORT);
- outw(rx_block + RX_BUF_SIZE + 0x16, ioaddr+DATAPORT);
- outw(rx_block + 0x20, ioaddr+DATAPORT);
- outw(0, ioaddr+DATAPORT);
- outw(RX_BUF_SIZE-0x20, ioaddr+DATAPORT);
-
- lp->rx_last = rx_block;
- rx_block += RX_BUF_SIZE;
- } while (rx_block <= lp->rx_buf_end-RX_BUF_SIZE);
-
-
- /* Make first Rx frame descriptor point to first Rx buffer
- descriptor */
- outw(lp->rx_first + 6, ioaddr+WRITE_PTR);
- outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
-
- /* Close Rx frame descriptor ring */
- outw(lp->rx_last + 4, ioaddr+WRITE_PTR);
- outw(lp->rx_first, ioaddr+DATAPORT);
-
- /* Close Rx buffer descriptor ring */
- outw(lp->rx_last + 0x16 + 2, ioaddr+WRITE_PTR);
- outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
-
-}
-
-/*
- * Un-reset the 586, and start the configuration sequence. We don't wait for
- * this to finish, but allow the interrupt handler to start the CU and RU for
- * us. We can't start the receive/transmission system up before we know that
- * the hardware is configured correctly.
- */
-
-static void eexp_hw_init586(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
- int i;
-
-#if NET_DEBUG > 6
- printk("%s: eexp_hw_init586()\n", dev->name);
-#endif
-
- lp->started = 0;
-
- set_loopback(dev);
-
- outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
-
- /* Download the startup code */
- outw(lp->rx_buf_end & ~31, ioaddr + SM_PTR);
- outw(lp->width?0x0001:0x0000, ioaddr + 0x8006);
- outw(0x0000, ioaddr + 0x8008);
- outw(0x0000, ioaddr + 0x800a);
- outw(0x0000, ioaddr + 0x800c);
- outw(0x0000, ioaddr + 0x800e);
-
- for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) {
- int j;
- outw(i, ioaddr + SM_PTR);
- for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2)
- outw(start_code[(i+j)/2],
- ioaddr+0x4000+j);
- for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2)
- outw(start_code[(i+j+16)/2],
- ioaddr+0x8000+j);
- }
-
- /* Do we want promiscuous mode or multicast? */
- outw(CONF_PROMISC & ~31, ioaddr+SM_PTR);
- i = inw(ioaddr+SHADOW(CONF_PROMISC));
- outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1),
- ioaddr+SHADOW(CONF_PROMISC));
- lp->was_promisc = dev->flags & IFF_PROMISC;
-#if 0
- eexp_setup_filter(dev);
-#endif
-
- /* Write our hardware address */
- outw(CONF_HWADDR & ~31, ioaddr+SM_PTR);
- outw(((unsigned short *)dev->dev_addr)[0], ioaddr+SHADOW(CONF_HWADDR));
- outw(((unsigned short *)dev->dev_addr)[1],
- ioaddr+SHADOW(CONF_HWADDR+2));
- outw(((unsigned short *)dev->dev_addr)[2],
- ioaddr+SHADOW(CONF_HWADDR+4));
-
- eexp_hw_txinit(dev);
- eexp_hw_rxinit(dev);
-
- outb(0,ioaddr+EEPROM_Ctrl);
- mdelay(5);
-
- scb_command(dev, 0xf000);
- outb(0,ioaddr+SIGNAL_CA);
-
- outw(0, ioaddr+SM_PTR);
-
- {
- unsigned short rboguscount=50,rfailcount=5;
- while (inw(ioaddr+0x4000))
- {
- if (!--rboguscount)
- {
- printk(KERN_WARNING "%s: i82586 reset timed out, kicking...\n",
- dev->name);
- scb_command(dev, 0);
- outb(0,ioaddr+SIGNAL_CA);
- rboguscount = 100;
- if (!--rfailcount)
- {
- printk(KERN_WARNING "%s: i82586 not responding, giving up.\n",
- dev->name);
- return;
- }
- }
- }
- }
-
- scb_wrcbl(dev, CONF_LINK);
- scb_command(dev, 0xf000|SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
-
- {
- unsigned short iboguscount=50,ifailcount=5;
- while (!scb_status(dev))
- {
- if (!--iboguscount)
- {
- if (--ifailcount)
- {
- printk(KERN_WARNING "%s: i82586 initialization timed out, status %04x, cmd %04x\n",
- dev->name, scb_status(dev), scb_rdcmd(dev));
- scb_wrcbl(dev, CONF_LINK);
- scb_command(dev, 0xf000|SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- iboguscount = 100;
- }
- else
- {
- printk(KERN_WARNING "%s: Failed to initialize i82586, giving up.\n",dev->name);
- return;
- }
- }
- }
- }
-
- clear_loopback(dev);
- outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
-
- lp->init_time = jiffies;
-#if NET_DEBUG > 6
- printk("%s: leaving eexp_hw_init586()\n", dev->name);
-#endif
-}
-
-static void eexp_setup_filter(struct net_device *dev)
-{
- struct netdev_hw_addr *ha;
- unsigned short ioaddr = dev->base_addr;
- int count = netdev_mc_count(dev);
- int i;
- if (count > 8) {
- printk(KERN_INFO "%s: too many multicast addresses (%d)\n",
- dev->name, count);
- count = 8;
- }
-
- outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
- outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
- i = 0;
- netdev_for_each_mc_addr(ha, dev) {
- unsigned short *data = (unsigned short *) ha->addr;
-
- if (i == count)
- break;
- outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR);
- outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i)));
- outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR);
- outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2));
- outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR);
- outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4));
- i++;
- }
-}
-
-/*
- * Set or clear the multicast filter for this adaptor.
- */
-static void
-eexp_set_multicast(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
- int kick = 0, i;
- if ((dev->flags & IFF_PROMISC) != lp->was_promisc) {
- outw(CONF_PROMISC & ~31, ioaddr+SM_PTR);
- i = inw(ioaddr+SHADOW(CONF_PROMISC));
- outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1),
- ioaddr+SHADOW(CONF_PROMISC));
- lp->was_promisc = dev->flags & IFF_PROMISC;
- kick = 1;
- }
- if (!(dev->flags & IFF_PROMISC)) {
- eexp_setup_filter(dev);
- if (lp->old_mc_count != netdev_mc_count(dev)) {
- kick = 1;
- lp->old_mc_count = netdev_mc_count(dev);
- }
- }
- if (kick) {
- unsigned long oj;
- scb_command(dev, SCB_CUsuspend);
- outb(0, ioaddr+SIGNAL_CA);
- outb(0, ioaddr+SIGNAL_CA);
-#if 0
- printk("%s: waiting for CU to go suspended\n", dev->name);
-#endif
- oj = jiffies;
- while ((SCB_CUstat(scb_status(dev)) == 2) &&
- (time_before(jiffies, oj + 2000)));
- if (SCB_CUstat(scb_status(dev)) == 2)
- printk("%s: warning, CU didn't stop\n", dev->name);
- lp->started &= ~(STARTED_CU);
- scb_wrcbl(dev, CONF_LINK);
- scb_command(dev, SCB_CUstart);
- outb(0, ioaddr+SIGNAL_CA);
- }
-}
-
-
-/*
- * MODULE stuff
- */
-
-#ifdef MODULE
-
-#define EEXP_MAX_CARDS 4 /* max number of cards to support */
-
-static struct net_device *dev_eexp[EEXP_MAX_CARDS];
-static int irq[EEXP_MAX_CARDS];
-static int io[EEXP_MAX_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(io, "EtherExpress 16 I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherExpress 16 IRQ number(s)");
-MODULE_LICENSE("GPL");
-
-
-/* Ideally the user would give us io=, irq= for every card. If any parameters
- * are specified, we verify and then use them. If no parameters are given, we
- * autoprobe for one card only.
- */
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
- dev = alloc_etherdev(sizeof(struct net_local));
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (io[this_dev] == 0) {
- if (this_dev)
- break;
- printk(KERN_NOTICE "eexpress.c: Module autoprobe not recommended, give io=xx.\n");
- }
- if (do_express_probe(dev) == 0) {
- dev_eexp[this_dev] = dev;
- found++;
- continue;
- }
- printk(KERN_WARNING "eexpress.c: Failed to register card at 0x%x.\n", io[this_dev]);
- free_netdev(dev);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
- struct net_device *dev = dev_eexp[this_dev];
- if (dev) {
- unregister_netdev(dev);
- free_netdev(dev);
- }
- }
-}
-#endif
-
-/*
- * Local Variables:
- * c-file-style: "linux"
- * tab-width: 8
- * End:
- */
diff --git a/drivers/net/ethernet/i825xx/eexpress.h b/drivers/net/ethernet/i825xx/eexpress.h
deleted file mode 100644
index dc9c6ea289e9..000000000000
--- a/drivers/net/ethernet/i825xx/eexpress.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * eexpress.h: Intel EtherExpress16 defines
- */
-
-/*
- * EtherExpress card register addresses
- * as offsets from the base IO region (dev->base_addr)
- */
-
-#define DATAPORT 0x0000
-#define WRITE_PTR 0x0002
-#define READ_PTR 0x0004
-#define SIGNAL_CA 0x0006
-#define SET_IRQ 0x0007
-#define SM_PTR 0x0008
-#define MEM_Dec 0x000a
-#define MEM_Ctrl 0x000b
-#define MEM_Page_Ctrl 0x000c
-#define Config 0x000d
-#define EEPROM_Ctrl 0x000e
-#define ID_PORT 0x000f
-#define MEM_ECtrl 0x000f
-
-/*
- * card register defines
- */
-
-/* SET_IRQ */
-#define SIRQ_en 0x08
-#define SIRQ_dis 0x00
-
-/* EEPROM_Ctrl */
-#define EC_Clk 0x01
-#define EC_CS 0x02
-#define EC_Wr 0x04
-#define EC_Rd 0x08
-#define ASIC_RST 0x40
-#define i586_RST 0x80
-
-#define eeprom_delay() { udelay(40); }
-
-/*
- * i82586 Memory Configuration
- */
-
-/* (System Configuration Pointer) System start up block, read after 586_RST */
-#define SCP_START 0xfff6
-
-/* Intermediate System Configuration Pointer */
-#define ISCP_START 0x0000
-
-/* System Command Block */
-#define SCB_START 0x0008
-
-/* Start of buffer region. Everything before this is used for control
- * structures and the CU configuration program. The memory layout is
- * determined in eexp_hw_probe(), once we know how much memory is
- * available on the card.
- */
-
-#define TX_BUF_START 0x0100
-
-#define TX_BUF_SIZE ((24+ETH_FRAME_LEN+31)&~0x1f)
-#define RX_BUF_SIZE ((32+ETH_FRAME_LEN+31)&~0x1f)
-
-/*
- * SCB defines
- */
-
-/* these functions take the SCB status word and test the relevant status bit */
-#define SCB_complete(s) (((s) & 0x8000) != 0)
-#define SCB_rxdframe(s) (((s) & 0x4000) != 0)
-#define SCB_CUdead(s) (((s) & 0x2000) != 0)
-#define SCB_RUdead(s) (((s) & 0x1000) != 0)
-#define SCB_ack(s) ((s) & 0xf000)
-
-/* Command unit status: 0=idle, 1=suspended, 2=active */
-#define SCB_CUstat(s) (((s)&0x0300)>>8)
-
-/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
-#define SCB_RUstat(s) (((s)&0x0070)>>4)
-
-/* SCB commands */
-#define SCB_CUnop 0x0000
-#define SCB_CUstart 0x0100
-#define SCB_CUresume 0x0200
-#define SCB_CUsuspend 0x0300
-#define SCB_CUabort 0x0400
-#define SCB_resetchip 0x0080
-
-#define SCB_RUnop 0x0000
-#define SCB_RUstart 0x0010
-#define SCB_RUresume 0x0020
-#define SCB_RUsuspend 0x0030
-#define SCB_RUabort 0x0040
-
-/*
- * Command block defines
- */
-
-#define Stat_Done(s) (((s) & 0x8000) != 0)
-#define Stat_Busy(s) (((s) & 0x4000) != 0)
-#define Stat_OK(s) (((s) & 0x2000) != 0)
-#define Stat_Abort(s) (((s) & 0x1000) != 0)
-#define Stat_STFail(s)	(((s) & 0x0800) != 0)
-#define Stat_TNoCar(s) (((s) & 0x0400) != 0)
-#define Stat_TNoCTS(s) (((s) & 0x0200) != 0)
-#define Stat_TNoDMA(s) (((s) & 0x0100) != 0)
-#define Stat_TDefer(s) (((s) & 0x0080) != 0)
-#define Stat_TColl(s) (((s) & 0x0040) != 0)
-#define Stat_TXColl(s) (((s) & 0x0020) != 0)
-#define Stat_NoColl(s) ((s) & 0x000f)
-
-/* Cmd_END will end AFTER the command if this is the first
- * command block after an SCB_CUstart, but BEFORE the command
- * for all subsequent commands. Best strategy is to place
- * Cmd_INT on the last command in the sequence, followed by a
- * dummy Cmd_Nop with Cmd_END after this.
- */
-
-#define Cmd_END 0x8000
-#define Cmd_SUS 0x4000
-#define Cmd_INT 0x2000
-
-#define Cmd_Nop 0x0000
-#define Cmd_SetAddr 0x0001
-#define Cmd_Config 0x0002
-#define Cmd_MCast 0x0003
-#define Cmd_Xmit 0x0004
-#define Cmd_TDR 0x0005
-#define Cmd_Dump 0x0006
-#define Cmd_Diag 0x0007
-
-
-/*
- * Frame Descriptor (Receive block) defines
- */
-
-#define FD_Done(s) (((s) & 0x8000) != 0)
-#define FD_Busy(s) (((s) & 0x4000) != 0)
-#define FD_OK(s) (((s) & 0x2000) != 0)
-
-#define FD_CRC(s) (((s) & 0x0800) != 0)
-#define FD_Align(s) (((s) & 0x0400) != 0)
-#define FD_Resrc(s) (((s) & 0x0200) != 0)
-#define FD_DMA(s) (((s) & 0x0100) != 0)
-#define FD_Short(s) (((s) & 0x0080) != 0)
-#define FD_NoEOF(s) (((s) & 0x0040) != 0)
-
-struct rfd_header {
- volatile unsigned long flags;
- volatile unsigned short link;
- volatile unsigned short rbd_offset;
- volatile unsigned short dstaddr1;
- volatile unsigned short dstaddr2;
- volatile unsigned short dstaddr3;
- volatile unsigned short srcaddr1;
- volatile unsigned short srcaddr2;
- volatile unsigned short srcaddr3;
- volatile unsigned short length;
-
- /* This is actually a Receive Buffer Descriptor. The way we
- * arrange memory means that an RBD always follows the RFD that
- * points to it, so they might as well be in the same structure.
- */
- volatile unsigned short actual_count;
- volatile unsigned short next_rbd;
- volatile unsigned short buf_addr1;
- volatile unsigned short buf_addr2;
- volatile unsigned short size;
-};
-
-/* Returned data from the Time Domain Reflectometer */
-
-#define TDR_LINKOK (1<<15)
-#define TDR_XCVRPROBLEM (1<<14)
-#define TDR_OPEN (1<<13)
-#define TDR_SHORT (1<<12)
-#define TDR_TIME 0x7ff
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
deleted file mode 100644
index 3735bfa53600..000000000000
--- a/drivers/net/ethernet/i825xx/lp486e.c
+++ /dev/null
@@ -1,1337 +0,0 @@
-/* Intel Professional Workstation/panther ethernet driver */
-/* lp486e.c: A panther 82596 ethernet driver for linux. */
-/*
- History and copyrights:
-
- Driver skeleton
- Written 1993 by Donald Becker.
- Copyright 1993 United States Government as represented by the Director,
- National Security Agency. This software may only be used and
- distributed according to the terms of the GNU General Public License
- as modified by SRC, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Apricot
- Written 1994 by Mark Evans.
- This driver is for the Apricot 82596 bus-master interface
-
- Modularised 12/94 Mark Evans
-
- Professional Workstation
- Derived from apricot.c by Ard van Breemen
- <ard@murphy.nl>|<ard@cstmel.hobby.nl>|<ard@cstmel.nl.eu.org>
-
- Credits:
- Thanks to Murphy Software BV for letting me write this in their time.
- Well, actually, I get paid doing this...
- (Also: see http://www.murphy.nl for murphy, and my homepage ~ard for
- more information on the Professional Workstation)
-
- Present version
- aeb@cwi.nl
-*/
-/*
- There are currently two motherboards that I know of in the
- professional workstation. The only one that I know is the
- intel panther motherboard. -- ard
-*/
-/*
-The PWS is equipped with an Intel 82596. This is a very intelligent controller
-which runs its own microcode. Communication with the host processor is done
-through linked lists of commands and buffers in the host processor's memory.
-A complete description of the 82596 is available from Intel. Search for
-a file called "29021806.pdf"; it is a complete description of the chip itself.
-To use it for the PWS, some additions are needed regarding generation of
-the PORT and CA signals, and the interrupt glue needed for a PC.
-I/O map:
-PORT SIZE ACTION MEANING
-0xCB0 2 WRITE Lower 16 bits for PORT command
-0xCB2 2 WRITE Upper 16 bits for PORT command, and issue of PORT command
-0xCB4 1 WRITE Generation of CA signal
-0xCB8 1 WRITE Clear interrupt glue
-All other communication is through memory!
-*/
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#define DRV_NAME "lp486e"
-
-/* debug print flags */
-#define LOG_SRCDST 0x80000000
-#define LOG_STATINT 0x40000000
-#define LOG_STARTINT 0x20000000
-
-#define i596_debug debug
-
-static int i596_debug = 0;
-
-static const char * const medianame[] = {
- "10baseT", "AUI",
- "10baseT-FD", "AUI-FD",
-};
-
-#define LP486E_TOTAL_SIZE 16
-
-#define I596_NULL (0xffffffff)
-
-#define CMD_EOL 0x8000 /* The last command of the list, stop. */
-#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
-#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
-
-#define CMD_FLEX 0x0008 /* Enable flexible memory model */
-
-enum commands {
- CmdNOP = 0,
- CmdIASetup = 1,
- CmdConfigure = 2,
- CmdMulticastList = 3,
- CmdTx = 4,
- CmdTDR = 5,
- CmdDump = 6,
- CmdDiagnose = 7
-};
-
-#if 0
-static const char *CUcmdnames[8] = { "NOP", "IASetup", "Configure", "MulticastList",
- "Tx", "TDR", "Dump", "Diagnose" };
-#endif
-
-/* Status word bits */
-#define STAT_CX 0x8000 /* The CU finished executing a command
- with the Interrupt bit set */
-#define STAT_FR 0x4000 /* The RU finished receiving a frame */
-#define STAT_CNA 0x2000 /* The CU left the active state */
-#define STAT_RNR 0x1000 /* The RU left the active state */
-#define STAT_ACK (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR)
-#define STAT_CUS 0x0700 /* Status of CU: 0: idle, 1: suspended,
- 2: active, 3-7: unused */
-#define STAT_RUS 0x00f0 /* Status of RU: 0: idle, 1: suspended,
- 2: no resources, 4: ready,
- 10: no resources due to no more RBDs,
- 12: no more RBDs, other: unused */
-#define STAT_T 0x0008 /* Bus throttle timers loaded */
-#define STAT_ZERO 0x0807 /* Always zero */
-
-#if 0
-static char *CUstates[8] = {
- "idle", "suspended", "active", 0, 0, 0, 0, 0
-};
-static char *RUstates[16] = {
- "idle", "suspended", "no resources", 0, "ready", 0, 0, 0,
- 0, 0, "no RBDs", 0, "out of RBDs", 0, 0, 0
-};
-
-static void
-i596_out_status(int status) {
- int bad = 0;
- char *s;
-
- printk("status %4.4x:", status);
- if (status == 0xffff)
- printk(" strange..\n");
- else {
- if (status & STAT_CX)
- printk(" CU done");
- if (status & STAT_CNA)
- printk(" CU stopped");
- if (status & STAT_FR)
- printk(" got a frame");
- if (status & STAT_RNR)
- printk(" RU stopped");
- if (status & STAT_T)
- printk(" throttled");
- if (status & STAT_ZERO)
- bad = 1;
- s = CUstates[(status & STAT_CUS) >> 8];
- if (!s)
- bad = 1;
- else
- printk(" CU(%s)", s);
- s = RUstates[(status & STAT_RUS) >> 4];
- if (!s)
- bad = 1;
- else
- printk(" RU(%s)", s);
- if (bad)
- printk(" bad status");
- printk("\n");
- }
-}
-#endif
-
-/* Command word bits */
-#define ACK_CX 0x8000
-#define ACK_FR 0x4000
-#define ACK_CNA 0x2000
-#define ACK_RNR 0x1000
-
-#define CUC_START 0x0100
-#define CUC_RESUME 0x0200
-#define CUC_SUSPEND 0x0300
-#define CUC_ABORT 0x0400
-
-#define RX_START 0x0010
-#define RX_RESUME 0x0020
-#define RX_SUSPEND 0x0030
-#define RX_ABORT 0x0040
-
-typedef u32 phys_addr;
-
-static inline phys_addr
-va_to_pa(void *x) {
- return x ? virt_to_bus(x) : I596_NULL;
-}
-
-static inline void *
-pa_to_va(phys_addr x) {
- return (x == I596_NULL) ? NULL : bus_to_virt(x);
-}
-
-/* status bits for cmd */
-#define CMD_STAT_C 0x8000 /* CU command complete */
-#define CMD_STAT_B 0x4000 /* CU command in progress */
-#define CMD_STAT_OK 0x2000 /* CU command completed without errors */
-#define CMD_STAT_A 0x1000 /* CU command abnormally terminated */
-
-struct i596_cmd { /* 8 bytes */
- unsigned short status;
- unsigned short command;
- phys_addr pa_next; /* va_to_pa(struct i596_cmd *next) */
-};
-
-#define EOF 0x8000
-#define SIZE_MASK 0x3fff
-
-struct i596_tbd {
- unsigned short size;
- unsigned short pad;
- phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
- phys_addr pa_data; /* va_to_pa(char *data) */
- struct sk_buff *skb;
-};
-
-struct tx_cmd {
- struct i596_cmd cmd;
- phys_addr pa_tbd; /* va_to_pa(struct i596_tbd *tbd) */
- unsigned short size;
- unsigned short pad;
-};
-
-/* status bits for rfd */
-#define RFD_STAT_C 0x8000 /* Frame reception complete */
-#define RFD_STAT_B 0x4000 /* Frame reception in progress */
-#define RFD_STAT_OK 0x2000 /* Frame received without errors */
-#define RFD_STATUS 0x1fff
-#define RFD_LENGTH_ERR 0x1000
-#define RFD_CRC_ERR 0x0800
-#define RFD_ALIGN_ERR 0x0400
-#define RFD_NOBUFS_ERR 0x0200
-#define RFD_DMA_ERR 0x0100 /* DMA overrun failure to acquire system bus */
-#define RFD_SHORT_FRAME_ERR 0x0080
-#define RFD_NOEOP_ERR 0x0040
-#define RFD_TRUNC_ERR 0x0020
-#define RFD_MULTICAST 0x0002 /* 0: destination had our address
- 1: destination was broadcast/multicast */
-#define RFD_COLLISION 0x0001
-
-/* receive frame descriptor */
-struct i596_rfd {
- unsigned short stat;
- unsigned short cmd;
- phys_addr pa_next; /* va_to_pa(struct i596_rfd *next) */
- phys_addr pa_rbd; /* va_to_pa(struct i596_rbd *rbd) */
- unsigned short count;
- unsigned short size;
- char data[1532];
-};
-
-#define RBD_EL 0x8000
-#define RBD_P 0x4000
-#define RBD_SIZEMASK 0x3fff
-#define RBD_EOF 0x8000
-#define RBD_F 0x4000
-
-/* receive buffer descriptor */
-struct i596_rbd {
- unsigned short size;
- unsigned short pad;
- phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
- phys_addr pa_data; /* va_to_pa(char *data) */
- phys_addr pa_prev; /* va_to_pa(struct i596_tbd *prev) */
-
- /* Driver private part */
- struct sk_buff *skb;
-};
-
-#define RX_RING_SIZE 64
-#define RX_SKBSIZE (ETH_FRAME_LEN+10)
-#define RX_RBD_SIZE 32
-
-/* System Control Block - 40 bytes */
-struct i596_scb {
- u16 status; /* 0 */
- u16 command; /* 2 */
- phys_addr pa_cmd; /* 4 - va_to_pa(struct i596_cmd *cmd) */
- phys_addr pa_rfd; /* 8 - va_to_pa(struct i596_rfd *rfd) */
- u32 crc_err; /* 12 */
- u32 align_err; /* 16 */
- u32 resource_err; /* 20 */
- u32 over_err; /* 24 */
- u32 rcvdt_err; /* 28 */
- u32 short_err; /* 32 */
- u16 t_on; /* 36 */
- u16 t_off; /* 38 */
-};
-
-/* Intermediate System Configuration Pointer - 8 bytes */
-struct i596_iscp {
- u32 busy; /* 0 */
- phys_addr pa_scb; /* 4 - va_to_pa(struct i596_scb *scb) */
-};
-
-/* System Configuration Pointer - 12 bytes */
-struct i596_scp {
- u32 sysbus; /* 0 */
- u32 pad; /* 4 */
- phys_addr pa_iscp; /* 8 - va_to_pa(struct i596_iscp *iscp) */
-};
-
-/* Selftest and dump results - needs 16-byte alignment */
-/*
- * The size of the dump area is 304 bytes. When the dump is executed
- * by the Port command an extra word will be appended to the dump area.
- * The extra word is a copy of the Dump status word (containing the
- * C, B, OK bits). [I find 0xa006, with a0 for C+OK and 6 for dump]
- */
-struct i596_dump {
- u16 dump[153]; /* (304 = 130h) + 2 bytes */
-};
-
-struct i596_private { /* aligned to a 16-byte boundary */
- struct i596_scp scp; /* 0 - needs 16-byte alignment */
- struct i596_iscp iscp; /* 12 */
- struct i596_scb scb; /* 20 */
- u32 dummy; /* 60 */
- struct i596_dump dump; /* 64 - needs 16-byte alignment */
-
- struct i596_cmd set_add;
- char eth_addr[8]; /* directly follows set_add */
-
- struct i596_cmd set_conf;
- char i596_config[16]; /* directly follows set_conf */
-
- struct i596_cmd tdr;
- unsigned long tdr_stat; /* directly follows tdr */
-
- int last_restart;
- struct i596_rbd *rbd_list;
- struct i596_rbd *rbd_tail;
- struct i596_rfd *rx_tail;
- struct i596_cmd *cmd_tail;
- struct i596_cmd *cmd_head;
- int cmd_backlog;
- unsigned long last_cmd;
- spinlock_t cmd_lock;
-};
-
-static char init_setup[14] = {
- 0x8E, /* length 14 bytes, prefetch on */
- 0xC8, /* default: fifo to 8, monitor off */
- 0x40, /* default: don't save bad frames (apricot.c had 0x80) */
- 0x2E, /* (default is 0x26)
- No source address insertion, 8 byte preamble */
- 0x00, /* default priority and backoff */
- 0x60, /* default interframe spacing */
- 0x00, /* default slot time LSB */
- 0xf2, /* default slot time and nr of retries */
- 0x00, /* default various bits
- (0: promiscuous mode, 1: broadcast disable,
- 2: encoding mode, 3: transmit on no CRS,
- 4: no CRC insertion, 5: CRC type,
- 6: bit stuffing, 7: padding) */
- 0x00, /* default carrier sense and collision detect */
- 0x40, /* default minimum frame length */
- 0xff, /* (default is 0xff, and that is what apricot.c has;
- elp486.c has 0xfb: Enable crc append in memory.) */
- 0x00, /* default: not full duplex */
- 0x7f /* (default is 0x3f) multi IA */
-};
-
-static int i596_open(struct net_device *dev);
-static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t i596_interrupt(int irq, void *dev_id);
-static int i596_close(struct net_device *dev);
-static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
-static void print_eth(char *);
-static void set_multicast_list(struct net_device *dev);
-static void i596_tx_timeout(struct net_device *dev);
-
-static int
-i596_timeout(struct net_device *dev, char *msg, int ct) {
- struct i596_private *lp;
- int boguscnt = ct;
-
- lp = netdev_priv(dev);
- while (lp->scb.command) {
- if (--boguscnt == 0) {
- printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n",
- dev->name, msg,
- lp->scb.status, lp->scb.command);
- return 1;
- }
- udelay(5);
- barrier();
- }
- return 0;
-}
-
-static inline int
-init_rx_bufs(struct net_device *dev, int num) {
- struct i596_private *lp;
- struct i596_rfd *rfd;
- int i;
- // struct i596_rbd *rbd;
-
- lp = netdev_priv(dev);
- lp->scb.pa_rfd = I596_NULL;
-
- for (i = 0; i < num; i++) {
- rfd = kmalloc(sizeof(struct i596_rfd), GFP_KERNEL);
- if (rfd == NULL)
- break;
-
- rfd->stat = 0;
- rfd->pa_rbd = I596_NULL;
- rfd->count = 0;
- rfd->size = 1532;
- if (i == 0) {
- rfd->cmd = CMD_EOL;
- lp->rx_tail = rfd;
- } else {
- rfd->cmd = 0;
- }
- rfd->pa_next = lp->scb.pa_rfd;
- lp->scb.pa_rfd = va_to_pa(rfd);
- lp->rx_tail->pa_next = lp->scb.pa_rfd;
- }
-
-#if 0
- for (i = 0; i<RX_RBD_SIZE; i++) {
- rbd = kmalloc(sizeof(struct i596_rbd), GFP_KERNEL);
- if (rbd) {
- rbd->pad = 0;
- rbd->count = 0;
- rbd->skb = dev_alloc_skb(RX_SKBSIZE);
- if (!rbd->skb) {
- printk("dev_alloc_skb failed");
- }
- rbd->next = rfd->rbd;
- if (i) {
- rfd->rbd->prev = rbd;
- rbd->size = RX_SKBSIZE;
- } else {
- rbd->size = (RX_SKBSIZE | RBD_EL);
- lp->rbd_tail = rbd;
- }
-
- rfd->rbd = rbd;
- }
- }
- lp->rbd_tail->next = rfd->rbd;
-#endif
- return i;
-}
-
-static inline void
-remove_rx_bufs(struct net_device *dev) {
- struct i596_private *lp;
- struct i596_rfd *rfd;
-
- lp = netdev_priv(dev);
- lp->rx_tail->pa_next = I596_NULL;
-
- do {
- rfd = pa_to_va(lp->scb.pa_rfd);
- lp->scb.pa_rfd = rfd->pa_next;
- kfree(rfd);
- } while (rfd != lp->rx_tail);
-
- lp->rx_tail = NULL;
-
-#if 0
- for (lp->rbd_list) {
- }
-#endif
-}
-
-#define PORT_RESET 0x00 /* reset 82596 */
-#define PORT_SELFTEST 0x01 /* selftest */
-#define PORT_ALTSCP 0x02 /* alternate SCB address */
-#define PORT_DUMP 0x03 /* dump */
-
-#define IOADDR 0xcb0 /* real constant */
-#define IRQ 10 /* default IRQ - can be changed by ECU */
-
-/* The 82596 requires two 16-bit write cycles for a port command */
-static inline void
-PORT(phys_addr a, unsigned int cmd) {
- if (a & 0xf)
- printk("lp486e.c: PORT: address not aligned\n");
- outw(((a & 0xffff) | cmd), IOADDR);
- outw(((a>>16) & 0xffff), IOADDR+2);
-}
-
-static inline void
-CA(void) {
- outb(0, IOADDR+4);
- udelay(8);
-}
-
-static inline void
-CLEAR_INT(void) {
- outb(0, IOADDR+8);
-}
-
-#if 0
-/* selftest or dump */
-static void
-i596_port_do(struct net_device *dev, int portcmd, char *cmdname) {
- struct i596_private *lp = netdev_priv(dev);
- u16 *outp;
- int i, m;
-
- memset((void *)&(lp->dump), 0, sizeof(struct i596_dump));
- outp = &(lp->dump.dump[0]);
-
- PORT(va_to_pa(outp), portcmd);
- mdelay(30); /* random, unmotivated */
-
- printk("lp486e i82596 %s result:\n", cmdname);
- for (m = ARRAY_SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--)
- ;
- for (i = 0; i < m; i++) {
- printk(" %04x", lp->dump.dump[i]);
- if (i%8 == 7)
- printk("\n");
- }
- printk("\n");
-}
-#endif
-
-static int
-i596_scp_setup(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- int boguscnt;
-
- /* Setup SCP, ISCP, SCB */
- /*
- * sysbus bits:
- * only a single byte is significant - here 0x44
- * 0x80: big endian mode (details depend on stepping)
- * 0x40: 1
- * 0x20: interrupt pin is active low
- * 0x10: lock function disabled
- * 0x08: external triggering of bus throttle timers
- * 0x06: 00: 82586 compat mode, 01: segmented mode, 10: linear mode
- * 0x01: unused
- */
- lp->scp.sysbus = 0x00440000; /* linear mode */
- lp->scp.pad = 0; /* must be zero */
- lp->scp.pa_iscp = va_to_pa(&(lp->iscp));
-
- /*
- * The CPU sets the ISCP to 1 before it gives the first CA()
- */
- lp->iscp.busy = 0x0001;
- lp->iscp.pa_scb = va_to_pa(&(lp->scb));
-
- lp->scb.command = 0;
- lp->scb.status = 0;
- lp->scb.pa_cmd = I596_NULL;
- /* lp->scb.pa_rfd has been initialised already */
-
- lp->last_cmd = jiffies;
- lp->cmd_backlog = 0;
- lp->cmd_head = NULL;
-
- /*
- * Reset the 82596.
- * We need to wait 10 systemclock cycles, and
- * 5 serial clock cycles.
- */
- PORT(0, PORT_RESET); /* address part ignored */
- udelay(100);
-
- /*
- * Before the CA signal is asserted, the default SCP address
- * (0x00fffff4) can be changed to a 16-byte aligned value
- */
- PORT(va_to_pa(&lp->scp), PORT_ALTSCP); /* change the scp address */
-
- /*
- * The initialization procedure begins when a
- * Channel Attention signal is asserted after a reset.
- */
-
- CA();
-
- /*
- * The ISCP busy is cleared by the 82596 after the SCB address is read.
- */
- boguscnt = 100;
- while (lp->iscp.busy) {
- if (--boguscnt == 0) {
- /* No i82596 present? */
- printk("%s: i82596 initialization timed out\n",
- dev->name);
- return 1;
- }
- udelay(5);
- barrier();
- }
- /* I find here boguscnt==100, so no delay was required. */
-
- return 0;
-}
-
-static int
-init_i596(struct net_device *dev) {
- struct i596_private *lp;
-
- if (i596_scp_setup(dev))
- return 1;
-
- lp = netdev_priv(dev);
- lp->scb.command = 0;
-
- memcpy ((void *)lp->i596_config, init_setup, 14);
- lp->set_conf.command = CmdConfigure;
- i596_add_cmd(dev, (void *)&lp->set_conf);
-
- memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
- lp->set_add.command = CmdIASetup;
- i596_add_cmd(dev, &lp->set_add);
-
- lp->tdr.command = CmdTDR;
- i596_add_cmd(dev, &lp->tdr);
-
- if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
- return 1;
-
- lp->scb.command = RX_START;
- CA();
-
- barrier();
-
- if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100))
- return 1;
-
- return 0;
-}
-
-/* Receive a single frame */
-static inline int
-i596_rx_one(struct net_device *dev, struct i596_private *lp,
- struct i596_rfd *rfd, int *frames) {
-
- if (rfd->stat & RFD_STAT_OK) {
- /* a good frame */
- int pkt_len = (rfd->count & 0x3fff);
- struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);
-
- (*frames)++;
-
- if (rfd->cmd & CMD_EOL)
- printk("Received on EOL\n");
-
- if (skb == NULL) {
- printk ("%s: i596_rx Memory squeeze, "
- "dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- return 1;
- }
-
- memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len);
-
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- } else {
-#if 0
- printk("Frame reception error status %04x\n",
- rfd->stat);
-#endif
- dev->stats.rx_errors++;
- if (rfd->stat & RFD_COLLISION)
- dev->stats.collisions++;
- if (rfd->stat & RFD_SHORT_FRAME_ERR)
- dev->stats.rx_length_errors++;
- if (rfd->stat & RFD_DMA_ERR)
- dev->stats.rx_over_errors++;
- if (rfd->stat & RFD_NOBUFS_ERR)
- dev->stats.rx_fifo_errors++;
- if (rfd->stat & RFD_ALIGN_ERR)
- dev->stats.rx_frame_errors++;
- if (rfd->stat & RFD_CRC_ERR)
- dev->stats.rx_crc_errors++;
- if (rfd->stat & RFD_LENGTH_ERR)
- dev->stats.rx_length_errors++;
- }
- rfd->stat = rfd->count = 0;
- return 0;
-}
-
-static int
-i596_rx(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- struct i596_rfd *rfd;
- int frames = 0;
-
- while (1) {
- rfd = pa_to_va(lp->scb.pa_rfd);
- if (!rfd) {
- printk(KERN_ERR "i596_rx: NULL rfd?\n");
- return 0;
- }
-#if 1
- if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B)))
- printk("SF:%p-%04x\n", rfd, rfd->stat);
-#endif
- if (!(rfd->stat & RFD_STAT_C))
- break; /* next one not ready */
- if (i596_rx_one(dev, lp, rfd, &frames))
- break; /* out of memory */
- rfd->cmd = CMD_EOL;
- lp->rx_tail->cmd = 0;
- lp->rx_tail = rfd;
- lp->scb.pa_rfd = rfd->pa_next;
- barrier();
- }
-
- return frames;
-}
-
-static void
-i596_cleanup_cmd(struct net_device *dev) {
- struct i596_private *lp;
- struct i596_cmd *cmd;
-
- lp = netdev_priv(dev);
- while (lp->cmd_head) {
- cmd = lp->cmd_head;
-
- lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
- lp->cmd_backlog--;
-
- switch ((cmd->command) & 0x7) {
- case CmdTx: {
- struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd;
- struct i596_tbd * tx_cmd_tbd;
- tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
-
- dev_kfree_skb_any(tx_cmd_tbd->skb);
-
- dev->stats.tx_errors++;
- dev->stats.tx_aborted_errors++;
-
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)tx_cmd);
- netif_wake_queue(dev);
- break;
- }
- case CmdMulticastList: {
- // unsigned short count = *((unsigned short *) (ptr + 1));
-
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)cmd);
- break;
- }
- default: {
- cmd->pa_next = I596_NULL;
- break;
- }
- }
- barrier();
- }
-
- if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100))
- ;
-
- lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
-}
-
-static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) {
-
- if (lp->scb.command && i596_timeout(dev, "i596_reset", 100))
- ;
-
- netif_stop_queue(dev);
-
- lp->scb.command = CUC_ABORT | RX_ABORT;
- CA();
- barrier();
-
- /* wait for shutdown */
- if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400))
- ;
-
- i596_cleanup_cmd(dev);
- i596_rx(dev);
-
- netif_start_queue(dev);
- /*dev_kfree_skb(skb, FREE_WRITE);*/
- init_i596(dev);
-}
-
-static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) {
- struct i596_private *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
-
- cmd->status = 0;
- cmd->command |= (CMD_EOL | CMD_INTR);
- cmd->pa_next = I596_NULL;
-
- spin_lock_irqsave(&lp->cmd_lock, flags);
-
- if (lp->cmd_head) {
- lp->cmd_tail->pa_next = va_to_pa(cmd);
- } else {
- lp->cmd_head = cmd;
- if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100))
- ;
- lp->scb.pa_cmd = va_to_pa(cmd);
- lp->scb.command = CUC_START;
- CA();
- }
- lp->cmd_tail = cmd;
- lp->cmd_backlog++;
-
- lp->cmd_head = pa_to_va(lp->scb.pa_cmd);
- spin_unlock_irqrestore(&lp->cmd_lock, flags);
-
- if (lp->cmd_backlog > 16) {
- int tickssofar = jiffies - lp->last_cmd;
- if (tickssofar < HZ/4)
- return;
-
- printk(KERN_WARNING "%s: command unit timed out, status resetting.\n", dev->name);
- i596_reset(dev, lp, ioaddr);
- }
-}
-
-static int i596_open(struct net_device *dev)
-{
- int i;
-
- i = request_irq(dev->irq, i596_interrupt, IRQF_SHARED, dev->name, dev);
- if (i) {
- printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
- return i;
- }
-
- if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
- printk(KERN_ERR "%s: only able to allocate %d receive buffers\n", dev->name, i);
-
- if (i < 4) {
- free_irq(dev->irq, dev);
- return -EAGAIN;
- }
- netif_start_queue(dev);
- init_i596(dev);
- return 0; /* Always succeed */
-}
-
-static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
- struct tx_cmd *tx_cmd;
- short length;
-
- length = skb->len;
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
-
- tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
- if (tx_cmd == NULL) {
- printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.tx_dropped++;
- dev_kfree_skb (skb);
- } else {
- struct i596_tbd *tx_cmd_tbd;
- tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1);
- tx_cmd->pa_tbd = va_to_pa (tx_cmd_tbd);
- tx_cmd_tbd->pa_next = I596_NULL;
-
- tx_cmd->cmd.command = (CMD_FLEX | CmdTx);
-
- tx_cmd->pad = 0;
- tx_cmd->size = 0;
- tx_cmd_tbd->pad = 0;
- tx_cmd_tbd->size = (EOF | length);
-
- tx_cmd_tbd->pa_data = va_to_pa (skb->data);
- tx_cmd_tbd->skb = skb;
-
- if (i596_debug & LOG_SRCDST)
- print_eth (skb->data);
-
- i596_add_cmd (dev, (struct i596_cmd *) tx_cmd);
-
- dev->stats.tx_packets++;
- }
-
- return NETDEV_TX_OK;
-}
-
-static void
-i596_tx_timeout (struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- /* Transmitter timeout, serious problems. */
- printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name);
- dev->stats.tx_errors++;
-
- /* Try to restart the adaptor */
- if (lp->last_restart == dev->stats.tx_packets) {
- printk ("Resetting board.\n");
-
- /* Shutdown and restart */
- i596_reset (dev, lp, ioaddr);
- } else {
- /* Issue a channel attention signal */
- printk ("Kicking board.\n");
- lp->scb.command = (CUC_START | RX_START);
- CA();
- lp->last_restart = dev->stats.tx_packets;
- }
- netif_wake_queue(dev);
-}
-
-static void print_eth(char *add)
-{
- int i;
-
- printk ("Dest ");
- for (i = 0; i < 6; i++)
- printk(" %2.2X", (unsigned char) add[i]);
- printk ("\n");
-
- printk ("Source");
- for (i = 0; i < 6; i++)
- printk(" %2.2X", (unsigned char) add[i+6]);
- printk ("\n");
-
- printk ("type %2.2X%2.2X\n",
- (unsigned char) add[12], (unsigned char) add[13]);
-}
-
-static const struct net_device_ops i596_netdev_ops = {
- .ndo_open = i596_open,
- .ndo_stop = i596_close,
- .ndo_start_xmit = i596_start_xmit,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_tx_timeout = i596_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init lp486e_probe(struct net_device *dev) {
- struct i596_private *lp;
- unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
- unsigned char *bios;
- int i, j;
- int ret = -ENOMEM;
- static int probed;
-
- if (probed)
- return -ENODEV;
- probed++;
-
- if (!request_region(IOADDR, LP486E_TOTAL_SIZE, DRV_NAME)) {
- printk(KERN_ERR "lp486e: IO address 0x%x in use\n", IOADDR);
- return -EBUSY;
- }
-
- lp = netdev_priv(dev);
- spin_lock_init(&lp->cmd_lock);
-
- /*
- * Do we really have this thing?
- */
- if (i596_scp_setup(dev)) {
- ret = -ENODEV;
- goto err_out_kfree;
- }
-
- dev->base_addr = IOADDR;
- dev->irq = IRQ;
-
-
- /*
- * How do we find the ethernet address? I don't know.
- * One possibility is to look at the EISA configuration area
- * [0xe8000-0xe9fff]. This contains the ethernet address
- * but not at a fixed address - things depend on setup options.
- *
- * If we find no address, or the wrong address, use
- * ifconfig eth0 hw ether a1:a2:a3:a4:a5:a6
- * with the value found in the BIOS setup.
- */
- bios = bus_to_virt(0xe8000);
- for (j = 0; j < 0x2000; j++) {
- if (bios[j] == 0 && bios[j+1] == 0xaa && bios[j+2] == 0) {
- printk("%s: maybe address at BIOS 0x%x:",
- dev->name, 0xe8000+j);
- for (i = 0; i < 6; i++) {
- eth_addr[i] = bios[i+j];
- printk(" %2.2X", eth_addr[i]);
- }
- printk("\n");
- }
- }
-
- printk("%s: lp486e 82596 at %#3lx, IRQ %d,",
- dev->name, dev->base_addr, dev->irq);
- for (i = 0; i < 6; i++)
- printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]);
- printk("\n");
-
- /* The LP486E-specific entries in the device structure. */
- dev->netdev_ops = &i596_netdev_ops;
- dev->watchdog_timeo = 5*HZ;
-
-#if 0
- /* selftest reports 0x320925ae - don't know what that means */
- i596_port_do(dev, PORT_SELFTEST, "selftest");
- i596_port_do(dev, PORT_DUMP, "dump");
-#endif
- return 0;
-
-err_out_kfree:
- release_region(IOADDR, LP486E_TOTAL_SIZE);
- return ret;
-}
-
-static inline void
-i596_handle_CU_completion(struct net_device *dev,
- struct i596_private *lp,
- unsigned short status,
- unsigned short *ack_cmdp) {
- struct i596_cmd *cmd;
- int frames_out = 0;
- int commands_done = 0;
- int cmd_val;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->cmd_lock, flags);
- cmd = lp->cmd_head;
-
- while (lp->cmd_head && (lp->cmd_head->status & CMD_STAT_C)) {
- cmd = lp->cmd_head;
-
- lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
- lp->cmd_backlog--;
-
- commands_done++;
- cmd_val = cmd->command & 0x7;
-#if 0
- printk("finished CU %s command (%d)\n",
- CUcmdnames[cmd_val], cmd_val);
-#endif
- switch (cmd_val) {
- case CmdTx:
- {
- struct tx_cmd *tx_cmd;
- struct i596_tbd *tx_cmd_tbd;
-
- tx_cmd = (struct tx_cmd *) cmd;
- tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
-
- frames_out++;
- if (cmd->status & CMD_STAT_OK) {
- if (i596_debug)
- print_eth(pa_to_va(tx_cmd_tbd->pa_data));
- } else {
- dev->stats.tx_errors++;
- if (i596_debug)
- printk("transmission failure:%04x\n",
- cmd->status);
- if (cmd->status & 0x0020)
- dev->stats.collisions++;
- if (!(cmd->status & 0x0040))
- dev->stats.tx_heartbeat_errors++;
- if (cmd->status & 0x0400)
- dev->stats.tx_carrier_errors++;
- if (cmd->status & 0x0800)
- dev->stats.collisions++;
- if (cmd->status & 0x1000)
- dev->stats.tx_aborted_errors++;
- }
- dev_kfree_skb_irq(tx_cmd_tbd->skb);
-
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)tx_cmd);
- netif_wake_queue(dev);
- break;
- }
-
- case CmdMulticastList:
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)cmd);
- break;
-
- case CmdTDR:
- {
- unsigned long status = *((unsigned long *) (cmd + 1));
- if (status & 0x8000) {
- if (i596_debug)
- printk("%s: link ok.\n", dev->name);
- } else {
- if (status & 0x4000)
- printk("%s: Transceiver problem.\n",
- dev->name);
- if (status & 0x2000)
- printk("%s: Termination problem.\n",
- dev->name);
- if (status & 0x1000)
- printk("%s: Short circuit.\n",
- dev->name);
- printk("%s: Time %ld.\n",
- dev->name, status & 0x07ff);
- }
- }
- default:
- cmd->pa_next = I596_NULL;
- lp->last_cmd = jiffies;
-
- }
- barrier();
- }
-
- cmd = lp->cmd_head;
- while (cmd && (cmd != lp->cmd_tail)) {
- cmd->command &= 0x1fff;
- cmd = pa_to_va(cmd->pa_next);
- barrier();
- }
-
- if (lp->cmd_head)
- *ack_cmdp |= CUC_START;
- lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
- spin_unlock_irqrestore(&lp->cmd_lock, flags);
-}
-
-static irqreturn_t
-i596_interrupt(int irq, void *dev_instance)
-{
- struct net_device *dev = dev_instance;
- struct i596_private *lp = netdev_priv(dev);
- unsigned short status, ack_cmd = 0;
- int frames_in = 0;
-
- /*
- * The 82596 examines the command, performs the required action,
- * and then clears the SCB command word.
- */
- if (lp->scb.command && i596_timeout(dev, "interrupt", 40))
- ;
-
- /*
- * The status word indicates the status of the 82596.
- * It is modified only by the 82596.
- *
- * [So, we must not clear it. I find often status 0xffff,
- * which is not one of the values allowed by the docs.]
- */
- status = lp->scb.status;
-#if 0
- if (i596_debug) {
- printk("%s: i596 interrupt, ", dev->name);
- i596_out_status(status);
- }
-#endif
- /* Impossible, but it happens - perhaps when we get
- a receive interrupt but scb.pa_rfd is I596_NULL. */
- if (status == 0xffff) {
- printk("%s: i596_interrupt: got status 0xffff\n", dev->name);
- goto out;
- }
-
- ack_cmd = (status & STAT_ACK);
-
- if (status & (STAT_CX | STAT_CNA))
- i596_handle_CU_completion(dev, lp, status, &ack_cmd);
-
- if (status & (STAT_FR | STAT_RNR)) {
- /* Restart the receive unit when it got inactive somehow */
- if ((status & STAT_RNR) && netif_running(dev))
- ack_cmd |= RX_START;
-
- if (status & STAT_FR) {
- frames_in = i596_rx(dev);
- if (!frames_in)
- printk("receive frame reported, but no frames\n");
- }
- }
-
- /* acknowledge the interrupt */
- /*
- if ((lp->scb.pa_cmd != I596_NULL) && netif_running(dev))
- ack_cmd |= CUC_START;
- */
-
- if (lp->scb.command && i596_timeout(dev, "i596 interrupt", 100))
- ;
-
- lp->scb.command = ack_cmd;
-
- CLEAR_INT();
- CA();
-
- out:
- return IRQ_HANDLED;
-}
-
-static int i596_close(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
-
- netif_stop_queue(dev);
-
- if (i596_debug)
- printk("%s: Shutting down ethercard, status was %4.4x.\n",
- dev->name, lp->scb.status);
-
- lp->scb.command = (CUC_ABORT | RX_ABORT);
- CA();
-
- i596_cleanup_cmd(dev);
-
- if (lp->scb.command && i596_timeout(dev, "i596_close", 200))
- ;
-
- free_irq(dev->irq, dev);
- remove_rx_bufs(dev);
-
- return 0;
-}
-
-/*
- * Set or clear the multicast filter for this adaptor.
- */
-
-static void set_multicast_list(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- struct i596_cmd *cmd;
-
- if (i596_debug > 1)
- printk ("%s: set multicast list %d\n",
- dev->name, netdev_mc_count(dev));
-
- if (!netdev_mc_empty(dev)) {
- struct netdev_hw_addr *ha;
- char *cp;
- cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
- netdev_mc_count(dev) * 6, GFP_ATOMIC);
- if (cmd == NULL) {
- printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
- return;
- }
- cmd->command = CmdMulticastList;
- *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
- cp = ((char *)(cmd + 1))+2;
- netdev_for_each_mc_addr(ha, dev) {
- memcpy(cp, ha->addr, 6);
- cp += 6;
- }
- if (i596_debug & LOG_SRCDST)
- print_eth (((char *)(cmd + 1)) + 2);
- i596_add_cmd(dev, cmd);
- } else {
- if (lp->set_conf.pa_next != I596_NULL) {
- return;
- }
- if (netdev_mc_empty(dev) &&
- !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
- lp->i596_config[8] &= ~0x01;
- } else {
- lp->i596_config[8] |= 0x01;
- }
-
- i596_add_cmd(dev, &lp->set_conf);
- }
-}
-
-MODULE_AUTHOR("Ard van Breemen <ard@cstmel.nl.eu.org>");
-MODULE_DESCRIPTION("Intel Panther onboard i82596 driver");
-MODULE_LICENSE("GPL");
-
-static struct net_device *dev_lp486e;
-static int full_duplex;
-static int options;
-static int io = IOADDR;
-static int irq = IRQ;
-
-module_param(debug, int, 0);
-//module_param(max_interrupt_work, int, 0);
-//module_param(reverse_probe, int, 0);
-//module_param(rx_copybreak, int, 0);
-module_param(options, int, 0);
-module_param(full_duplex, int, 0);
-
-static int __init lp486e_init_module(void) {
- int err;
- struct net_device *dev = alloc_etherdev(sizeof(struct i596_private));
- if (!dev)
- return -ENOMEM;
-
- dev->irq = irq;
- dev->base_addr = io;
- err = lp486e_probe(dev);
- if (err) {
- free_netdev(dev);
- return err;
- }
- err = register_netdev(dev);
- if (err) {
- release_region(dev->base_addr, LP486E_TOTAL_SIZE);
- free_netdev(dev);
- return err;
- }
- dev_lp486e = dev;
- full_duplex = 0;
- options = 0;
- return 0;
-}
-
-static void __exit lp486e_cleanup_module(void) {
- unregister_netdev(dev_lp486e);
- release_region(dev_lp486e->base_addr, LP486E_TOTAL_SIZE);
- free_netdev(dev_lp486e);
-}
-
-module_init(lp486e_init_module);
-module_exit(lp486e_cleanup_module);
diff --git a/drivers/net/ethernet/i825xx/ni52.c b/drivers/net/ethernet/i825xx/ni52.c
deleted file mode 100644
index 272976e1bb0f..000000000000
--- a/drivers/net/ethernet/i825xx/ni52.c
+++ /dev/null
@@ -1,1346 +0,0 @@
-/*
- * net-3-driver for the NI5210 card (i82586 Ethernet chip)
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers that work.
- *
- * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later)
- * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de)
- * [feel free to mail ....]
- *
- * when using as module: (no autoprobing!)
- * run with e.g:
- * insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000
- *
- * PLEASE REPORT YOUR PERFORMANCE EXPERIENCES TO ME!
- *
- * If you find a bug, please report to me:
- * The kernel panic output and any kmsg from the ni52 driver
- * the ni5210-driver-version and the linux-kernel version
- * how much shared memory (memsize) is on the netcard,
- * bootprom: yes/no, base_addr, mem_start
- * maybe the ni5210-card revision and the i82586 version
- *
- * autoprobe for: base_addr: 0x300,0x280,0x360,0x320,0x340
- * mem_start: 0xd0000,0xd2000,0xc8000,0xca000,0xd4000,0xd6000,
- * 0xd8000,0xcc000,0xce000,0xda000,0xdc000
- *
- * sources:
- * skeleton.c from Donald Becker
- *
- * I have also done a look in the following sources: (mail me if you need them)
- * crynwr-packet-driver by Russ Nelson
- * Garret A. Wollman's (fourth) i82586-driver for BSD
- * (before getting an i82596 (yes 596 not 586) manual, the existing drivers
- * helped me a lot to understand this tricky chip.)
- *
- * Known Problems:
- * The internal sysbus seems to be slow. So we often lose packets because of
- * overruns while receiving from a fast remote host.
- * This can slow down TCP connections. Maybe the newer ni5210 cards are
- * better. My experience is that if a machine sends at more than about
- * 500-600K/s, the fifo/sysbus overflows.
- *
- * IMPORTANT NOTE:
- * On fast networks, it's a (very) good idea to have 16K shared memory. With
- * 8K, we can store only 4 receive frames, so it can (easily) happen that a
- * remote machine 'overruns' our system.
- *
- * Known i82586/card problems (I'm sure, there are many more!):
- * Running the NOP-mode, the i82586 sometimes seems to forget to report
- * every xmit-interrupt until we restart the CU.
- * Another MAJOR bug is that the RU sometimes seems to ignore the EL-Bit
- * in the RBD-Struct which indicates an end of the RBD queue.
- * Instead, the RU fetches another (randomly selected and
- * usually used) RBD and begins to fill it. (Maybe this happens only if
- * the last buffer from the previous RFD fits exactly into the queue and
- * the next RFD can't fetch an initial RBD. Does anyone know more?)
- *
- * results from ftp performance tests with Linux 1.2.5
- * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s)
- * sending in NOP-mode: peak performance up to 530K/s (but better don't
- * run this mode)
- */
-
-/*
- * 29.Sept.96: virt_to_bus changes for new memory scheme
- * 19.Feb.96: more Mcast changes, module support (MH)
- *
- * 18.Nov.95: Mcast changes (AC).
- *
- * 23.April.95: fixed(?) receiving problems by configuring a RFD more
- * than the number of RBD's. Can maybe cause other problems.
- * 18.April.95: Added MODULE support (MH)
- * 17.April.95: MC related changes in init586() and set_multicast_list().
- * removed use of 'jiffies' in init586() (MH)
- *
- * 19.Sep.94: Added Multicast support (not tested yet) (MH)
- *
- * 18.Sep.94: Workaround for 'EL-Bug'. Removed flexible RBD-handling.
- *	       Now, every RFD has exactly one RBD. (MH)
- *
- * 14.Sep.94: added promiscuous mode, a few cleanups (MH)
- *
- * 19.Aug.94: changed request_irq() parameter (MH)
- *
- * 20.July.94: removed cleanup bugs, removed a 16K-mem-probe-bug (MH)
- *
- * 19.July.94: lotsa cleanups .. (MH)
- *
- * 17.July.94: some patches ... verified to run with 1.1.29 (MH)
- *
- * 4.July.94: patches for Linux 1.1.24 (MH)
- *
- * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH)
- *
- * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff,
- * too (MH)
- *
- * < 30.Sep.93: first versions
- */
-
-static int debuglevel; /* debug-printk 0: off 1: a few 2: more */
-static int automatic_resume; /* experimental .. better left at zero */
-static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */
-static int fifo = 0x8; /* don't change */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/io.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include "ni52.h"
-
-#define DRV_NAME "ni52"
-
-#define DEBUG /* debug on */
-#define SYSBUSVAL 1 /* 8 Bit */
-
-#define ni_attn586() { outb(0, dev->base_addr + NI52_ATTENTION); }
-#define ni_reset586() { outb(0, dev->base_addr + NI52_RESET); }
-#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); }
-#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); }
-
-#define make32(ptr16) ((void __iomem *)(p->memtop + (short) (ptr16)))
-#define make24(ptr32) ((char __iomem *)(ptr32)) - p->base
-#define make16(ptr32) ((unsigned short) ((char __iomem *)(ptr32)\
- - p->memtop))
-
-/******************* how to calculate the buffers *****************************
-
- * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
- * --------------- in a different (more stable?) mode. Only in this mode it's
- * possible to configure the driver with 'NO_NOPCOMMANDS'
-
-sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
-sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
-sizeof(rfd) = 24; sizeof(rbd) = 12;
-sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
-sizeof(nop_cmd) = 8;
-
- * if you don't know the driver, better do not change these values: */
-
-#define RECV_BUFF_SIZE 1524 /* slightly oversized */
-#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
-#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
-#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
-#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
-#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
-
-/**************************************************************************/
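-
-/* A rough sanity check of the default 8K layout, using the sizeofs quoted
- * above and assuming the defaults NUM_XMIT_BUFFS = 1, NUM_RECV_BUFFS_8 = 4
- * and rfdadd = 0 (an estimate only; alignment and the scp placement are
- * ignored):
- *
- *   36 (scp+iscp+scb) + 2*8 (nop cmds) + 4*24 (RFDs) + 4*12 (RBDs)
- *   + 4*1524 (recv buffers) + 16+1524+8 (xmit cmd/buffer/TBD) = 7840 bytes,
- * which leaves roughly 350 bytes of the 8192-byte shared memory spare.
- */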
-
-
-#define NI52_TOTAL_SIZE 16
-#define NI52_ADDR0 0x02
-#define NI52_ADDR1 0x07
-#define NI52_ADDR2 0x01
-
-static int ni52_probe1(struct net_device *dev, int ioaddr);
-static irqreturn_t ni52_interrupt(int irq, void *dev_id);
-static int ni52_open(struct net_device *dev);
-static int ni52_close(struct net_device *dev);
-static netdev_tx_t ni52_send_packet(struct sk_buff *, struct net_device *);
-static struct net_device_stats *ni52_get_stats(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static void ni52_timeout(struct net_device *dev);
-
-/* helper-functions */
-static int init586(struct net_device *dev);
-static int check586(struct net_device *dev, unsigned size);
-static void alloc586(struct net_device *dev);
-static void startrecv586(struct net_device *dev);
-static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr);
-static void ni52_rcv_int(struct net_device *dev);
-static void ni52_xmt_int(struct net_device *dev);
-static void ni52_rnr_int(struct net_device *dev);
-
-struct priv {
- char __iomem *base;
- char __iomem *mapped;
- char __iomem *memtop;
- spinlock_t spinlock;
- int reset;
- struct rfd_struct __iomem *rfd_last, *rfd_top, *rfd_first;
- struct scp_struct __iomem *scp;
- struct iscp_struct __iomem *iscp;
- struct scb_struct __iomem *scb;
- struct tbd_struct __iomem *xmit_buffs[NUM_XMIT_BUFFS];
-#if (NUM_XMIT_BUFFS == 1)
- struct transmit_cmd_struct __iomem *xmit_cmds[2];
- struct nop_cmd_struct __iomem *nop_cmds[2];
-#else
- struct transmit_cmd_struct __iomem *xmit_cmds[NUM_XMIT_BUFFS];
- struct nop_cmd_struct __iomem *nop_cmds[NUM_XMIT_BUFFS];
-#endif
- int nop_point, num_recv_buffs;
- char __iomem *xmit_cbuffs[NUM_XMIT_BUFFS];
- int xmit_count, xmit_last;
-};
-
-/* wait for command with timeout: */
-static void wait_for_scb_cmd(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
- int i;
- for (i = 0; i < 16384; i++) {
- if (readb(&p->scb->cmd_cuc) == 0)
- break;
- udelay(4);
- if (i == 16383) {
- printk(KERN_ERR "%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",
- dev->name, readb(&p->scb->cmd_cuc), readb(&p->scb->cus));
- if (!p->reset) {
- p->reset = 1;
- ni_reset586();
- }
- }
- }
-}
-
-static void wait_for_scb_cmd_ruc(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
- int i;
- for (i = 0; i < 16384; i++) {
- if (readb(&p->scb->cmd_ruc) == 0)
- break;
- udelay(4);
- if (i == 16383) {
- printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",
- dev->name, readb(&p->scb->cmd_ruc),
- readb(&p->scb->rus));
- if (!p->reset) {
- p->reset = 1;
- ni_reset586();
- }
- }
- }
-}
-
-static void wait_for_stat_compl(void __iomem *p)
-{
- struct nop_cmd_struct __iomem *addr = p;
- int i;
- for (i = 0; i < 32767; i++) {
- if (readw(&((addr)->cmd_status)) & STAT_COMPL)
- break;
- udelay(32);
- }
-}
-
-/**********************************************
- * close device
- */
-static int ni52_close(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- ni_reset586(); /* the hard way to stop the receiver */
- netif_stop_queue(dev);
- return 0;
-}
-
-/**********************************************
- * open device
- */
-static int ni52_open(struct net_device *dev)
-{
- int ret;
-
- ni_disint();
- alloc586(dev);
- init586(dev);
- startrecv586(dev);
- ni_enaint();
-
- ret = request_irq(dev->irq, ni52_interrupt, 0, dev->name, dev);
- if (ret) {
- ni_reset586();
- return ret;
- }
- netif_start_queue(dev);
- return 0; /* most done by init */
-}
-
-static int check_iscp(struct net_device *dev, void __iomem *addr)
-{
- struct iscp_struct __iomem *iscp = addr;
- struct priv *p = netdev_priv(dev);
- memset_io(iscp, 0, sizeof(struct iscp_struct));
-
- writel(make24(iscp), &p->scp->iscp);
- writeb(1, &iscp->busy);
-
- ni_reset586();
- ni_attn586();
- mdelay(32); /* wait a while... */
- /* i82586 clears 'busy' after successful init */
- if (readb(&iscp->busy))
- return 0;
- return 1;
-}
-
-/**********************************************
- * Check to see if there's an 82586 out there.
- */
-static int check586(struct net_device *dev, unsigned size)
-{
- struct priv *p = netdev_priv(dev);
- int i;
-
- p->mapped = ioremap(dev->mem_start, size);
- if (!p->mapped)
- return 0;
-
- p->base = p->mapped + size - 0x01000000;
- p->memtop = p->mapped + size;
- p->scp = (struct scp_struct __iomem *)(p->base + SCP_DEFAULT_ADDRESS);
- p->scb = (struct scb_struct __iomem *) p->mapped;
- p->iscp = (struct iscp_struct __iomem *)p->scp - 1;
- memset_io(p->scp, 0, sizeof(struct scp_struct));
- for (i = 0; i < sizeof(struct scp_struct); i++)
- /* memory was writeable? */
- if (readb((char __iomem *)p->scp + i))
- goto Enodev;
- writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */
- if (readb(&p->scp->sysbus) != SYSBUSVAL)
- goto Enodev;
-
- if (!check_iscp(dev, p->mapped))
- goto Enodev;
- if (!check_iscp(dev, p->iscp))
- goto Enodev;
- return 1;
-Enodev:
- iounmap(p->mapped);
- return 0;
-}
-
-/******************************************************************
- * set iscp at the right place, called by ni52_probe1 and open586.
- */
-static void alloc586(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-
- ni_reset586();
- mdelay(32);
-
- memset_io(p->iscp, 0, sizeof(struct iscp_struct));
- memset_io(p->scp , 0, sizeof(struct scp_struct));
-
- writel(make24(p->iscp), &p->scp->iscp);
- writeb(SYSBUSVAL, &p->scp->sysbus);
- writew(make16(p->scb), &p->iscp->scb_offset);
-
- writeb(1, &p->iscp->busy);
- ni_reset586();
- ni_attn586();
-
- mdelay(32);
-
- if (readb(&p->iscp->busy))
- printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name);
-
- p->reset = 0;
-
- memset_io(p->scb, 0, sizeof(struct scb_struct));
-}
-
-/* set: io,irq,memstart,memend or set it when calling insmod */
-static int irq = 9;
-static int io = 0x300;
-static long memstart; /* e.g 0xd0000 */
-static long memend; /* e.g 0xd4000 */
-
-/**********************************************
- * probe the ni5210-card
- */
-struct net_device * __init ni52_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct priv));
- static const int ports[] = {0x300, 0x280, 0x360, 0x320, 0x340, 0};
- const int *port;
- struct priv *p;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- p = netdev_priv(dev);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- memstart = dev->mem_start;
- memend = dev->mem_end;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = ni52_probe1(dev, io);
- } else if (io > 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = ports; *port && ni52_probe1(dev, *port) ; port++)
- ;
- if (*port)
- goto got_it;
-#ifdef FULL_IO_PROBE
- for (io = 0x200; io < 0x400 && ni52_probe1(dev, io); io += 8)
- ;
- if (io < 0x400)
- goto got_it;
-#endif
- err = -ENODEV;
- }
- if (err)
- goto out;
-got_it:
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- iounmap(p->mapped);
- release_region(dev->base_addr, NI52_TOTAL_SIZE);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops ni52_netdev_ops = {
- .ndo_open = ni52_open,
- .ndo_stop = ni52_close,
- .ndo_get_stats = ni52_get_stats,
- .ndo_tx_timeout = ni52_timeout,
- .ndo_start_xmit = ni52_send_packet,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init ni52_probe1(struct net_device *dev, int ioaddr)
-{
- int i, size, retval;
- struct priv *priv = netdev_priv(dev);
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
- dev->mem_start = memstart;
- dev->mem_end = memend;
-
- spin_lock_init(&priv->spinlock);
-
- if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME))
- return -EBUSY;
-
- if (!(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
- !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) {
- retval = -ENODEV;
- goto out;
- }
-
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(dev->base_addr+i);
-
- if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 ||
- dev->dev_addr[2] != NI52_ADDR2) {
- retval = -ENODEV;
- goto out;
- }
-
- printk(KERN_INFO "%s: NI5210 found at %#3lx, ",
- dev->name, dev->base_addr);
-
- /*
- * check (or search) IO-Memory, 8K and 16K
- */
-#ifdef MODULE
- size = dev->mem_end - dev->mem_start;
- if (size != 0x2000 && size != 0x4000) {
- printk("\n");
- printk(KERN_ERR "%s: Invalid memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n", dev->name, size);
- retval = -ENODEV;
- goto out;
- }
- if (!check586(dev, size)) {
- printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size);
- retval = -ENODEV;
- goto out;
- }
-#else
- if (dev->mem_start != 0) {
- /* no auto-mem-probe */
- size = 0x4000; /* check for 16K mem */
- if (!check586(dev, size)) {
- size = 0x2000; /* check for 8K mem */
- if (!check586(dev, size)) {
- printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start);
- retval = -ENODEV;
- goto out;
- }
- }
- } else {
- static const unsigned long memaddrs[] = {
- 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000,
- 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0
- };
- for (i = 0;; i++) {
- if (!memaddrs[i]) {
- printk(KERN_ERR "?memprobe, Can't find io-memory!\n");
- retval = -ENODEV;
- goto out;
- }
- dev->mem_start = memaddrs[i];
- size = 0x2000; /* check for 8K mem */
- if (check586(dev, size))
- /* 8K-check */
- break;
- size = 0x4000; /* check for 16K mem */
- if (check586(dev, size))
- /* 16K-check */
- break;
- }
- }
- /* set mem_end showed by 'ifconfig' */
- dev->mem_end = dev->mem_start + size;
-#endif
-
- alloc586(dev);
-
- /* set number of receive-buffs according to memsize */
- if (size == 0x2000)
- priv->num_recv_buffs = NUM_RECV_BUFFS_8;
- else
- priv->num_recv_buffs = NUM_RECV_BUFFS_16;
-
- printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ",
- dev->mem_start, size);
-
- if (dev->irq < 2) {
- unsigned long irq_mask;
-
- irq_mask = probe_irq_on();
- ni_reset586();
- ni_attn586();
-
- mdelay(20);
- dev->irq = probe_irq_off(irq_mask);
- if (!dev->irq) {
- printk("?autoirq, Failed to detect IRQ line!\n");
- retval = -EAGAIN;
- iounmap(priv->mapped);
- goto out;
- }
- printk("IRQ %d (autodetected).\n", dev->irq);
- } else {
- if (dev->irq == 2)
- dev->irq = 9;
- printk("IRQ %d (assigned and not checked!).\n", dev->irq);
- }
-
- dev->netdev_ops = &ni52_netdev_ops;
- dev->watchdog_timeo = HZ/20;
-
- return 0;
-out:
- release_region(ioaddr, NI52_TOTAL_SIZE);
- return retval;
-}
-
-/**********************************************
- * init the chip (ni52-interrupt should be disabled?!)
- * needs a correct 'allocated' memory
- */
-
-static int init586(struct net_device *dev)
-{
- void __iomem *ptr;
- int i, result = 0;
- struct priv *p = netdev_priv(dev);
- struct configure_cmd_struct __iomem *cfg_cmd;
- struct iasetup_cmd_struct __iomem *ias_cmd;
- struct tdr_cmd_struct __iomem *tdr_cmd;
- struct mcsetup_cmd_struct __iomem *mc_cmd;
- struct netdev_hw_addr *ha;
- int num_addrs = netdev_mc_count(dev);
-
- ptr = p->scb + 1;
-
- cfg_cmd = ptr; /* configure-command */
- writew(0, &cfg_cmd->cmd_status);
- writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd);
- writew(0xFFFF, &cfg_cmd->cmd_link);
-
- /* number of cfg bytes */
- writeb(0x0a, &cfg_cmd->byte_cnt);
- /* fifo-limit (8=tx:32/rx:64) */
- writeb(fifo, &cfg_cmd->fifo);
- /* hold or discard bad recv frames (bit 7) */
- writeb(0x40, &cfg_cmd->sav_bf);
- /* addr_len |!src_insert |pre-len |loopback */
- writeb(0x2e, &cfg_cmd->adr_len);
- writeb(0x00, &cfg_cmd->priority);
- writeb(0x60, &cfg_cmd->ifs);
- writeb(0x00, &cfg_cmd->time_low);
- writeb(0xf2, &cfg_cmd->time_high);
- writeb(0x00, &cfg_cmd->promisc);
- if (dev->flags & IFF_ALLMULTI) {
- int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6;
- if (num_addrs > len) {
- printk(KERN_ERR "%s: switching to promisc. mode\n",
- dev->name);
- writeb(0x01, &cfg_cmd->promisc);
- }
- }
- if (dev->flags & IFF_PROMISC)
- writeb(0x01, &cfg_cmd->promisc);
- writeb(0x00, &cfg_cmd->carr_coll);
- writew(make16(cfg_cmd), &p->scb->cbl_offset);
- writeb(0, &p->scb->cmd_ruc);
-
- writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
- ni_attn586();
-
- wait_for_stat_compl(cfg_cmd);
-
- if ((readw(&cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
- (STAT_COMPL|STAT_OK)) {
- printk(KERN_ERR "%s: configure command failed: %x\n",
- dev->name, readw(&cfg_cmd->cmd_status));
- return 1;
- }
-
- /*
- * individual address setup
- */
-
- ias_cmd = ptr;
-
- writew(0, &ias_cmd->cmd_status);
- writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd);
- writew(0xffff, &ias_cmd->cmd_link);
-
- memcpy_toio(&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN);
-
- writew(make16(ias_cmd), &p->scb->cbl_offset);
-
- writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
- ni_attn586();
-
- wait_for_stat_compl(ias_cmd);
-
- if ((readw(&ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
- (STAT_OK|STAT_COMPL)) {
- printk(KERN_ERR "%s (ni52): individual address setup command failed: %04x\n", dev->name, readw(&ias_cmd->cmd_status));
- return 1;
- }
-
- /*
-	 * TDR, wire check .. e.g. no resistor etc.
- */
-
- tdr_cmd = ptr;
-
- writew(0, &tdr_cmd->cmd_status);
- writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd);
- writew(0xffff, &tdr_cmd->cmd_link);
- writew(0, &tdr_cmd->status);
-
- writew(make16(tdr_cmd), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
- ni_attn586();
-
- wait_for_stat_compl(tdr_cmd);
-
- if (!(readw(&tdr_cmd->cmd_status) & STAT_COMPL))
- printk(KERN_ERR "%s: Problems while running the TDR.\n",
- dev->name);
- else {
- udelay(16);
- result = readw(&tdr_cmd->status);
- writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
- ni_attn586(); /* ack the interrupts */
-
- if (result & TDR_LNK_OK)
- ;
- else if (result & TDR_XCVR_PRB)
- printk(KERN_ERR "%s: TDR: Transceiver problem. Check the cable(s)!\n",
- dev->name);
- else if (result & TDR_ET_OPN)
- printk(KERN_ERR "%s: TDR: No correct termination %d clocks away.\n",
- dev->name, result & TDR_TIMEMASK);
- else if (result & TDR_ET_SRT) {
- /* time == 0 -> strange :-) */
- if (result & TDR_TIMEMASK)
- printk(KERN_ERR "%s: TDR: Detected a short circuit %d clocks away.\n",
- dev->name, result & TDR_TIMEMASK);
- } else
- printk(KERN_ERR "%s: TDR: Unknown status %04x\n",
- dev->name, result);
- }
-
- /*
- * Multicast setup
- */
- if (num_addrs && !(dev->flags & IFF_PROMISC)) {
- mc_cmd = ptr;
- writew(0, &mc_cmd->cmd_status);
- writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd);
- writew(0xffff, &mc_cmd->cmd_link);
- writew(num_addrs * 6, &mc_cmd->mc_cnt);
-
- i = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6);
-
- writew(make16(mc_cmd), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- ni_attn586();
-
- wait_for_stat_compl(mc_cmd);
-
- if ((readw(&mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK))
- != (STAT_COMPL|STAT_OK))
- printk(KERN_ERR "%s: Can't apply multicast-address-list.\n", dev->name);
- }
-
- /*
- * alloc nop/xmit-cmds
- */
-#if (NUM_XMIT_BUFFS == 1)
- for (i = 0; i < 2; i++) {
- p->nop_cmds[i] = ptr;
- writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
- writew(0, &p->nop_cmds[i]->cmd_status);
- writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
- ptr = ptr + sizeof(struct nop_cmd_struct);
- }
-#else
- for (i = 0; i < NUM_XMIT_BUFFS; i++) {
- p->nop_cmds[i] = ptr;
- writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
- writew(0, &p->nop_cmds[i]->cmd_status);
- writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
- ptr = ptr + sizeof(struct nop_cmd_struct);
- }
-#endif
-
- ptr = alloc_rfa(dev, ptr); /* init receive-frame-area */
-
- /*
- * alloc xmit-buffs / init xmit_cmds
- */
- for (i = 0; i < NUM_XMIT_BUFFS; i++) {
- /* Transmit cmd/buff 0 */
- p->xmit_cmds[i] = ptr;
- ptr = ptr + sizeof(struct transmit_cmd_struct);
- p->xmit_cbuffs[i] = ptr; /* char-buffs */
- ptr = ptr + XMIT_BUFF_SIZE;
- p->xmit_buffs[i] = ptr; /* TBD */
- ptr = ptr + sizeof(struct tbd_struct);
- if ((void __iomem *)ptr > (void __iomem *)p->iscp) {
- printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n",
- dev->name);
- return 1;
- }
- memset_io(p->xmit_cmds[i], 0,
- sizeof(struct transmit_cmd_struct));
- memset_io(p->xmit_buffs[i], 0,
- sizeof(struct tbd_struct));
- writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]),
- &p->xmit_cmds[i]->cmd_link);
- writew(STAT_COMPL, &p->xmit_cmds[i]->cmd_status);
- writew(CMD_XMIT|CMD_INT, &p->xmit_cmds[i]->cmd_cmd);
- writew(make16(p->xmit_buffs[i]), &p->xmit_cmds[i]->tbd_offset);
- writew(0xffff, &p->xmit_buffs[i]->next);
- writel(make24(p->xmit_cbuffs[i]), &p->xmit_buffs[i]->buffer);
- }
-
- p->xmit_count = 0;
- p->xmit_last = 0;
-#ifndef NO_NOPCOMMANDS
- p->nop_point = 0;
-#endif
-
- /*
- * 'start transmitter'
- */
-#ifndef NO_NOPCOMMANDS
- writew(make16(p->nop_cmds[0]), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- ni_attn586();
- wait_for_scb_cmd(dev);
-#else
- writew(make16(p->xmit_cmds[0]), &p->xmit_cmds[0]->cmd_link);
- writew(CMD_XMIT | CMD_SUSPEND | CMD_INT, &p->xmit_cmds[0]->cmd_cmd);
-#endif
-
- /*
- * ack. interrupts
- */
- writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
- ni_attn586();
- udelay(16);
-
- ni_enaint();
-
- return 0;
-}
-
-/******************************************************
- * This is a helper routine for ni52_rnr_int() and init586().
- * It sets up the Receive Frame Area (RFA).
- */
-
-static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr)
-{
- struct rfd_struct __iomem *rfd = ptr;
- struct rbd_struct __iomem *rbd;
- int i;
- struct priv *p = netdev_priv(dev);
-
- memset_io(rfd, 0,
- sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd));
- p->rfd_first = rfd;
-
- for (i = 0; i < (p->num_recv_buffs + rfdadd); i++) {
- writew(make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd)),
- &rfd[i].next);
- writew(0xffff, &rfd[i].rbd_offset);
- }
- /* RU suspend */
- writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last);
-
- ptr = rfd + (p->num_recv_buffs + rfdadd);
-
- rbd = ptr;
- ptr = rbd + p->num_recv_buffs;
-
- /* clr descriptors */
- memset_io(rbd, 0, sizeof(struct rbd_struct) * (p->num_recv_buffs));
-
- for (i = 0; i < p->num_recv_buffs; i++) {
- writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next);
- writew(RECV_BUFF_SIZE, &rbd[i].size);
- writel(make24(ptr), &rbd[i].buffer);
- ptr = ptr + RECV_BUFF_SIZE;
- }
- p->rfd_top = p->rfd_first;
- p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
-
- writew(make16(p->rfd_first), &p->scb->rfa_offset);
- writew(make16(rbd), &p->rfd_first->rbd_offset);
-
- return ptr;
-}
-
-
-/**************************************************
- * Interrupt Handler ...
- */
-
-static irqreturn_t ni52_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- unsigned int stat;
- int cnt = 0;
- struct priv *p;
-
- p = netdev_priv(dev);
-
- if (debuglevel > 1)
- printk("I");
-
- spin_lock(&p->spinlock);
-
- wait_for_scb_cmd(dev); /* wait for last command */
-
- while ((stat = readb(&p->scb->cus) & STAT_MASK)) {
- writeb(stat, &p->scb->cmd_cuc);
- ni_attn586();
-
- if (stat & STAT_FR) /* received a frame */
- ni52_rcv_int(dev);
-
- if (stat & STAT_RNR) { /* RU went 'not ready' */
- printk("(R)");
- if (readb(&p->scb->rus) & RU_SUSPEND) {
- /* special case: RU_SUSPEND */
- wait_for_scb_cmd(dev);
- writeb(RUC_RESUME, &p->scb->cmd_ruc);
- ni_attn586();
- wait_for_scb_cmd_ruc(dev);
- } else {
- printk(KERN_ERR "%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",
- dev->name, stat, readb(&p->scb->rus));
- ni52_rnr_int(dev);
- }
- }
-
- /* Command with I-bit set complete */
- if (stat & STAT_CX)
- ni52_xmt_int(dev);
-
-#ifndef NO_NOPCOMMANDS
- if (stat & STAT_CNA) { /* CU went 'not ready' */
- if (netif_running(dev))
- printk(KERN_ERR "%s: oops! CU has left active state. stat: %04x/%02x.\n",
- dev->name, stat, readb(&p->scb->cus));
- }
-#endif
-
- if (debuglevel > 1)
- printk("%d", cnt++);
-
- /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */
- wait_for_scb_cmd(dev);
- if (readb(&p->scb->cmd_cuc)) { /* timed out? */
- printk(KERN_ERR "%s: Acknowledge timed out.\n",
- dev->name);
- ni_disint();
- break;
- }
- }
- spin_unlock(&p->spinlock);
-
- if (debuglevel > 1)
- printk("i");
- return IRQ_HANDLED;
-}
-
-/*******************************************************
- * receive-interrupt
- */
-
-static void ni52_rcv_int(struct net_device *dev)
-{
- int status, cnt = 0;
- unsigned short totlen;
- struct sk_buff *skb;
- struct rbd_struct __iomem *rbd;
- struct priv *p = netdev_priv(dev);
-
- if (debuglevel > 0)
- printk("R");
-
- for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) {
- rbd = make32(readw(&p->rfd_top->rbd_offset));
- if (status & RFD_OK) { /* frame received without error? */
- totlen = readw(&rbd->status);
- if (totlen & RBD_LAST) {
- /* the first and the last buffer? */
- totlen &= RBD_MASK; /* length of this frame */
- writew(0x00, &rbd->status);
- skb = netdev_alloc_skb(dev, totlen + 2);
- if (skb != NULL) {
- skb_reserve(skb, 2);
- skb_put(skb, totlen);
- memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += totlen;
- } else
- dev->stats.rx_dropped++;
- } else {
- int rstat;
- /* free all RBD's until RBD_LAST is set */
- totlen = 0;
- while (!((rstat = readw(&rbd->status)) & RBD_LAST)) {
- totlen += rstat & RBD_MASK;
- if (!rstat) {
- printk(KERN_ERR "%s: Whoops .. no end mark in RBD list\n", dev->name);
- break;
- }
- writew(0, &rbd->status);
- rbd = make32(readw(&rbd->next));
- }
- totlen += rstat & RBD_MASK;
- writew(0, &rbd->status);
- printk(KERN_ERR "%s: received oversized frame! length: %d\n",
- dev->name, totlen);
- dev->stats.rx_dropped++;
- }
- } else {/* frame !(ok), only with 'save-bad-frames' */
- printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n",
- dev->name, status);
- dev->stats.rx_errors++;
- }
- writeb(0, &p->rfd_top->stat_high);
- writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */
- writew(0xffff, &p->rfd_top->rbd_offset);
- writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */
- p->rfd_last = p->rfd_top;
- p->rfd_top = make32(readw(&p->rfd_top->next)); /* step to next RFD */
- writew(make16(p->rfd_top), &p->scb->rfa_offset);
-
- if (debuglevel > 0)
- printk("%d", cnt++);
- }
-
- if (automatic_resume) {
- wait_for_scb_cmd(dev);
- writeb(RUC_RESUME, &p->scb->cmd_ruc);
- ni_attn586();
- wait_for_scb_cmd_ruc(dev);
- }
-
-#ifdef WAIT_4_BUSY
- {
- int i;
- for (i = 0; i < 1024; i++) {
- if (p->rfd_top->status)
- break;
- udelay(16);
- if (i == 1023)
- printk(KERN_ERR "%s: RU hasn't fetched next RFD (not busy/complete)\n", dev->name);
- }
- }
-#endif
- if (debuglevel > 0)
- printk("r");
-}
-
-/**********************************************************
- * handle 'Receiver went not ready'.
- */
-
-static void ni52_rnr_int(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-
- dev->stats.rx_errors++;
-
- wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
- writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */
- ni_attn586();
- wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */
-
- alloc_rfa(dev, p->rfd_first);
- /* maybe add a check here, before restarting the RU */
- startrecv586(dev); /* restart RU */
-
- printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n",
- dev->name, readb(&p->scb->rus));
-
-}
-
-/**********************************************************
- * handle xmit - interrupt
- */
-
-static void ni52_xmt_int(struct net_device *dev)
-{
- int status;
- struct priv *p = netdev_priv(dev);
-
- if (debuglevel > 0)
- printk("X");
-
- status = readw(&p->xmit_cmds[p->xmit_last]->cmd_status);
- if (!(status & STAT_COMPL))
- printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
-
- if (status & STAT_OK) {
- dev->stats.tx_packets++;
- dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
- } else {
- dev->stats.tx_errors++;
- if (status & TCMD_LATECOLL) {
- printk(KERN_ERR "%s: late collision detected.\n",
- dev->name);
- dev->stats.collisions++;
- } else if (status & TCMD_NOCARRIER) {
- dev->stats.tx_carrier_errors++;
- printk(KERN_ERR "%s: no carrier detected.\n",
- dev->name);
- } else if (status & TCMD_LOSTCTS)
- printk(KERN_ERR "%s: loss of CTS detected.\n",
- dev->name);
- else if (status & TCMD_UNDERRUN) {
- dev->stats.tx_fifo_errors++;
- printk(KERN_ERR "%s: DMA underrun detected.\n",
- dev->name);
- } else if (status & TCMD_MAXCOLL) {
- printk(KERN_ERR "%s: Max. collisions exceeded.\n",
- dev->name);
- dev->stats.collisions += 16;
- }
- }
-#if (NUM_XMIT_BUFFS > 1)
- if ((++p->xmit_last) == NUM_XMIT_BUFFS)
- p->xmit_last = 0;
-#endif
- netif_wake_queue(dev);
-}
-
-/***********************************************************
- * (re)start the receiver
- */
-
-static void startrecv586(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-
- wait_for_scb_cmd(dev);
- wait_for_scb_cmd_ruc(dev);
- writew(make16(p->rfd_first), &p->scb->rfa_offset);
- writeb(RUC_START, &p->scb->cmd_ruc);
- ni_attn586(); /* start cmd. */
- wait_for_scb_cmd_ruc(dev);
- /* wait for accept cmd. (no timeout!!) */
-}
-
-static void ni52_timeout(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-#ifndef NO_NOPCOMMANDS
- if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */
- netif_wake_queue(dev);
-#ifdef DEBUG
- printk(KERN_ERR "%s: strange ... timeout with CU active?!?\n",
- dev->name);
- printk(KERN_ERR "%s: X0: %04x N0: %04x N1: %04x %d\n",
-			dev->name, readw(&p->xmit_cmds[0]->cmd_status),
- readw(&p->nop_cmds[0]->cmd_status),
- readw(&p->nop_cmds[1]->cmd_status),
- p->nop_point);
-#endif
- writeb(CUC_ABORT, &p->scb->cmd_cuc);
- ni_attn586();
- wait_for_scb_cmd(dev);
- writew(make16(p->nop_cmds[p->nop_point]), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- ni_attn586();
- wait_for_scb_cmd(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
-		return;
- }
-#endif
- {
-#ifdef DEBUG
- printk(KERN_ERR "%s: xmitter timed out, try to restart! stat: %02x\n",
- dev->name, readb(&p->scb->cus));
- printk(KERN_ERR "%s: command-stats: %04x %04x\n",
- dev->name,
- readw(&p->xmit_cmds[0]->cmd_status),
- readw(&p->xmit_cmds[1]->cmd_status));
- printk(KERN_ERR "%s: check, whether you set the right interrupt number!\n",
- dev->name);
-#endif
- ni52_close(dev);
- ni52_open(dev);
- }
- dev->trans_start = jiffies; /* prevent tx timeout */
-}
-
-/******************************************************
- * send frame
- */
-
-static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- int len, i;
-#ifndef NO_NOPCOMMANDS
- int next_nop;
-#endif
- struct priv *p = netdev_priv(dev);
-
- if (skb->len > XMIT_BUFF_SIZE) {
- printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
- return NETDEV_TX_OK;
- }
-
- netif_stop_queue(dev);
-
- memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len);
- len = skb->len;
- if (len < ETH_ZLEN) {
- len = ETH_ZLEN;
- memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
- len - skb->len);
- }
-
-#if (NUM_XMIT_BUFFS == 1)
-# ifdef NO_NOPCOMMANDS
-
-#ifdef DEBUG
- if (readb(&p->scb->cus) & CU_ACTIVE) {
- printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
- printk(KERN_ERR "%s: stat: %04x %04x\n",
- dev->name, readb(&p->scb->cus),
- readw(&p->xmit_cmds[0]->cmd_status));
- }
-#endif
- writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
- for (i = 0; i < 16; i++) {
- writew(0, &p->xmit_cmds[0]->cmd_status);
- wait_for_scb_cmd(dev);
- if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND)
- writeb(CUC_RESUME, &p->scb->cmd_cuc);
- else {
- writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- }
- ni_attn586();
- if (!i)
- dev_kfree_skb(skb);
- wait_for_scb_cmd(dev);
- /* test it, because CU sometimes doesn't start immediately */
- if (readb(&p->scb->cus) & CU_ACTIVE)
- break;
- if (readw(&p->xmit_cmds[0]->cmd_status))
- break;
- if (i == 15)
- printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
- }
-# else
- next_nop = (p->nop_point + 1) & 0x1;
- writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
- writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link);
- writew(make16(p->nop_cmds[next_nop]),
- &p->nop_cmds[next_nop]->cmd_link);
- writew(0, &p->xmit_cmds[0]->cmd_status);
- writew(0, &p->nop_cmds[next_nop]->cmd_status);
-
- writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
- p->nop_point = next_nop;
- dev_kfree_skb(skb);
-# endif
-#else
- writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size);
-	next_nop = p->xmit_count + 1;
- if (next_nop == NUM_XMIT_BUFFS)
- next_nop = 0;
- writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status);
- /* linkpointer of xmit-command already points to next nop cmd */
- writew(make16(p->nop_cmds[next_nop]),
- &p->nop_cmds[next_nop]->cmd_link);
- writew(0, &p->nop_cmds[next_nop]->cmd_status);
- writew(make16(p->xmit_cmds[p->xmit_count]),
- &p->nop_cmds[p->xmit_count]->cmd_link);
- p->xmit_count = next_nop;
- {
- unsigned long flags;
-		spin_lock_irqsave(&p->spinlock, flags);
- if (p->xmit_count != p->xmit_last)
- netif_wake_queue(dev);
-		spin_unlock_irqrestore(&p->spinlock, flags);
- }
- dev_kfree_skb(skb);
-#endif
- return NETDEV_TX_OK;
-}
-
-/*******************************************
- * Someone wanna have the statistics
- */
-
-static struct net_device_stats *ni52_get_stats(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
- unsigned short crc, aln, rsc, ovrn;
-
- /* Get error-statistics from the ni82586 */
- crc = readw(&p->scb->crc_errs);
- writew(0, &p->scb->crc_errs);
- aln = readw(&p->scb->aln_errs);
- writew(0, &p->scb->aln_errs);
- rsc = readw(&p->scb->rsc_errs);
- writew(0, &p->scb->rsc_errs);
- ovrn = readw(&p->scb->ovrn_errs);
- writew(0, &p->scb->ovrn_errs);
-
- dev->stats.rx_crc_errors += crc;
- dev->stats.rx_fifo_errors += ovrn;
- dev->stats.rx_frame_errors += aln;
- dev->stats.rx_dropped += rsc;
-
- return &dev->stats;
-}
-
-/********************************************************
- * Set MC list ..
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
- netif_stop_queue(dev);
- ni_disint();
- alloc586(dev);
- init586(dev);
- startrecv586(dev);
- ni_enaint();
- netif_wake_queue(dev);
-}
-
-#ifdef MODULE
-static struct net_device *dev_ni52;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-module_param(memstart, long, 0);
-module_param(memend, long, 0);
-MODULE_PARM_DESC(io, "NI5210 I/O base address, required");
-MODULE_PARM_DESC(irq, "NI5210 IRQ number, required");
-MODULE_PARM_DESC(memstart, "NI5210 memory base address, required");
-MODULE_PARM_DESC(memend, "NI5210 memory end address, required");
-
-int __init init_module(void)
-{
- if (io <= 0x0 || !memend || !memstart || irq < 2) {
- printk(KERN_ERR "ni52: Autoprobing not allowed for modules.\n");
- printk(KERN_ERR "ni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
- return -ENODEV;
- }
- dev_ni52 = ni52_probe(-1);
- if (IS_ERR(dev_ni52))
- return PTR_ERR(dev_ni52);
- return 0;
-}
-
-void __exit cleanup_module(void)
-{
- struct priv *p = netdev_priv(dev_ni52);
- unregister_netdev(dev_ni52);
- iounmap(p->mapped);
- release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE);
- free_netdev(dev_ni52);
-}
-#endif /* MODULE */
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/ni52.h b/drivers/net/ethernet/i825xx/ni52.h
deleted file mode 100644
index 0a03b2883327..000000000000
--- a/drivers/net/ethernet/i825xx/ni52.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Intel i82586 Ethernet definitions
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers that work.
- *
- * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de)
- *
- * I have done a look in the following sources:
- * crynwr-packet-driver by Russ Nelson
- * Garret A. Wollman's i82586-driver for BSD
- */
-
-
-#define NI52_RESET 0 /* writing to this address, resets the i82586 */
-#define NI52_ATTENTION 1 /* channel attention, kick the 586 */
-#define NI52_TENA 3 /* 2-5 possibly wrong, Xmit enable */
-#define NI52_TDIS 2 /* Xmit disable */
-#define NI52_INTENA 5 /* Interrupt enable */
-#define NI52_INTDIS 4 /* Interrupt disable */
-#define NI52_MAGIC1 6 /* dunno exact function */
-#define NI52_MAGIC2 7 /* dunno exact function */
-
-#define NI52_MAGICVAL1 0x00 /* magic-values for ni5210 card */
-#define NI52_MAGICVAL2 0x55
-
-/*
- * where to find the System Configuration Pointer (SCP)
- */
-#define SCP_DEFAULT_ADDRESS 0xfffff4
-
-
-/*
- * System Configuration Pointer Struct
- */
-
-struct scp_struct
-{
- u16 zero_dum0; /* has to be zero */
- u8 sysbus; /* 0=16Bit,1=8Bit */
- u8 zero_dum1; /* has to be zero for 586 */
- u16 zero_dum2;
- u16 zero_dum3;
- u32 iscp; /* pointer to the iscp-block */
-};
-
-
-/*
- * Intermediate System Configuration Pointer (ISCP)
- */
-struct iscp_struct
-{
- u8 busy; /* 586 clears after successful init */
- u8 zero_dummy; /* has to be zero */
- u16 scb_offset; /* pointeroffset to the scb_base */
- u32 scb_base; /* base-address of all 16-bit offsets */
-};
-
-/*
- * System Control Block (SCB)
- */
-struct scb_struct
-{
- u8 rus;
- u8 cus;
- u8 cmd_ruc; /* command word: RU part */
- u8 cmd_cuc; /* command word: CU part & ACK */
- u16 cbl_offset; /* pointeroffset, command block list */
- u16 rfa_offset; /* pointeroffset, receive frame area */
- u16 crc_errs; /* CRC-Error counter */
-	u16 aln_errs;		/* alignment error counter */
-	u16 rsc_errs;		/* resource error counter */
-	u16 ovrn_errs;		/* overrun error counter */
-};
-
-/*
- * possible command values for the command word
- */
-#define RUC_MASK 0x0070 /* mask for RU commands */
-#define RUC_NOP 0x0000 /* NOP-command */
-#define RUC_START 0x0010 /* start RU */
-#define RUC_RESUME 0x0020 /* resume RU after suspend */
-#define RUC_SUSPEND 0x0030 /* suspend RU */
-#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
-
-#define CUC_MASK 0x07 /* mask for CU command */
-#define CUC_NOP 0x00 /* NOP-command */
-#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */
-#define CUC_RESUME 0x02 /* resume after suspend */
-#define CUC_SUSPEND 0x03 /* Suspend CU */
-#define CUC_ABORT 0x04 /* abort command operation immediately */
-
-#define ACK_MASK 0xf0 /* mask for ACK command */
-#define ACK_CX 0x80 /* acknowledges STAT_CX */
-#define ACK_FR 0x40 /* ack. STAT_FR */
-#define ACK_CNA 0x20 /* ack. STAT_CNA */
-#define ACK_RNR 0x10 /* ack. STAT_RNR */
-
-/*
- * possible status values for the status word
- */
-#define STAT_MASK 0xf0 /* mask for cause of interrupt */
-#define STAT_CX 0x80 /* CU finished cmd with its I bit set */
-#define STAT_FR 0x40 /* RU finished receiving a frame */
-#define STAT_CNA 0x20 /* CU left active state */
-#define STAT_RNR 0x10 /* RU left ready state */
-
-#define CU_STATUS 0x7 /* CU status, 0=idle */
-#define CU_SUSPEND 0x1 /* CU is suspended */
-#define CU_ACTIVE 0x2 /* CU is active */
-
-#define RU_STATUS 0x70 /* RU status, 0=idle */
-#define RU_SUSPEND 0x10 /* RU suspended */
-#define RU_NOSPACE 0x20 /* RU no resources */
-#define RU_READY 0x40 /* RU is ready */
-
-/*
- * Receive Frame Descriptor (RFD)
- */
-struct rfd_struct
-{
- u8 stat_low; /* status word */
- u8 stat_high; /* status word */
- u8 rfd_sf; /* 82596 mode only */
- u8 last; /* Bit15,Last Frame on List / Bit14,suspend */
- u16 next; /* linkoffset to next RFD */
- u16 rbd_offset; /* pointeroffset to RBD-buffer */
- u8 dest[6]; /* ethernet-address, destination */
- u8 source[6]; /* ethernet-address, source */
- u16 length; /* 802.3 frame-length */
- u16 zero_dummy; /* dummy */
-};
-
-#define RFD_LAST 0x80 /* last: last rfd in the list */
-#define RFD_SUSP 0x40 /* last: suspend RU after */
-#define RFD_COMPL 0x80
-#define RFD_OK 0x20
-#define RFD_BUSY 0x40
-#define RFD_ERR_LEN	0x10	/* Length error (if length-checking is enabled) */
-#define RFD_ERR_CRC 0x08 /* CRC error */
-#define RFD_ERR_ALGN 0x04 /* Alignment error */
-#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
-#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
-
-#define RFD_ERR_FTS	0x0080	/* Frame too short */
-#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
-#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
-#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */
-#define RFD_COLLDET 0x0001 /* Detected collision during reception */
-
-/*
- * Receive Buffer Descriptor (RBD)
- */
-struct rbd_struct
-{
- u16 status; /* status word,number of used bytes in buff */
- u16 next; /* pointeroffset to next RBD */
- u32 buffer; /* receive buffer address pointer */
- u16 size; /* size of this buffer */
- u16 zero_dummy; /* dummy */
-};
-
-#define RBD_LAST 0x8000 /* last buffer */
-#define RBD_USED 0x4000 /* this buffer has data */
-#define RBD_MASK 0x3fff /* size-mask for length */
-
-/*
- * Statusvalues for Commands/RFD
- */
-#define STAT_COMPL 0x8000 /* status: frame/command is complete */
-#define STAT_BUSY 0x4000 /* status: frame/command is busy */
-#define STAT_OK 0x2000 /* status: frame/command is ok */
-
-/*
- * Action-Commands
- */
-#define CMD_NOP 0x0000 /* NOP */
-#define CMD_IASETUP 0x0001 /* initial address setup command */
-#define CMD_CONFIGURE 0x0002 /* configure command */
-#define CMD_MCSETUP 0x0003 /* MC setup command */
-#define CMD_XMIT 0x0004 /* transmit command */
-#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
-#define CMD_DUMP 0x0006 /* dump command */
-#define CMD_DIAGNOSE 0x0007 /* diagnose command */
-
-/*
- * Action command bits
- */
-#define CMD_LAST 0x8000 /* indicates last command in the CBL */
-#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
-#define CMD_INT 0x2000 /* generate interrupt after execution */
-
-/*
- * NOP - command
- */
-struct nop_cmd_struct
-{
- u16 cmd_status; /* status of this command */
- u16 cmd_cmd; /* the command itself (+bits) */
- u16 cmd_link; /* offsetpointer to next command */
-};
-
-/*
- * IA Setup command
- */
-struct iasetup_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u8 iaddr[6];
-};
-
-/*
- * Configure command
- */
-struct configure_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u8 byte_cnt; /* size of the config-cmd */
- u8 fifo; /* fifo/recv monitor */
- u8 sav_bf; /* save bad frames (bit7=1)*/
- u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
- u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
- u8 ifs; /* inter frame spacing */
- u8 time_low; /* slot time low */
- u8 time_high; /* slot time high(0-2) and max. retries(4-7) */
- u8 promisc; /* promisc-mode(0) , et al (1-7) */
- u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */
- u8 fram_len; /* minimal frame len */
- u8 dummy; /* dummy */
-};
-
-/*
- * Multicast Setup command
- */
-struct mcsetup_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 mc_cnt; /* number of bytes in the MC-List */
- u8 mc_list[0][6]; /* pointer to 6 bytes entries */
-};
-
-/*
- * DUMP command
- */
-struct dump_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 dump_offset; /* pointeroffset to DUMP space */
-};
-
-/*
- * transmit command
- */
-struct transmit_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 tbd_offset; /* pointeroffset to TBD */
- u8 dest[6]; /* destination address of the frame */
- u16 length; /* user defined: 802.3 length / Ether type */
-};
-
-#define TCMD_ERRMASK 0x0fa0
-#define TCMD_MAXCOLLMASK 0x000f
-#define TCMD_MAXCOLL 0x0020
-#define TCMD_HEARTBEAT 0x0040
-#define TCMD_DEFERRED 0x0080
-#define TCMD_UNDERRUN 0x0100
-#define TCMD_LOSTCTS 0x0200
-#define TCMD_NOCARRIER 0x0400
-#define TCMD_LATECOLL 0x0800
-
-struct tdr_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 status;
-};
-
-#define TDR_LNK_OK 0x8000 /* No link problem identified */
-#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
-#define TDR_ET_OPN 0x2000 /* open, no correct termination */
-#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
-#define TDR_TIMEMASK 0x07ff /* mask for the time field */
-
-/*
- * Transmit Buffer Descriptor (TBD)
- */
-struct tbd_struct
-{
- u16 size; /* size + EOF-Flag(15) */
- u16 next; /* pointeroffset to next TBD */
- u32 buffer; /* pointer to buffer */
-};
-
-#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
-
-
-
-
diff --git a/drivers/net/ethernet/i825xx/znet.c b/drivers/net/ethernet/i825xx/znet.c
deleted file mode 100644
index c9479e081b8a..000000000000
--- a/drivers/net/ethernet/i825xx/znet.c
+++ /dev/null
@@ -1,928 +0,0 @@
-/* znet.c: A Zenith Z-Note ethernet driver for linux. */
-
-/*
- Written by Donald Becker.
-
- The author may be reached as becker@scyld.com.
- This driver is based on the Linux skeleton driver. The copyright of the
- skeleton driver is held by the United States Government, as represented
- by DIRNSA, and it is released under the GPL.
-
- Thanks to Mike Hollick for alpha testing and suggestions.
-
- References:
- The Crynwr packet driver.
-
- "82593 CSMA/CD Core LAN Controller" Intel datasheet, 1992
- Intel Microcommunications Databook, Vol. 1, 1990.
- As usual with Intel, the documentation is incomplete and inaccurate.
- I had to read the Crynwr packet driver to figure out how to actually
- use the i82593, and guess at what register bits matched the loosely
- related i82586.
-
- Theory of Operation
-
- The i82593 used in the Zenith Z-Note series operates using two(!) slave
- DMA channels, one interrupt, and one 8-bit I/O port.
-
- While there are several ways to configure the '593 DMA system, I chose the one
- that seemed commensurate with the highest system performance in the face
- of moderate interrupt latency: Both DMA channels are configured as
- recirculating ring buffers, with one channel (#0) dedicated to Rx and
- the other channel (#1) to Tx and configuration. (Note that this is
- different than the Crynwr driver, where the Tx DMA channel is initialized
- before each operation. That approach simplifies operation and Tx error
- recovery, but requires additional I/O in normal operation and precludes
- transmit buffer chaining.)
-
- Both rings are set to 8192 bytes using {TX,RX}_RING_SIZE. This provides
- a reasonable ring size for Rx, while simplifying DMA buffer allocation --
- DMA buffers must not cross a 128K boundary. (In truth the size selection
- was influenced by my lack of '593 documentation. I thus was constrained
- to use the Crynwr '593 initialization table, which sets the Rx ring size
- to 8K.)
-
- Despite my usual low opinion about Intel-designed parts, I must admit
- that the bulk data handling of the i82593 is a good design for
- an integrated system, like a laptop, where using two slave DMA channels
- doesn't pose a problem. I still take issue with using only a single I/O
- port. In the same controlled environment there are essentially no
- limitations on I/O space, and using multiple locations would eliminate
- the need for multiple operations when looking at status registers,
- setting the Rx ring boundary, or switching to promiscuous mode.
-
- I also question Zenith's selection of the '593: one of the advertised
- advantages of earlier Intel parts was that if you figured out the magic
- initialization incantation you could use the same part on many different
- network types. Zenith's use of the "FriendlyNet" (sic) connector rather
- than an on-board transceiver leads me to believe that they were planning
- to take advantage of this. But, uhmmm, the '593 omits all but ethernet
- functionality from the serial subsystem.
- */
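-
-/* A sketch of the 128K-page constraint described above (illustrative only;
- * the helper name below is made up and is not part of this driver). The
- * dma_page_eq() macro further down applies the same >>17 comparison to two
- * pointers:
- *
- *	static int buf_in_one_dma_page(void *buf, unsigned long size)
- *	{
- *		unsigned long start = (unsigned long)buf;
- *		return (start >> 17) == ((start + size - 1) >> 17);
- *	}
- */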
-
-/* 10/2002
-
- o Resurrected for Linux 2.5+ by Marc Zyngier <maz@wild-wind.fr.eu.org> :
-
- - Removed strange DMA snooping in znet_send_packet, which led to
- TX buffer corruption on my laptop.
- - Use init_etherdev stuff.
- - Use kmalloc-ed DMA buffers.
- - Use as few global variables as possible.
- - Use proper resources management.
- - Use wireless/i82593.h as much as possible (structure, constants)
- - Compiles as module or build-in.
- - Now survives unplugging/replugging cable.
-
- Some code was taken from wavelan_cs.
-
- Tested on a vintage Zenith Z-Note 433Lnp+. Probably broken on
- anything else. Testers (and detailed bug reports) are welcome :-).
-
- o TODO :
-
- - Properly handle multicast
- - Understand why some traffic patterns add a 1s latency...
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/i82593.h>
-
-static char version[] __initdata = "znet.c:v1.02 9/23/94 becker@scyld.com\n";
-
-#ifndef ZNET_DEBUG
-#define ZNET_DEBUG 1
-#endif
-static unsigned int znet_debug = ZNET_DEBUG;
-module_param (znet_debug, int, 0);
-MODULE_PARM_DESC (znet_debug, "ZNet debug level");
-MODULE_LICENSE("GPL");
-
-/* The DMA modes we need aren't in <dma.h>. */
-#define DMA_RX_MODE 0x14 /* Auto init, I/O to mem, ++, demand. */
-#define DMA_TX_MODE 0x18 /* Auto init, Mem to I/O, ++, demand. */
-#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
-#define RX_BUF_SIZE 8192
-#define TX_BUF_SIZE 8192
-#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */
-
-#define TX_TIMEOUT (HZ/10)
-
-struct znet_private {
- int rx_dma, tx_dma;
- spinlock_t lock;
- short sia_base, sia_size, io_size;
- struct i82593_conf_block i593_init;
- /* The starting, current, and end pointers for the packet buffers. */
- ushort *rx_start, *rx_cur, *rx_end;
- ushort *tx_start, *tx_cur, *tx_end;
- ushort tx_buf_len; /* Tx buffer length, in words. */
-};
-
-/* Only one can be built-in;-> */
-static struct net_device *znet_dev;
-
-#define NETIDBLK_MAGIC "NETIDBLK"
-#define NETIDBLK_MAGIC_SIZE 8
-
-struct netidblk {
- char magic[NETIDBLK_MAGIC_SIZE]; /* The magic number (string) "NETIDBLK" */
- unsigned char netid[8]; /* The physical station address */
- char nettype, globalopt;
- char vendor[8]; /* The machine vendor and product name. */
- char product[8];
- char irq1, irq2; /* Interrupts, only one is currently used. */
- char dma1, dma2;
- short dma_mem_misc[8]; /* DMA buffer locations (unused in Linux). */
- short iobase1, iosize1;
- short iobase2, iosize2; /* Second iobase unused. */
- char driver_options; /* Misc. bits */
- char pad;
-};
-
-static int znet_open(struct net_device *dev);
-static netdev_tx_t znet_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t znet_interrupt(int irq, void *dev_id);
-static void znet_rx(struct net_device *dev);
-static int znet_close(struct net_device *dev);
-static void hardware_init(struct net_device *dev);
-static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset);
-static void znet_tx_timeout (struct net_device *dev);
-
-/* Request needed resources */
-static int znet_request_resources (struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
-
- if (request_irq (dev->irq, znet_interrupt, 0, "ZNet", dev))
- goto failed;
- if (request_dma (znet->rx_dma, "ZNet rx"))
- goto free_irq;
- if (request_dma (znet->tx_dma, "ZNet tx"))
- goto free_rx_dma;
- if (!request_region (znet->sia_base, znet->sia_size, "ZNet SIA"))
- goto free_tx_dma;
- if (!request_region (dev->base_addr, znet->io_size, "ZNet I/O"))
- goto free_sia;
-
- return 0; /* Happy ! */
-
- free_sia:
- release_region (znet->sia_base, znet->sia_size);
- free_tx_dma:
- free_dma (znet->tx_dma);
- free_rx_dma:
- free_dma (znet->rx_dma);
- free_irq:
- free_irq (dev->irq, dev);
- failed:
- return -1;
-}
-
-static void znet_release_resources (struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
-
- release_region (znet->sia_base, znet->sia_size);
- release_region (dev->base_addr, znet->io_size);
- free_dma (znet->tx_dma);
- free_dma (znet->rx_dma);
- free_irq (dev->irq, dev);
-}
-
-/* Keep the magical SIA stuff in a single function... */
-static void znet_transceiver_power (struct net_device *dev, int on)
-{
- struct znet_private *znet = netdev_priv(dev);
- unsigned char v;
-
- /* Turn on/off the 82501 SIA, using Zenith-specific magic. */
- /* Select LAN control register */
- outb(0x10, znet->sia_base);
-
- if (on)
- v = inb(znet->sia_base + 1) | 0x84;
- else
- v = inb(znet->sia_base + 1) & ~0x84;
-
- outb(v, znet->sia_base+1); /* Turn on/off LAN power (bit 2). */
-}
-
-/* Init the i82593, with current promisc/mcast configuration.
- Also used from hardware_init. */
-static void znet_set_multicast_list (struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- struct i82593_conf_block *cfblk = &znet->i593_init;
-
- memset(cfblk, 0x00, sizeof(struct i82593_conf_block));
-
- /* The configuration block. What an undocumented nightmare.
- The first set of values are those suggested (without explanation)
- for ethernet in the Intel 82586 databook. The rest appear to be
- completely undocumented, except for cryptic notes in the Crynwr
- packet driver. This driver uses the Crynwr values verbatim. */
-
- /* maz : Rewritten to take advantage of the wavelan includes.
- At least we have names, not just blind values */
-
- /* Byte 0 */
- cfblk->fifo_limit = 10; /* = 16 B rx and 80 B tx fifo thresholds */
- cfblk->forgnesi = 0; /* 0=82C501, 1=AMD7992B compatibility */
- cfblk->fifo_32 = 1;
- cfblk->d6mod = 0; /* Run in i82593 advanced mode */
- cfblk->throttle_enb = 1;
-
- /* Byte 1 */
- cfblk->throttle = 8; /* Continuous w/interrupts, 128-clock DMA. */
- cfblk->cntrxint = 0; /* enable continuous mode receive interrupts */
- cfblk->contin = 1; /* enable continuous mode */
-
- /* Byte 2 */
- cfblk->addr_len = ETH_ALEN;
- cfblk->acloc = 1; /* Disable source addr insertion by i82593 */
- cfblk->preamb_len = 2; /* 8 bytes preamble */
- cfblk->loopback = 0; /* Loopback off */
-
- /* Byte 3 */
- cfblk->lin_prio = 0; /* Default priorities & backoff methods. */
- cfblk->tbofstop = 0;
- cfblk->exp_prio = 0;
- cfblk->bof_met = 0;
-
- /* Byte 4 */
- cfblk->ifrm_spc = 6; /* 96 bit times interframe spacing */
-
- /* Byte 5 */
- cfblk->slottim_low = 0; /* 512 bit times slot time (low) */
-
- /* Byte 6 */
- cfblk->slottim_hi = 2; /* 512 bit times slot time (high) */
- cfblk->max_retr = 15; /* 15 collisions retries */
-
- /* Byte 7 */
- cfblk->prmisc = ((dev->flags & IFF_PROMISC) ? 1 : 0); /* Promiscuous mode */
- cfblk->bc_dis = 0; /* Enable broadcast reception */
- cfblk->crs_1 = 0; /* Don't transmit without carrier sense */
- cfblk->nocrc_ins = 0; /* i82593 generates CRC */
- cfblk->crc_1632 = 0; /* 32-bit Autodin-II CRC */
- cfblk->crs_cdt = 0; /* CD not to be interpreted as CS */
-
- /* Byte 8 */
- cfblk->cs_filter = 0; /* CS is recognized immediately */
- cfblk->crs_src = 0; /* External carrier sense */
- cfblk->cd_filter = 0; /* CD is recognized immediately */
-
- /* Byte 9 */
- cfblk->min_fr_len = ETH_ZLEN >> 2; /* Minimum frame length */
-
- /* Byte A */
- cfblk->lng_typ = 1; /* Type/length checks OFF */
- cfblk->lng_fld = 1; /* Disable 802.3 length field check */
- cfblk->rxcrc_xf = 1; /* Don't transfer CRC to memory */
- cfblk->artx = 1; /* Disable automatic retransmission */
- cfblk->sarec = 1; /* Disable source addr trig of CD */
- cfblk->tx_jabber = 0; /* Disable jabber jam sequence */
- cfblk->hash_1 = 1; /* Use bits 0-5 in mc address hash */
- cfblk->lbpkpol = 0; /* Loopback pin active high */
-
- /* Byte B */
- cfblk->fdx = 0; /* Disable full duplex operation */
-
- /* Byte C */
- cfblk->dummy_6 = 0x3f; /* all ones, Default multicast addresses & backoff. */
- cfblk->mult_ia = 0; /* No multiple individual addresses */
- cfblk->dis_bof = 0; /* Disable the backoff algorithm ?! */
-
- /* Byte D */
- cfblk->dummy_1 = 1; /* set to 1 */
- cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */
- cfblk->mc_all = (!netdev_mc_empty(dev) ||
- (dev->flags & IFF_ALLMULTI)); /* multicast all mode */
- cfblk->rcv_mon = 0; /* Monitor mode disabled */
- cfblk->frag_acpt = 0; /* Do not accept fragments */
- cfblk->tstrttrs = 0; /* No start transmission threshold */
-
- /* Byte E */
- cfblk->fretx = 1; /* FIFO automatic retransmission */
- cfblk->runt_eop = 0; /* drop "runt" packets */
- cfblk->hw_sw_pin = 0; /* ?? */
- cfblk->big_endn = 0; /* Big Endian ? no... */
- cfblk->syncrqs = 1; /* Synchronous DRQ deassertion... */
- cfblk->sttlen = 1; /* 6 byte status registers */
- cfblk->rx_eop = 0; /* Signal EOP on packet reception */
- cfblk->tx_eop = 0; /* Signal EOP on packet transmission */
-
- /* Byte F */
- cfblk->rbuf_size = RX_BUF_SIZE >> 12; /* Set receive buffer size */
- cfblk->rcvstop = 1; /* Enable Receive Stop Register */
-
- if (znet_debug > 2) {
- int i;
- unsigned char *c;
-
- for (i = 0, c = (char *) cfblk; i < sizeof (*cfblk); i++)
- printk ("%02X ", c[i]);
- printk ("\n");
- }
-
- *znet->tx_cur++ = sizeof(struct i82593_conf_block);
- memcpy(znet->tx_cur, cfblk, sizeof(struct i82593_conf_block));
- znet->tx_cur += sizeof(struct i82593_conf_block)/2;
- outb(OP0_CONFIGURE | CR0_CHNL, ioaddr);
-
- /* XXX FIXME maz : Add multicast addresses here, so having a
- * multicast address configured isn't equal to IFF_ALLMULTI */
-}
-
-static const struct net_device_ops znet_netdev_ops = {
- .ndo_open = znet_open,
- .ndo_stop = znet_close,
- .ndo_start_xmit = znet_send_packet,
- .ndo_set_rx_mode = znet_set_multicast_list,
- .ndo_tx_timeout = znet_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe
- BIOS area. We just scan for the signature, and pull the vital parameters
- out of the structure. */
-
-static int __init znet_probe (void)
-{
- int i;
- struct netidblk *netinfo;
- struct znet_private *znet;
- struct net_device *dev;
- char *p;
- char *plast = phys_to_virt(0x100000 - NETIDBLK_MAGIC_SIZE);
- int err = -ENOMEM;
-
- /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
- for(p = (char *)phys_to_virt(0xf0000); p <= plast; p++)
- if (*p == 'N' &&
- strncmp(p, NETIDBLK_MAGIC, NETIDBLK_MAGIC_SIZE) == 0)
- break;
-
- if (p > plast) {
- if (znet_debug > 1)
- printk(KERN_INFO "No Z-Note ethernet adaptor found.\n");
- return -ENODEV;
- }
-
- dev = alloc_etherdev(sizeof(struct znet_private));
- if (!dev)
- return -ENOMEM;
-
- znet = netdev_priv(dev);
-
- netinfo = (struct netidblk *)p;
- dev->base_addr = netinfo->iobase1;
- dev->irq = netinfo->irq1;
-
- /* The station address is in the "netidblk" at 0x0f0000. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = netinfo->netid[i];
-
- printk(KERN_INFO "%s: ZNET at %#3lx, %pM"
- ", using IRQ %d DMA %d and %d.\n",
- dev->name, dev->base_addr, dev->dev_addr,
- dev->irq, netinfo->dma1, netinfo->dma2);
-
- if (znet_debug > 1) {
- printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n",
- dev->name, netinfo->vendor,
- netinfo->irq1, netinfo->irq2,
- netinfo->dma1, netinfo->dma2);
- printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n",
- dev->name, netinfo->iobase1, netinfo->iosize1,
- netinfo->iobase2, netinfo->iosize2, netinfo->nettype);
- }
-
- if (znet_debug > 0)
- printk(KERN_INFO "%s", version);
-
- znet->rx_dma = netinfo->dma1;
- znet->tx_dma = netinfo->dma2;
- spin_lock_init(&znet->lock);
- znet->sia_base = 0xe6; /* Magic address for the 82501 SIA */
- znet->sia_size = 2;
- /* maz: Despite the '593 being advertised above as using a
- * single 8-bit I/O port, this driver does many 16-bit
- * accesses. So set io_size accordingly. */
- znet->io_size = 2;
-
- if (!(znet->rx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
- goto free_dev;
- if (!(znet->tx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
- goto free_rx;
-
- if (!dma_page_eq (znet->rx_start, znet->rx_start + (RX_BUF_SIZE/2-1)) ||
- !dma_page_eq (znet->tx_start, znet->tx_start + (TX_BUF_SIZE/2-1))) {
- printk (KERN_WARNING "tx/rx crossing DMA frontiers, giving up\n");
- goto free_tx;
- }
-
- znet->rx_end = znet->rx_start + RX_BUF_SIZE/2;
- znet->tx_buf_len = TX_BUF_SIZE/2;
- znet->tx_end = znet->tx_start + znet->tx_buf_len;
-
- /* The ZNET-specific entries in the device structure. */
- dev->netdev_ops = &znet_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- err = register_netdev(dev);
- if (err)
- goto free_tx;
- znet_dev = dev;
- return 0;
-
- free_tx:
- kfree(znet->tx_start);
- free_rx:
- kfree(znet->rx_start);
- free_dev:
- free_netdev(dev);
- return err;
-}
-
-
-static int znet_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (znet_debug > 2)
- printk(KERN_DEBUG "%s: znet_open() called.\n", dev->name);
-
- /* These should never fail. You can't add devices to a sealed box! */
- if (znet_request_resources (dev)) {
- printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
- return -EBUSY;
- }
-
- znet_transceiver_power (dev, 1);
-
- /* According to the Crynwr driver we should wait 50 msec. for the
- LAN clock to stabilize. My experiments indicate that the '593 can
- be initialized immediately. The delay is probably needed for the
- DC-to-DC converter to come up to full voltage, and for the oscillator
- to be spot-on at 20 MHz before transmitting.
- Until this proves to be a problem we rely on the higher layers for the
- delay and save allocating a timer entry. */
-
- /* maz : Well, I'm getting the following message every time
- * without the delay on a 486@33. This machine is much too
- * fast... :-) So maybe the Crynwr driver wasn't wrong after
- * all, even if the message is completely harmless on my
- * setup. */
- mdelay (50);
-
- /* This follows the packet driver's lead, and checks for success. */
- if (inb(ioaddr) != 0x10 && inb(ioaddr) != 0x00)
- printk(KERN_WARNING "%s: Problem turning on the transceiver power.\n",
- dev->name);
-
- hardware_init(dev);
- netif_start_queue (dev);
-
- return 0;
-}
-
-
-static void znet_tx_timeout (struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- ushort event, tx_status, rx_offset, state;
-
- outb (CR0_STATUS_0, ioaddr);
- event = inb (ioaddr);
- outb (CR0_STATUS_1, ioaddr);
- tx_status = inw (ioaddr);
- outb (CR0_STATUS_2, ioaddr);
- rx_offset = inw (ioaddr);
- outb (CR0_STATUS_3, ioaddr);
- state = inb (ioaddr);
- printk (KERN_WARNING "%s: transmit timed out, status %02x %04x %04x %02x,"
- " resetting.\n", dev->name, event, tx_status, rx_offset, state);
- if (tx_status == TX_LOST_CRS)
- printk (KERN_WARNING "%s: Tx carrier error, check transceiver cable.\n",
- dev->name);
- outb (OP0_RESET, ioaddr);
- hardware_init (dev);
- netif_wake_queue (dev);
-}
-
-static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- struct znet_private *znet = netdev_priv(dev);
- unsigned long flags;
- short length = skb->len;
-
- if (znet_debug > 4)
- printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name);
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
-
- netif_stop_queue (dev);
-
- /* Check that the part hasn't reset itself, probably from suspend. */
- outb(CR0_STATUS_0, ioaddr);
- if (inw(ioaddr) == 0x0010 &&
- inw(ioaddr) == 0x0000 &&
- inw(ioaddr) == 0x0010) {
- if (znet_debug > 1)
- printk (KERN_WARNING "%s : waking up\n", dev->name);
- hardware_init(dev);
- znet_transceiver_power (dev, 1);
- }
-
- if (1) {
- unsigned char *buf = (void *)skb->data;
- ushort *tx_link = znet->tx_cur - 1;
- ushort rnd_len = (length + 1)>>1;
-
- dev->stats.tx_bytes+=length;
-
- if (znet->tx_cur >= znet->tx_end)
- znet->tx_cur = znet->tx_start;
- *znet->tx_cur++ = length;
- if (znet->tx_cur + rnd_len + 1 > znet->tx_end) {
- int semi_cnt = (znet->tx_end - znet->tx_cur)<<1; /* Cvrt to byte cnt. */
- memcpy(znet->tx_cur, buf, semi_cnt);
- rnd_len -= semi_cnt>>1;
- memcpy(znet->tx_start, buf + semi_cnt, length - semi_cnt);
- znet->tx_cur = znet->tx_start + rnd_len;
- } else {
- memcpy(znet->tx_cur, buf, skb->len);
- znet->tx_cur += rnd_len;
- }
- *znet->tx_cur++ = 0;
-
- spin_lock_irqsave(&znet->lock, flags);
- {
- *tx_link = OP0_TRANSMIT | CR0_CHNL;
- /* Is this always safe to do? */
- outb(OP0_TRANSMIT | CR0_CHNL, ioaddr);
- }
- spin_unlock_irqrestore (&znet->lock, flags);
-
- netif_start_queue (dev);
-
- if (znet_debug > 4)
- printk(KERN_DEBUG "%s: Transmitter queued, length %d.\n", dev->name, length);
- }
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/* The ZNET interrupt handler. */
-static irqreturn_t znet_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct znet_private *znet = netdev_priv(dev);
- int ioaddr;
- int boguscnt = 20;
- int handled = 0;
-
- spin_lock (&znet->lock);
-
- ioaddr = dev->base_addr;
-
- outb(CR0_STATUS_0, ioaddr);
- do {
- ushort status = inb(ioaddr);
- if (znet_debug > 5) {
- ushort result, rx_ptr, running;
- outb(CR0_STATUS_1, ioaddr);
- result = inw(ioaddr);
- outb(CR0_STATUS_2, ioaddr);
- rx_ptr = inw(ioaddr);
- outb(CR0_STATUS_3, ioaddr);
- running = inb(ioaddr);
- printk(KERN_DEBUG "%s: interrupt, status %02x, %04x %04x %02x serial %d.\n",
- dev->name, status, result, rx_ptr, running, boguscnt);
- }
- if ((status & SR0_INTERRUPT) == 0)
- break;
-
- handled = 1;
-
- if ((status & SR0_EVENT_MASK) == SR0_TRANSMIT_DONE ||
- (status & SR0_EVENT_MASK) == SR0_RETRANSMIT_DONE ||
- (status & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE) {
- int tx_status;
- outb(CR0_STATUS_1, ioaddr);
- tx_status = inw(ioaddr);
- /* It's undocumented, but tx_status seems to match the i82586. */
- if (tx_status & TX_OK) {
- dev->stats.tx_packets++;
- dev->stats.collisions += tx_status & TX_NCOL_MASK;
- } else {
- if (tx_status & (TX_LOST_CTS | TX_LOST_CRS))
- dev->stats.tx_carrier_errors++;
- if (tx_status & TX_UND_RUN)
- dev->stats.tx_fifo_errors++;
- if (!(tx_status & TX_HRT_BEAT))
- dev->stats.tx_heartbeat_errors++;
- if (tx_status & TX_MAX_COL)
- dev->stats.tx_aborted_errors++;
- /* ...and the catch-all. */
- if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL))
- dev->stats.tx_errors++;
-
- /* Transceiver may be stuck if cable
- * was removed while emitting a
- * packet. Flip it off, then on to
- * reset it. This is very empirical,
- * but it seems to work. */
-
- znet_transceiver_power (dev, 0);
- znet_transceiver_power (dev, 1);
- }
- netif_wake_queue (dev);
- }
-
- if ((status & SR0_RECEPTION) ||
- (status & SR0_EVENT_MASK) == SR0_STOP_REG_HIT) {
- znet_rx(dev);
- }
- /* Clear the interrupts we've handled. */
- outb(CR0_INT_ACK, ioaddr);
- } while (boguscnt--);
-
- spin_unlock (&znet->lock);
-
- return IRQ_RETVAL(handled);
-}
-
-static void znet_rx(struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int boguscount = 1;
- short next_frame_end_offset = 0; /* Offset of next frame start. */
- short *cur_frame_end;
- short cur_frame_end_offset;
-
- outb(CR0_STATUS_2, ioaddr);
- cur_frame_end_offset = inw(ioaddr);
-
- if (cur_frame_end_offset == znet->rx_cur - znet->rx_start) {
- printk(KERN_WARNING "%s: Interrupted, but nothing to receive, offset %03x.\n",
- dev->name, cur_frame_end_offset);
- return;
- }
-
- /* Use the same method as the Crynwr driver: construct a forward list in
- the same area as the backwards links we now have. This allows us to
- pass packets to the upper layers in the order they were received --
- important for fast-path sequential operations. */
- while (znet->rx_start + cur_frame_end_offset != znet->rx_cur &&
- ++boguscount < 5) {
- unsigned short hi_cnt, lo_cnt, hi_status, lo_status;
- int count, status;
-
- if (cur_frame_end_offset < 4) {
- /* Oh no, we have a special case: the frame trailer wraps around
- the end of the ring buffer. We've saved space at the end of
- the ring buffer for just this problem. */
- memcpy(znet->rx_end, znet->rx_start, 8);
- cur_frame_end_offset += (RX_BUF_SIZE/2);
- }
- cur_frame_end = znet->rx_start + cur_frame_end_offset - 4;
-
- lo_status = *cur_frame_end++;
- hi_status = *cur_frame_end++;
- status = ((hi_status & 0xff) << 8) + (lo_status & 0xff);
- lo_cnt = *cur_frame_end++;
- hi_cnt = *cur_frame_end++;
- count = ((hi_cnt & 0xff) << 8) + (lo_cnt & 0xff);
-
- if (znet_debug > 5)
- printk(KERN_DEBUG "Constructing trailer at location %03x, %04x %04x %04x %04x"
- " count %#x status %04x.\n",
- cur_frame_end_offset<<1, lo_status, hi_status, lo_cnt, hi_cnt,
- count, status);
- cur_frame_end[-4] = status;
- cur_frame_end[-3] = next_frame_end_offset;
- cur_frame_end[-2] = count;
- next_frame_end_offset = cur_frame_end_offset;
- cur_frame_end_offset -= ((count + 1)>>1) + 3;
- if (cur_frame_end_offset < 0)
- cur_frame_end_offset += RX_BUF_SIZE/2;
- }
-
- /* Now step forward through the list. */
- do {
- ushort *this_rfp_ptr = znet->rx_start + next_frame_end_offset;
- int status = this_rfp_ptr[-4];
- int pkt_len = this_rfp_ptr[-2];
-
- if (znet_debug > 5)
- printk(KERN_DEBUG "Looking at trailer ending at %04x status %04x length %03x"
- " next %04x.\n", next_frame_end_offset<<1, status, pkt_len,
- this_rfp_ptr[-3]<<1);
- /* Once again we must assume that the i82586 docs apply. */
- if ( ! (status & RX_RCV_OK)) { /* There was an error. */
- dev->stats.rx_errors++;
- if (status & RX_CRC_ERR) dev->stats.rx_crc_errors++;
- if (status & RX_ALG_ERR) dev->stats.rx_frame_errors++;
-#if 0
- if (status & 0x0200) dev->stats.rx_over_errors++; /* Wrong. */
- if (status & 0x0100) dev->stats.rx_fifo_errors++;
-#else
- /* maz : Wild guess... */
- if (status & RX_OVRRUN) dev->stats.rx_over_errors++;
-#endif
- if (status & RX_SRT_FRM) dev->stats.rx_length_errors++;
- } else if (pkt_len > 1536) {
- dev->stats.rx_length_errors++;
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len);
- if (skb == NULL) {
- if (znet_debug)
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- break;
- }
-
- if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) {
- int semi_cnt = (znet->rx_end - znet->rx_cur)<<1;
- memcpy(skb_put(skb,semi_cnt), znet->rx_cur, semi_cnt);
- memcpy(skb_put(skb,pkt_len-semi_cnt), znet->rx_start,
- pkt_len - semi_cnt);
- } else {
- memcpy(skb_put(skb,pkt_len), znet->rx_cur, pkt_len);
- if (znet_debug > 6) {
- unsigned int *packet = (unsigned int *) skb->data;
- printk(KERN_DEBUG "Packet data is %08x %08x %08x %08x.\n", packet[0],
- packet[1], packet[2], packet[3]);
- }
- }
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- znet->rx_cur = this_rfp_ptr;
- if (znet->rx_cur >= znet->rx_end)
- znet->rx_cur -= RX_BUF_SIZE/2;
- update_stop_hit(ioaddr, (znet->rx_cur - znet->rx_start)<<1);
- next_frame_end_offset = this_rfp_ptr[-3];
- if (next_frame_end_offset == 0) /* Read all the frames? */
- break; /* Done for now */
- this_rfp_ptr = znet->rx_start + next_frame_end_offset;
- } while (--boguscount);
-
- /* If any worth-while packets have been received, dev_rint()
- has done a mark_bh(INET_BH) for us and will work on them
- when we get to the bottom-half routine. */
-}
-
-/* The inverse routine to znet_open(). */
-static int znet_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- netif_stop_queue (dev);
-
- outb(OP0_RESET, ioaddr); /* CMD0_RESET */
-
- if (znet_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
- /* Turn off transceiver power. */
- znet_transceiver_power (dev, 0);
-
- znet_release_resources (dev);
-
- return 0;
-}
-
-static void show_dma(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
- unsigned char stat = inb (ioaddr);
- struct znet_private *znet = netdev_priv(dev);
- unsigned long flags;
- short dma_port = ((znet->tx_dma&3)<<2) + IO_DMA2_BASE;
- unsigned addr = inb(dma_port);
- short residue;
-
- addr |= inb(dma_port) << 8;
- residue = get_dma_residue(znet->tx_dma);
-
- if (znet_debug > 1) {
- flags=claim_dma_lock();
- printk(KERN_DEBUG "Stat:%02x Addr: %04x cnt:%3x\n",
- stat, addr<<1, residue);
- release_dma_lock(flags);
- }
-}
-
-/* Initialize the hardware. We have to do this when the board is open()ed
- or when we come out of suspend mode. */
-static void hardware_init(struct net_device *dev)
-{
- unsigned long flags;
- short ioaddr = dev->base_addr;
- struct znet_private *znet = netdev_priv(dev);
-
- znet->rx_cur = znet->rx_start;
- znet->tx_cur = znet->tx_start;
-
- /* Reset the chip, and start it up. */
- outb(OP0_RESET, ioaddr);
-
- flags=claim_dma_lock();
- disable_dma(znet->rx_dma); /* reset by an interrupting task. */
- clear_dma_ff(znet->rx_dma);
- set_dma_mode(znet->rx_dma, DMA_RX_MODE);
- set_dma_addr(znet->rx_dma, isa_virt_to_bus(znet->rx_start));
- set_dma_count(znet->rx_dma, RX_BUF_SIZE);
- enable_dma(znet->rx_dma);
- /* Now set up the Tx channel. */
- disable_dma(znet->tx_dma);
- clear_dma_ff(znet->tx_dma);
- set_dma_mode(znet->tx_dma, DMA_TX_MODE);
- set_dma_addr(znet->tx_dma, isa_virt_to_bus(znet->tx_start));
- set_dma_count(znet->tx_dma, znet->tx_buf_len<<1);
- enable_dma(znet->tx_dma);
- release_dma_lock(flags);
-
- if (znet_debug > 1)
- printk(KERN_DEBUG "%s: Initializing the i82593, rx buf %p tx buf %p\n",
- dev->name, znet->rx_start,znet->tx_start);
- /* Do an empty configure command, just like the Crynwr driver. This
- resets the chip to its default values. */
- *znet->tx_cur++ = 0;
- *znet->tx_cur++ = 0;
- show_dma(dev);
- outb(OP0_CONFIGURE | CR0_CHNL, ioaddr);
-
- znet_set_multicast_list (dev);
-
- *znet->tx_cur++ = 6;
- memcpy(znet->tx_cur, dev->dev_addr, 6);
- znet->tx_cur += 3;
- show_dma(dev);
- outb(OP0_IA_SETUP | CR0_CHNL, ioaddr);
- show_dma(dev);
-
- update_stop_hit(ioaddr, 8192);
- if (znet_debug > 1) printk(KERN_DEBUG "enabling Rx.\n");
- outb(OP0_RCV_ENABLE, ioaddr);
- netif_start_queue (dev);
-}
-
-static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset)
-{
- outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, ioaddr);
- if (znet_debug > 5)
- printk(KERN_DEBUG "Updating stop hit with value %02x.\n",
- (rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE);
- outb((rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE, ioaddr);
- outb(OP1_SWIT_TO_PORT_0, ioaddr);
-}
-
-static __exit void znet_cleanup (void)
-{
- if (znet_dev) {
- struct znet_private *znet = netdev_priv(znet_dev);
-
- unregister_netdev (znet_dev);
- kfree (znet->rx_start);
- kfree (znet->tx_start);
- free_netdev (znet_dev);
- }
-}
-
-module_init (znet_probe);
-module_exit (znet_cleanup);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 19b64de7124b..328f47c92e26 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -76,16 +76,16 @@ MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
"port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
"Default = 1");
@@ -1921,10 +1921,8 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
u64 hret;
ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
- if (!ehea_mcl_entry) {
- pr_err("no mem for mcl_entry\n");
+ if (!ehea_mcl_entry)
return;
- }
INIT_LIST_HEAD(&ehea_mcl_entry->list);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 27f881758d16..9b03033bb557 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -64,11 +64,10 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
}
queue->queue_length = nr_of_pages * pagesize;
- queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
- if (!queue->queue_pages) {
- pr_err("no mem for queue_pages\n");
+ queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *),
+ GFP_KERNEL);
+ if (!queue->queue_pages)
return -ENOMEM;
- }
/*
* allocate pages for queue:
@@ -129,10 +128,8 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
void *vpage;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq) {
- pr_err("no mem for cq\n");
+ if (!cq)
goto out_nomem;
- }
cq->attr.max_nr_of_cqes = nr_of_cqe;
cq->attr.cq_token = cq_token;
@@ -257,10 +254,8 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
struct ehea_eq *eq;
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
- if (!eq) {
- pr_err("no mem for eq\n");
+ if (!eq)
return NULL;
- }
eq->adapter = adapter;
eq->attr.type = type;
@@ -428,10 +423,8 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- pr_err("no mem for qp\n");
+ if (!qp)
return NULL;
- }
qp->adapter = adapter;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 256bdb8e1994..4989481c19f0 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2190,11 +2190,10 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
{
struct emac_instance *dev = netdev_priv(ndev);
- strcpy(info->driver, "ibm_emac");
- strcpy(info->version, DRV_VERSION);
- info->fw_version[0] = '\0';
- sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
- dev->cell_index, dev->ofdev->dev.of_node->full_name);
+ strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
+ dev->cell_index, dev->ofdev->dev.of_node->full_name);
info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 50ea12bfb579..1f7ecf57181e 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -528,12 +528,9 @@ static int mal_probe(struct platform_device *ofdev)
irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
- if (!mal) {
- printk(KERN_ERR
- "mal%d: out of memory allocating MAL structure!\n",
- index);
+ if (!mal)
return -ENOMEM;
- }
+
mal->index = index;
mal->ofdev = ofdev;
mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index f2fdbb79837e..c859771a9902 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -637,7 +637,6 @@ static int ibmveth_open(struct net_device *netdev)
adapter->bounce_buffer =
kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
if (!adapter->bounce_buffer) {
- netdev_err(netdev, "unable to allocate bounce buffer\n");
rc = -ENOMEM;
goto err_out_free_irq;
}
@@ -722,9 +721,8 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
- strncpy(info->version, ibmveth_driver_version,
- sizeof(info->version) - 1);
+ strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
+ strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}
static netdev_features_t ibmveth_fix_features(struct net_device *dev,
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
index 3aff81d7989f..5119ef18953b 100644
--- a/drivers/net/ethernet/icplus/Kconfig
+++ b/drivers/net/ethernet/icplus/Kconfig
@@ -4,7 +4,7 @@
config IP1000
tristate "IP1000 Gigabit Ethernet support"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
select NET_CORE
select MII
---help---
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index ddee4060948a..05f7264c51f7 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -5,11 +5,6 @@
config NET_VENDOR_INTEL
bool "Intel devices"
default y
- depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
- ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
- GSC || BVME6000 || MVME16x || \
- (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
- EXPERIMENTAL
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -74,6 +69,7 @@ config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
depends on PCI && (!SPARC32 || BROKEN)
select CRC32
+ select PTP_1588_CLOCK
---help---
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -94,6 +90,8 @@ config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
select PTP_1588_CLOCK
+ select I2C
+ select I2C_ALGOBIT
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
@@ -112,6 +110,17 @@ config IGB
To compile this driver as a module, choose M here. The module
will be called igb.
+config IGB_HWMON
+ bool "Intel(R) PCI-Express Gigabit adapters HWMON support"
+ default y
+ depends on IGB && HWMON && !(IGB=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose thermal sensor data on Intel devices.
+
+ Some of our devices contain thermal sensors, both external and internal.
+ This data is made available via the hwmon sysfs interface, which
+ exposes the onboard sensors.
+
config IGB_DCA
bool "Direct Cache Access (DCA) Support"
default y
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index a59f0779e1c3..ec800b093e7e 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2928,8 +2928,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e100_phy_init(nic);
memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
- memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!eeprom_bad_csum_allow) {
netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
err = -EAGAIN;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 2b6cd02bfba0..26d9cd59ec75 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -81,68 +81,69 @@ struct e1000_adapter;
#include "e1000_hw.h"
-#define E1000_MAX_INTR 10
+#define E1000_MAX_INTR 10
/* TX/RX descriptor defines */
-#define E1000_DEFAULT_TXD 256
-#define E1000_MAX_TXD 256
-#define E1000_MIN_TXD 48
-#define E1000_MAX_82544_TXD 4096
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 256
+#define E1000_MIN_TXD 48
+#define E1000_MAX_82544_TXD 4096
-#define E1000_DEFAULT_RXD 256
-#define E1000_MAX_RXD 256
-#define E1000_MIN_RXD 48
-#define E1000_MAX_82544_RXD 4096
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 256
+#define E1000_MIN_RXD 48
+#define E1000_MAX_82544_RXD 4096
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
/* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
-#define E1000_RXBUFFER_128 128 /* Used for packet split */
-#define E1000_RXBUFFER_256 256 /* Used for packet split */
-#define E1000_RXBUFFER_512 512
-#define E1000_RXBUFFER_1024 1024
-#define E1000_RXBUFFER_2048 2048
-#define E1000_RXBUFFER_4096 4096
-#define E1000_RXBUFFER_8192 8192
-#define E1000_RXBUFFER_16384 16384
+#define E1000_RXBUFFER_128 128 /* Used for packet split */
+#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
+#define E1000_RXBUFFER_2048 2048
+#define E1000_RXBUFFER_4096 4096
+#define E1000_RXBUFFER_8192 8192
+#define E1000_RXBUFFER_16384 16384
/* SmartSpeed delimiters */
-#define E1000_SMARTSPEED_DOWNSHIFT 3
-#define E1000_SMARTSPEED_MAX 15
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX 15
/* Packet Buffer allocations */
-#define E1000_PBA_BYTES_SHIFT 0xA
-#define E1000_TX_HEAD_ADDR_SHIFT 7
-#define E1000_PBA_TX_MASK 0xFFFF0000
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
/* Flow Control Watermarks */
-#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
-#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
-#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
+#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define E1000_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define AUTO_ALL_MODES 0
-#define E1000_EEPROM_82544_APM 0x0004
-#define E1000_EEPROM_APME 0x0400
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE (-1)
/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
struct e1000_buffer {
struct sk_buff *skb;
dma_addr_t dma;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 14e30515f6aa..43462d596a4e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -115,12 +115,12 @@ static int e1000_get_settings(struct net_device *netdev,
if (hw->media_type == e1000_media_type_copper) {
ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
ecmd->advertising = ADVERTISED_TP;
if (hw->autoneg == 1) {
@@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
/* unfortunately FULL_DUPLEX != DUPLEX_FULL
- * and HALF_DUPLEX != DUPLEX_HALF */
-
+ * and HALF_DUPLEX != DUPLEX_HALF
+ */
if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
@@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev,
if ((hw->media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
- ETH_TP_MDI_X :
- ETH_TP_MDI);
+ ETH_TP_MDI_X : ETH_TP_MDI);
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
@@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /*
- * MDI setting is only allowed when autoneg enabled because
+ /* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
@@ -224,8 +222,8 @@ static int e1000_set_settings(struct net_device *netdev,
ADVERTISED_Autoneg;
else
hw->autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
ecmd->advertising = hw->autoneg_advertised;
} else {
u32 speed = ethtool_cmd_speed(ecmd);
@@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- /*
- * If the link is not reported up to netdev, interrupts are disabled,
+ /* If the link is not reported up to netdev, interrupts are disabled,
* and so the physical link state may have changed since we last
* looked. Set get_link_status to make sure that the true link
* state is interrogated, rather than pulling a cached and possibly
@@ -484,7 +481,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
le16_to_cpus(&eeprom_buff[i]);
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
- eeprom->len);
+ eeprom->len);
kfree(eeprom_buff);
return ret_val;
@@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
+ /* need read/modify/write of first changed EEPROM word
+ * only the second byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
+ /* need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
}
@@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring;
err = -ENOMEM;
- txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+ txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
+ GFP_KERNEL);
if (!txdr)
goto err_alloc_tx;
- rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+ rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
+ GFP_KERNEL);
if (!rxdr)
goto err_alloc_rx;
@@ -619,12 +620,12 @@ static int e1000_set_ringparam(struct net_device *netdev,
rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
- E1000_MAX_RXD : E1000_MAX_82544_RXD));
+ E1000_MAX_RXD : E1000_MAX_82544_RXD));
rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
- E1000_MAX_TXD : E1000_MAX_82544_TXD));
+ E1000_MAX_TXD : E1000_MAX_82544_TXD));
txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
goto err_setup_tx;
/* save the new, restore the old in order to free it,
- * then restore the new back again */
+ * then restore the new back again
+ */
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
@@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
if (hw->mac_type >= e1000_82543) {
-
REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
@@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
0xFFFFFFFF);
}
-
} else {
-
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
-
}
value = E1000_MC_TBL_SIZE;
@@ -858,13 +856,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
- /* NOTE: we don't test MSI interrupts here, yet */
- /* Hook up test interrupt handler just for this test */
+ /* NOTE: we don't test MSI interrupts here, yet
+ * Hook up test interrupt handler just for this test
+ */
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
- netdev))
+ netdev))
shared_int = false;
else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
@@ -1253,14 +1252,15 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
if (hw->media_type == e1000_media_type_copper &&
hw->phy_type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
else {
/* Set the ILOS bit on the fiber Nic is half
- * duplex link is detected. */
+ * duplex link is detected.
+ */
stat_reg = er32(STATUS);
if ((stat_reg & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@@ -1446,7 +1446,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
- 1024);
+ 1024);
if (!ret_val)
good_cnt++;
if (unlikely(++l == rxdr->count)) l = 0;
@@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
hw->serdes_has_link = false;
/* On some blade server designs, link establishment
- * could take as long as 2-3 minutes */
+ * could take as long as 2-3 minutes
+ */
do {
e1000_check_for_link(hw);
if (hw->serdes_has_link)
@@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev,
e_info(hw, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
+ * interfere with test result
+ */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
default:
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
+ * so exclude FUNC_1 ports from having WoL enabled
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
!adapter->eeprom_wol) {
wol->supported = 0;
@@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
+ * supported by this hardware
+ */
if (e1000_wol_exclusion(adapter, wol) ||
!device_can_wakeup(&adapter->pdev->dev))
return;
@@ -1839,7 +1843,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
-/* BUG_ON(i != E1000_STATS_LEN); */
+/* BUG_ON(i != E1000_STATS_LEN); */
}
static void e1000_get_strings(struct net_device *netdev, u32 stringset,
@@ -1859,37 +1863,37 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
-/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+ /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
- .get_drvinfo = e1000_get_drvinfo,
- .get_regs_len = e1000_get_regs_len,
- .get_regs = e1000_get_regs,
- .get_wol = e1000_get_wol,
- .set_wol = e1000_set_wol,
- .get_msglevel = e1000_get_msglevel,
- .set_msglevel = e1000_set_msglevel,
- .nway_reset = e1000_nway_reset,
- .get_link = e1000_get_link,
- .get_eeprom_len = e1000_get_eeprom_len,
- .get_eeprom = e1000_get_eeprom,
- .set_eeprom = e1000_set_eeprom,
- .get_ringparam = e1000_get_ringparam,
- .set_ringparam = e1000_set_ringparam,
- .get_pauseparam = e1000_get_pauseparam,
- .set_pauseparam = e1000_set_pauseparam,
- .self_test = e1000_diag_test,
- .get_strings = e1000_get_strings,
- .set_phys_id = e1000_set_phys_id,
- .get_ethtool_stats = e1000_get_ethtool_stats,
- .get_sset_count = e1000_get_sset_count,
- .get_coalesce = e1000_get_coalesce,
- .set_coalesce = e1000_set_coalesce,
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = e1000_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .set_phys_id = e1000_set_phys_id,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_sset_count = e1000_get_sset_count,
+ .get_coalesce = e1000_get_coalesce,
+ .set_coalesce = e1000_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 8fedd2451538..2879b9631e15 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -164,8 +164,9 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
if (hw->phy_init_script) {
msleep(20);
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of this routine. */
+ /* Save off the current value of register 0x2F5B to be restored
+ * at the end of this routine.
+ */
ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
/* Disabled the PHY transmitter */
@@ -466,7 +467,8 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
case e1000_82541:
case e1000_82541_rev_2:
/* These controllers can't ack the 64-bit write when issuing the
- * reset, so use IO-mapping as a workaround to issue the reset */
+ * reset, so use IO-mapping as a workaround to issue the reset
+ */
E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
break;
case e1000_82545_rev_3:
@@ -480,9 +482,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
break;
}
- /* After MAC reset, force reload of EEPROM to restore power-on settings to
- * device. Later controllers reload the EEPROM automatically, so just wait
- * for reload to complete.
+ /* After MAC reset, force reload of EEPROM to restore power-on settings
+ * to device. Later controllers reload the EEPROM automatically, so
+ * just wait for reload to complete.
*/
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -591,8 +593,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
msleep(5);
}
- /* Setup the receive address. This involves initializing all of the Receive
- * Address Registers (RARs 0 - 15).
+ /* Setup the receive address. This involves initializing all of the
+ * Receive Address Registers (RARs 0 - 15).
*/
e1000_init_rx_addrs(hw);
@@ -611,7 +613,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
for (i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* use write flush to prevent Memory Write Block (MWB) from
- * occurring when accessing our register space */
+ * occurring when accessing our register space
+ */
E1000_WRITE_FLUSH();
}
@@ -630,7 +633,9 @@ s32 e1000_init_hw(struct e1000_hw *hw)
case e1000_82546_rev_3:
break;
default:
- /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+ /* Workaround for PCI-X problem when BIOS sets MMRBC
+ * incorrectly.
+ */
if (hw->bus_type == e1000_bus_type_pcix
&& e1000_pcix_get_mmrbc(hw) > 2048)
e1000_pcix_set_mmrbc(hw, 2048);
@@ -660,7 +665,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
ctrl_ext = er32(CTRL_EXT);
/* Relaxed ordering must be disabled to avoid a parity
- * error crash in a PCI slot. */
+ * error crash in a PCI slot.
+ */
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
}
@@ -810,8 +816,9 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ew32(FCRTL, 0);
ew32(FCRTH, 0);
} else {
- /* We need to set up the Receive Threshold high and low water marks
- * as well as (optionally) enabling the transmission of XON frames.
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
*/
if (hw->fc_send_xon) {
ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
@@ -868,42 +875,46 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e1000_config_collision_dist(hw);
/* Check for a software override of the flow control settings, and setup
- * the device accordingly. If auto-negotiation is enabled, then software
- * will have to set the "PAUSE" bits to the correct value in the Tranmsit
- * Config Word Register (TXCW) and re-start auto-negotiation. However, if
- * auto-negotiation is disabled, then software will have to manually
- * configure the two flow control enable bits in the CTRL register.
+ * the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start
+ * auto-negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
*
* The possible values of the "fc" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames, but
- * not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but we do
- * not support receiving pause frames).
- * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
*/
switch (hw->fc) {
case E1000_FC_NONE:
- /* Flow control is completely disabled by a software over-ride. */
+ /* Flow ctrl is completely disabled by a software over-ride */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
break;
case E1000_FC_RX_PAUSE:
- /* RX Flow control is enabled and TX Flow control is disabled by a
- * software over-ride. Since there really isn't a way to advertise
- * that we are capable of RX Pause ONLY, we will advertise that we
- * support both symmetric and asymmetric RX PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
+ /* Rx Flow control is enabled and Tx Flow control is disabled by
+ * a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
case E1000_FC_TX_PAUSE:
- /* TX Flow control is enabled, and RX Flow control is disabled, by a
- * software over-ride.
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
break;
case E1000_FC_FULL:
- /* Flow control (both RX and TX) is enabled by a software over-ride. */
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
default:
@@ -912,11 +923,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
break;
}
- /* Since auto-negotiation is enabled, take the link out of reset (the link
- * will be in reset, because we previously reset the chip). This will
- * restart auto-negotiation. If auto-negotiation is successful then the
- * link-up status bit will be set and the flow control enable bits (RFCE
- * and TFCE) will be set according to their negotiated value.
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation. If auto-negotiation is successful
+ * then the link-up status bit will be set and the flow control enable
+ * bits (RFCE and TFCE) will be set according to their negotiated value.
*/
e_dbg("Auto-negotiation enabled\n");
@@ -927,11 +938,12 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
hw->txcw = txcw;
msleep(1);
- /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
- * indication in the Device Status Register. Time-out if a link isn't
- * seen in 500 milliseconds seconds (Auto-negotiation should complete in
- * less than 500 milliseconds even if the other end is doing it in SW).
- * For internal serdes, we just assume a signal is present, then poll.
+ /* If we have a signal (the cable is plugged in) then poll for a
+ * "Link-Up" indication in the Device Status Register. Time-out if a
+ * link isn't seen in 500 milliseconds (Auto-negotiation should
+ * complete in less than 500 milliseconds even if the other end is doing
+ * it in SW). For internal serdes, we just assume a signal is present,
+ * then poll.
*/
if (hw->media_type == e1000_media_type_internal_serdes ||
(er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
@@ -946,9 +958,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e_dbg("Never got a valid link from auto-neg!!!\n");
hw->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call
- * e1000_check_for_link. This routine will force the link up if
- * we detect a signal. This will allow us to communicate with
- * non-autonegotiating link partners.
+ * e1000_check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
*/
ret_val = e1000_check_for_link(hw);
if (ret_val) {
@@ -1042,9 +1054,9 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
e_dbg("e1000_copper_link_preconfig");
ctrl = er32(CTRL);
- /* With 82543, we need to force speed and duplex on the MAC equal to what
- * the PHY speed and duplex configuration is. In addition, we need to
- * perform a hardware reset on the PHY to take it out of reset.
+ /* With 82543, we need to force speed and duplex on the MAC equal to
+ * what the PHY speed and duplex configuration is. In addition, we need
+ * to perform a hardware reset on the PHY to take it out of reset.
*/
if (hw->mac_type > e1000_82543) {
ctrl |= E1000_CTRL_SLU;
@@ -1175,7 +1187,8 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave
- * resolution as hardware default. */
+ * resolution as hardware default.
+ */
if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
/* Disable SmartSpeed */
ret_val =
@@ -1485,13 +1498,15 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
if (hw->autoneg) {
/* Setup autoneg and flow control advertisement
- * and perform autonegotiation */
+ * and perform autonegotiation
+ */
ret_val = e1000_copper_link_autoneg(hw);
if (ret_val)
return ret_val;
} else {
/* PHY will be set to 10H, 10F, 100H,or 100F
- * depending on value from forced_speed_duplex. */
+ * depending on value from forced_speed_duplex.
+ */
e_dbg("Forcing speed and duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw);
if (ret_val) {
@@ -1609,7 +1624,8 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
- * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start
+ * auto-negotiation.
*
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
@@ -1636,7 +1652,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* capable of RX Pause ONLY, we will advertise that we
* support both symmetric and asymmetric RX PAUSE. Later
* (in e1000_config_fc_after_link_up) we will disable the
- *hw's ability to send PAUSE frames.
+ * hw's ability to send PAUSE frames.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
@@ -1720,15 +1736,15 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* Are we forcing Full or Half Duplex? */
if (hw->forced_speed_duplex == e1000_100_full ||
hw->forced_speed_duplex == e1000_10_full) {
- /* We want to force full duplex so we SET the full duplex bits in the
- * Device and MII Control Registers.
+ /* We want to force full duplex so we SET the full duplex bits
+ * in the Device and MII Control Registers.
*/
ctrl |= E1000_CTRL_FD;
mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
e_dbg("Full Duplex\n");
} else {
- /* We want to force half duplex so we CLEAR the full duplex bits in
- * the Device and MII Control Registers.
+ /* We want to force half duplex so we CLEAR the full duplex bits
+ * in the Device and MII Control Registers.
*/
ctrl &= ~E1000_CTRL_FD;
mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
@@ -1762,8 +1778,8 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed are duplex are forced.
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires
+ * MDI forced whenever speed or duplex is forced.
*/
phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
ret_val =
@@ -1814,10 +1830,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link.\n");
mii_status_reg = 0;
- /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ /* Wait for autoneg to complete or 4.5 seconds to expire */
for (i = PHY_FORCE_TIME; i > 0; i--) {
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
*/
ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1834,20 +1850,24 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
msleep(100);
}
if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
- /* We didn't get link. Reset the DSP and wait again for link. */
+ /* We didn't get link. Reset the DSP and wait again
+ * for link.
+ */
ret_val = e1000_phy_reset_dsp(hw);
if (ret_val) {
e_dbg("Error Resetting PHY DSP\n");
return ret_val;
}
}
- /* This loop will early-out if the link condition has been met. */
+ /* This loop will early-out if the link condition has been
+ * met
+ */
for (i = PHY_FORCE_TIME; i > 0; i--) {
if (mii_status_reg & MII_SR_LINK_STATUS)
break;
msleep(100);
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
*/
ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1862,9 +1882,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
}
if (hw->phy_type == e1000_phy_m88) {
- /* Because we reset the PHY above, we need to re-force TX_CLK in the
- * Extended PHY Specific Control Register to 25MHz clock. This value
- * defaults back to a 2.5MHz clock when the PHY is reset.
+ /* Because we reset the PHY above, we need to re-force TX_CLK in
+ * the Extended PHY Specific Control Register to 25MHz clock.
+ * This value defaults back to a 2.5MHz clock when the PHY is
+ * reset.
*/
ret_val =
e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
@@ -1879,8 +1900,9 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* In addition, because of the s/w reset above, we need to enable CRS on
- * TX. This must be set for both full and half duplex operation.
+ /* In addition, because of the s/w reset above, we need to
+ * enable CRS on Tx. This must be set for both full and half
+ * duplex operation.
*/
ret_val =
e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1951,7 +1973,8 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
e_dbg("e1000_config_mac_to_phy");
/* 82544 or newer MAC, Auto Speed Detection takes care of
- * MAC speed/duplex configuration.*/
+ * MAC speed/duplex configuration.
+ */
if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
return E1000_SUCCESS;
@@ -1985,7 +2008,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
* registers depending on negotiated values.
*/
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
- &phy_data);
+ &phy_data);
if (ret_val)
return ret_val;
@@ -2002,7 +2025,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
ctrl |= E1000_CTRL_SPD_1000;
else if ((phy_data & M88E1000_PSSR_SPEED) ==
- M88E1000_PSSR_100MBS)
+ M88E1000_PSSR_100MBS)
ctrl |= E1000_CTRL_SPD_100;
}
@@ -2135,9 +2158,9 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
/* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement Register
- * (Address 4) and the Auto_Negotiation Base Page Ability
- * Register (Address 5) to determine how flow control was
- * negotiated.
+ * (Address 4) and the Auto_Negotiation Base Page
+ * Ability Register (Address 5) to determine how flow
+ * control was negotiated.
*/
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
&mii_nway_adv_reg);
@@ -2148,18 +2171,19 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Two bits in the Auto Negotiation Advertisement Register
- * (Address 4) and two bits in the Auto Negotiation Base
- * Page Ability Register (Address 5) determine flow control
- * for both the PHY and the link partner. The following
- * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
- * 1999, describes these PAUSE resolution bits and how flow
- * control is determined based upon these settings.
+ /* Two bits in the Auto Negotiation Advertisement
+ * Register (Address 4) and two bits in the Auto
+ * Negotiation Base Page Ability Register (Address 5)
+ * determine flow control for both the PHY and the link
+ * partner. The following table, taken out of the IEEE
+ * 802.3ab/D6.0 dated March 25, 1999, describes these
+ * PAUSE resolution bits and how flow control is
+ * determined based upon these settings.
* NOTE: DC = Don't Care
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 0 | 0 | DC | DC | E1000_FC_NONE
* 0 | 1 | 0 | DC | E1000_FC_NONE
* 0 | 1 | 1 | 0 | E1000_FC_NONE
@@ -2178,17 +2202,18 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 1 | DC | 1 | DC | E1000_FC_FULL
*
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /* Now we need to check if the user selected RX ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
+ /* Now we need to check if the user selected Rx
+ * ONLY of pause frames. In this case, we had
+ * to advertise FULL flow control because we
+ * could not advertise Rx ONLY. Hence, we must
+ * now check to see if we need to turn OFF the
+ * TRANSMISSION of PAUSE frames.
*/
if (hw->original_fc == E1000_FC_FULL) {
hw->fc = E1000_FC_FULL;
@@ -2203,7 +2228,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
*
*/
@@ -2220,7 +2245,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
*
*/
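
[Editor's sketch] The PAUSE/ASM_DIR tables above boil down to a four-way decision. A minimal stand-alone C sketch of that resolution, not part of this patch; the enum and the resolve_fc() name are invented for illustration, only the truth table comes from the comments:

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve flow control from the local and link-partner PAUSE/ASM_DIR
 * bits, following the 802.3 resolution tables quoted in the comments
 * above.  'requested' is what the local side originally asked for and
 * only matters when both ends advertise symmetric PAUSE.
 */
static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
                               bool lp_pause, bool lp_asm,
                               enum fc_mode requested)
{
    if (loc_pause && lp_pause)
        return requested == FC_FULL ? FC_FULL : FC_RX_PAUSE;
    if (!loc_pause && loc_asm && lp_pause && lp_asm)
        return FC_TX_PAUSE;    /* we may send PAUSE but will not honor it */
    if (loc_pause && loc_asm && !lp_pause && lp_asm)
        return FC_RX_PAUSE;    /* we honor PAUSE, the partner will not */
    return FC_NONE;            /* every other combination */
}

int main(void)
{
    /* both ends advertise symmetric PAUSE and we originally wanted FULL */
    printf("%d\n", resolve_fc(true, true, true, true, FC_FULL));
    return 0;
}
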
@@ -2233,25 +2258,27 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
- /* Per the IEEE spec, at this point flow control should be
- * disabled. However, we want to consider that we could
- * be connected to a legacy switch that doesn't advertise
- * desired flow control, but can be forced on the link
- * partner. So if we advertised no flow control, that is
- * what we will resolve to. If we advertised some kind of
- * receive capability (Rx Pause Only or Full Flow Control)
- * and the link partner advertised none, we will configure
- * ourselves to enable Rx Flow Control only. We can do
- * this safely for two reasons: If the link partner really
- * didn't want flow control enabled, and we enable Rx, no
- * harm done since we won't be receiving any PAUSE frames
- * anyway. If the intent on the link partner was to have
- * flow control enabled, then by us enabling RX only, we
- * can at least receive pause frames and process them.
- * This is a good idea because in most cases, since we are
- * predominantly a server NIC, more times than not we will
- * be asked to delay transmission of packets than asking
- * our link partner to pause transmission of frames.
+ /* Per the IEEE spec, at this point flow control should
+ * be disabled. However, we want to consider that we
+ * could be connected to a legacy switch that doesn't
+ * advertise desired flow control, but can be forced on
+ * the link partner. So if we advertised no flow
+ * control, that is what we will resolve to. If we
+ * advertised some kind of receive capability (Rx Pause
+ * Only or Full Flow Control) and the link partner
+ * advertised none, we will configure ourselves to
+ * enable Rx Flow Control only. We can do this safely
+ * for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx,
+ * no harm done since we won't be receiving any PAUSE
+ * frames anyway. If the intent on the link partner was
+ * to have flow control enabled, then by us enabling Rx
+ * only, we can at least receive pause frames and
+ * process them. This is a good idea because in most
+ * cases, since we are predominantly a server NIC, more
+ * times than not we will be asked to delay transmission
+ * of packets than asking our link partner to pause
+ * transmission of frames.
*/
else if ((hw->original_fc == E1000_FC_NONE ||
hw->original_fc == E1000_FC_TX_PAUSE) ||
@@ -2316,8 +2343,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
status = er32(STATUS);
rxcw = er32(RXCW);
- /*
- * If we don't have link (auto-negotiation failed or link partner
+ /* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), and our link partner is not trying to
* auto-negotiate with us (we are receiving idles or data),
* we need to force link up. We also need to give auto-negotiation
@@ -2346,8 +2372,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
goto out;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /*
- * If we are forcing link and we are receiving /C/ ordered
+ /* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
@@ -2358,8 +2383,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
hw->serdes_has_link = true;
} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
- /*
- * If we force link for non-auto-negotiation switch, check
+ /* If we force link for non-auto-negotiation switch, check
* link status based on MAC synchronization for internal
* serdes media type.
*/
@@ -2468,15 +2492,17 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
if (phy_data & MII_SR_LINK_STATUS) {
hw->get_link_status = false;
- /* Check if there was DownShift, must be checked immediately after
- * link-up */
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
e1000_check_downshift(hw);
/* If we are on 82544 or 82543 silicon and speed/duplex
- * are forced to 10H or 10F, then we will implement the polarity
- * reversal workaround. We disable interrupts first, and upon
- * returning, place the devices interrupt state to its previous
- * value except for the link status change interrupt which will
+ * are forced to 10H or 10F, then we will implement the
+ * polarity reversal workaround. We disable interrupts
+ * first, and upon returning, place the devices
+ * interrupt state to its previous value except for the
+ * link status change interrupt which will
* happen due to the execution of this workaround.
*/
@@ -2527,9 +2553,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
}
}
- /* Configure Flow Control now that Auto-Neg has completed. First, we
- * need to restore the desired flow control settings because we may
- * have had to re-autoneg with a different link partner.
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control settings
+ * because we may have had to re-autoneg with a different link
+ * partner.
*/
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
@@ -2538,11 +2565,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
}
/* At this point we know that we are on copper and we have
- * auto-negotiated link. These are conditions for checking the link
- * partner capability register. We use the link speed to determine if
- * TBI compatibility needs to be turned on or off. If the link is not
- * at gigabit speed, then TBI compatibility is not needed. If we are
- * at gigabit speed, we turn on TBI compatibility.
+ * auto-negotiated link. These are conditions for checking the
+ * link partner capability register. We use the link speed to
+ * determine if TBI compatibility needs to be turned on or off.
+ * If the link is not at gigabit speed, then TBI compatibility
+ * is not needed. If we are at gigabit speed, we turn on TBI
+ * compatibility.
*/
if (hw->tbi_compatibility_en) {
u16 speed, duplex;
@@ -2554,20 +2582,23 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
return ret_val;
}
if (speed != SPEED_1000) {
- /* If link speed is not set to gigabit speed, we do not need
- * to enable TBI compatibility.
+ /* If link speed is not set to gigabit speed, we
+ * do not need to enable TBI compatibility.
*/
if (hw->tbi_compatibility_on) {
- /* If we previously were in the mode, turn it off. */
+ /* If we previously were in the mode,
+ * turn it off.
+ */
rctl = er32(RCTL);
rctl &= ~E1000_RCTL_SBP;
ew32(RCTL, rctl);
hw->tbi_compatibility_on = false;
}
} else {
- /* If TBI compatibility is was previously off, turn it on. For
- * compatibility with a TBI link partner, we will store bad
- * packets. Some frames have an additional byte on the end and
+ /* If TBI compatibility was previously off,
+ * turn it on. For compatibility with a TBI link
+ * partner, we will store bad packets. Some
+ * frames have an additional byte on the end and
* will look like CRC errors to to the hardware.
*/
if (!hw->tbi_compatibility_on) {
@@ -2629,9 +2660,9 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
*duplex = FULL_DUPLEX;
}
- /* IGP01 PHY may advertise full duplex operation after speed downgrade even
- * if it is operating at half duplex. Here we set the duplex settings to
- * match the duplex in the link partner's capabilities.
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade
+ * even if it is operating at half duplex. Here we set the duplex
+ * settings to match the duplex in the link partner's capabilities.
*/
if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
@@ -2697,8 +2728,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
*/
static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Raise the clock input to the Management Data Clock (by setting the MDC
- * bit), and then delay 10 microseconds.
+ /* Raise the clock input to the Management Data Clock (by setting the
+ * MDC bit), and then delay 10 microseconds.
*/
ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
E1000_WRITE_FLUSH();
@@ -2712,8 +2743,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
*/
static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Lower the clock input to the Management Data Clock (by clearing the MDC
- * bit), and then delay 10 microseconds.
+ /* Lower the clock input to the Management Data Clock (by clearing the
+ * MDC bit), and then delay 10 microseconds.
*/
ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
E1000_WRITE_FLUSH();
@@ -2746,10 +2777,10 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
while (mask) {
- /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
- * then raising and lowering the Management Data Clock. A "0" is
- * shifted out to the PHY by setting the MDIO bit to "0" and then
- * raising and lowering the clock.
+ /* A "1" is shifted out to the PHY by setting the MDIO bit to
+ * "1" and then raising and lowering the Management Data Clock.
+ * A "0" is shifted out to the PHY by setting the MDIO bit to
+ * "0" and then raising and lowering the clock.
*/
if (data & mask)
ctrl |= E1000_CTRL_MDIO;
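
[Editor's sketch] The bit-bang write in this hunk is a plain MSB-first shift loop. A stand-alone sketch, with mdio_set_data()/mdio_pulse_clock() as hypothetical stand-ins for the CTRL register writes, flushes and 10 us delays the driver actually performs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void mdio_set_data(bool bit)  { printf("%d", bit); } /* drive MDIO */
static void mdio_pulse_clock(void)   { /* raise MDC, delay, lower, delay */ }

/* Shift 'count' bits of 'data' out to the PHY, most significant bit
 * first: set MDIO to the bit value, then raise and lower the clock.
 */
static void mdio_shift_out(uint32_t data, unsigned int count)
{
    for (uint32_t mask = 1u << (count - 1); mask; mask >>= 1) {
        mdio_set_data(data & mask);
        mdio_pulse_clock();
    }
}

int main(void)
{
    mdio_shift_out(0xFFFFFFFF, 32);    /* the 32-bit all-ones preamble */
    printf("\n");
    return 0;
}
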
@@ -2781,24 +2812,26 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
u8 i;
/* In order to read a register from the PHY, we need to shift in a total
- * of 18 bits from the PHY. The first two bit (turnaround) times are used
- * to avoid contention on the MDIO pin when a read operation is performed.
- * These two bits are ignored by us and thrown away. Bits are "shifted in"
- * by raising the input to the Management Data Clock (setting the MDC bit),
- * and then reading the value of the MDIO bit.
+ * of 18 bits from the PHY. The first two bit (turnaround) times are
+ * used to avoid contention on the MDIO pin when a read operation is
+ * performed. These two bits are ignored by us and thrown away. Bits are
+ * "shifted in" by raising the input to the Management Data Clock
+ * (setting the MDC bit), and then reading the value of the MDIO bit.
*/
ctrl = er32(CTRL);
- /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
+ * input.
+ */
ctrl &= ~E1000_CTRL_MDIO_DIR;
ctrl &= ~E1000_CTRL_MDIO;
ew32(CTRL, ctrl);
E1000_WRITE_FLUSH();
- /* Raise and Lower the clock before reading in the data. This accounts for
- * the turnaround bits. The first clock occurred when we clocked out the
- * last bit of the Register Address.
+ /* Raise and Lower the clock before reading in the data. This accounts
+ * for the turnaround bits. The first clock occurred when we clocked out
+ * the last bit of the Register Address.
*/
e1000_raise_mdi_clk(hw, &ctrl);
e1000_lower_mdi_clk(hw, &ctrl);
@@ -2870,8 +2903,8 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
if (hw->mac_type > e1000_82543) {
/* Set up Op-code, Phy Address, and register address in the MDI
- * Control register. The MAC will take care of interfacing with the
- * PHY to retrieve the desired data.
+ * Control register. The MAC will take care of interfacing with
+ * the PHY to retrieve the desired data.
*/
if (hw->mac_type == e1000_ce4100) {
mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
@@ -2929,31 +2962,32 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
*phy_data = (u16) mdic;
}
} else {
- /* We must first send a preamble through the MDIO pin to signal the
- * beginning of an MII instruction. This is done by sending 32
- * consecutive "1" bits.
+ /* We must first send a preamble through the MDIO pin to signal
+ * the beginning of an MII instruction. This is done by sending
+ * 32 consecutive "1" bits.
*/
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
/* Now combine the next few fields that are required for a read
* operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine five different times. The format of
- * a MII read instruction consists of a shift out of 14 bits and is
- * defined as follows:
+ * e1000_shift_out_mdi_bits routine five different times. The
+ * format of a MII read instruction consists of a shift out of
+ * 14 bits and is defined as follows:
* <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
- * followed by a shift in of 18 bits. This first two bits shifted in
- * are TurnAround bits used to avoid contention on the MDIO pin when a
- * READ operation is performed. These two bits are thrown away
- * followed by a shift in of 16 bits which contains the desired data.
+ * followed by a shift in of 18 bits. The first two bits
+ * shifted in are TurnAround bits used to avoid contention on
+ * the MDIO pin when a READ operation is performed. These two
+ * bits are thrown away followed by a shift in of 16 bits which
+ * contains the desired data.
*/
mdic = ((reg_addr) | (phy_addr << 5) |
(PHY_OP_READ << 10) | (PHY_SOF << 12));
e1000_shift_out_mdi_bits(hw, mdic, 14);
- /* Now that we've shifted out the read command to the MII, we need to
- * "shift in" the 16-bit value (18 total bits) of the requested PHY
- * register address.
+ /* Now that we've shifted out the read command to the MII, we
+ * need to "shift in" the 16-bit value (18 total bits) of the
+ * requested PHY register address.
*/
*phy_data = e1000_shift_in_mdi_bits(hw);
}
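
[Editor's sketch] The 14-bit read command described above packs <SOF><Op Code><Phy Addr><Reg Addr> exactly as the mdic expression in this hunk does. A stand-alone helper; the PHY_SOF/PHY_OP_READ values are assumed from IEEE 802.3 clause 22 rather than copied from e1000_hw.h:

#include <stdint.h>
#include <stdio.h>

#define PHY_SOF      0x1    /* start-of-frame, '01' */
#define PHY_OP_READ  0x2    /* read opcode, '10' */

/* Pack the 14 bits that follow the preamble for a read: register
 * address in the low five bits, PHY address in the next five, then
 * the opcode and start-of-frame marker.
 */
static uint16_t mii_read_cmd(uint8_t phy_addr, uint8_t reg_addr)
{
    return (reg_addr & 0x1f) | ((phy_addr & 0x1f) << 5) |
           (PHY_OP_READ << 10) | (PHY_SOF << 12);
}

int main(void)
{
    printf("cmd = 0x%04x\n", mii_read_cmd(1, 2));    /* 0x1822 */
    return 0;
}
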
@@ -3060,18 +3094,18 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
}
}
} else {
- /* We'll need to use the SW defined pins to shift the write command
- * out to the PHY. We first send a preamble to the PHY to signal the
- * beginning of the MII instruction. This is done by sending 32
- * consecutive "1" bits.
+ /* We'll need to use the SW defined pins to shift the write
+ * command out to the PHY. We first send a preamble to the PHY
+ * to signal the beginning of the MII instruction. This is done
+ * by sending 32 consecutive "1" bits.
*/
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
- /* Now combine the remaining required fields that will indicate a
- * write operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine for each field in the command. The
- * format of a MII write instruction is as follows:
- * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ /* Now combine the remaining required fields that will indicate
+ * a write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the
+ * command. The format of a MII write instruction is as follows:
+ * <Preamble><SOF><OpCode><PhyAddr><RegAddr><Turnaround><Data>.
*/
mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
(PHY_OP_WRITE << 12) | (PHY_SOF << 14));
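
[Editor's sketch] The write frame is packed the same way, then shifted out as 32 bits with the data word in the low half, mirroring the mdic expression just above. A stand-alone sketch; the numeric PHY_* values are assumptions based on IEEE 802.3 clause 22:

#include <stdint.h>
#include <stdio.h>

#define PHY_SOF         0x1    /* start-of-frame, '01' */
#define PHY_OP_WRITE    0x1    /* write opcode, '01' */
#define PHY_TURNAROUND  0x2    /* turnaround pattern, '10' */

/* Pack the 32-bit write frame that follows the preamble:
 * <SOF><OpCode><PhyAddr><RegAddr><Turnaround><Data>, header in the
 * high 16 bits, data word in the low 16 bits.
 */
static uint32_t mii_write_frame(uint8_t phy_addr, uint8_t reg_addr,
                                uint16_t data)
{
    uint32_t hdr = PHY_TURNAROUND | ((reg_addr & 0x1f) << 2) |
                   ((phy_addr & 0x1f) << 7) |
                   (PHY_OP_WRITE << 12) | (PHY_SOF << 14);

    return (hdr << 16) | data;
}

int main(void)
{
    /* write 0x1140 to PHY 1, register 0 (control) */
    printf("frame = 0x%08x\n", mii_write_frame(1, 0, 0x1140));
    return 0;
}
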
@@ -3100,10 +3134,10 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) {
- /* Read the device control register and assert the E1000_CTRL_PHY_RST
- * bit. Then, take it out of reset.
+ /* Read the device control register and assert the
+ * E1000_CTRL_PHY_RST bit. Then, take it out of reset.
* For e1000 hardware, we delay for 10ms between the assert
- * and deassert.
+ * and de-assert.
*/
ctrl = er32(CTRL);
ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
@@ -3115,8 +3149,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
E1000_WRITE_FLUSH();
} else {
- /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
- * bit to put the PHY into reset. Then, take it out of reset.
+ /* Read the Extended Device Control Register, assert the
+ * PHY_RESET_DIR bit to put the PHY into reset. Then, take it
+ * out of reset.
*/
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
@@ -3301,7 +3336,8 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_igp_get_info");
/* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
+ * and it is stored in the hw->speed_downgraded parameter.
+ */
phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
/* IGP01E1000 does not need to support it. */
@@ -3327,7 +3363,9 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
- /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+ /* Local/Remote Receiver Information are only valid @ 1000
+ * Mbps
+ */
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val)
return ret_val;
@@ -3379,7 +3417,8 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_m88_get_info");
/* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
+ * and it is stored in the hw->speed_downgraded parameter.
+ */
phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -3574,8 +3613,8 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
}
if (eeprom->type == e1000_eeprom_spi) {
- /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
- * 32KB (incremented by powers of 2).
+ /* eeprom_size will be an enum [0..8] that maps to eeprom sizes
+ * 128B to 32KB (incremented by powers of 2).
*/
/* Set to default value for initial eeprom read. */
eeprom->word_size = 64;
@@ -3585,8 +3624,9 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
eeprom_size =
(eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
/* 256B eeprom size was not supported in earlier hardware, so we
- * bump eeprom_size up one to ensure that "1" (which maps to 256B)
- * is never the result used in the shifting logic below. */
+ * bump eeprom_size up one to ensure that "1" (which maps to
+ * 256B) is never the result used in the shifting logic below.
+ */
if (eeprom_size)
eeprom_size++;
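
[Editor's sketch] The size field described above (0..8 covering 128 B to 32 KB in powers of two) reduces to a one-line shift. A sketch of just that mapping, under the assumption spelled out in the comment; eeprom_words() is an invented name and the "+1 when non-zero" bump is taken to happen before the mapping is used:

#include <stdint.h>
#include <stdio.h>

/* Map the EEPROM size field to a count of 16-bit words:
 * field 0 -> 128 bytes, field 8 -> 32 KB, doubling each step.
 */
static uint16_t eeprom_words(uint8_t size_field)
{
    uint32_t bytes = 128u << size_field;

    return bytes / 2;
}

int main(void)
{
    for (unsigned int f = 0; f <= 8; f++)
        printf("field %u -> %u words\n", f, (unsigned int)eeprom_words(f));
    return 0;
}
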
@@ -3618,8 +3658,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
*/
static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
{
- /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
- * wait 50 microseconds.
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and
+ * then wait 50 microseconds.
*/
*eecd = *eecd & ~E1000_EECD_SK;
ew32(EECD, *eecd);
@@ -3651,10 +3691,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
eecd |= E1000_EECD_DO;
}
do {
- /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
- * and then raising and then lowering the clock (the SK bit controls
- * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
- * by setting "DI" to "0" and then raising and then lowering the clock.
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a
+ * "1", and then raising and then lowering the clock (the SK bit
+ * controls the clock input to the EEPROM). A "0" is shifted
+ * out to the EEPROM by setting "DI" to "0" and then raising and
+ * then lowering the clock.
*/
eecd &= ~E1000_EECD_DI;
@@ -3691,9 +3732,9 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
/* In order to read a register from the EEPROM, we need to shift 'count'
* bits in from the EEPROM. Bits are "shifted in" by raising the clock
- * input to the EEPROM (setting the SK bit), and then reading the value of
- * the "DO" bit. During this "shifting in" process the "DI" bit should
- * always be clear.
+ * input to the EEPROM (setting the SK bit), and then reading the value
+ * of the "DO" bit. During this "shifting in" process the "DI" bit
+ * should always be clear.
*/
eecd = er32(EECD);
@@ -3945,8 +3986,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
+ /* A check for invalid values: offset too large, too many words, and
+ * not enough words.
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
@@ -3964,7 +4005,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
return -E1000_ERR_EEPROM;
/* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
- * acquired the EEPROM at this point, so any returns should release it */
+ * acquired the EEPROM at this point, so any returns should release it
+ */
if (eeprom->type == e1000_eeprom_spi) {
u16 word_in;
u8 read_opcode = EEPROM_READ_OPCODE_SPI;
@@ -3976,7 +4018,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw);
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
if ((eeprom->address_bits == 8) && (offset >= 128))
read_opcode |= EEPROM_A8_OPCODE_SPI;
@@ -3985,11 +4029,13 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
eeprom->address_bits);
- /* Read the data. The address of the eeprom internally increments with
- * each byte (spi) being read, saving on the overhead of eeprom setup
- * and tear-down. The address counter will roll over if reading beyond
- * the size of the eeprom, thus allowing the entire memory to be read
- * starting from any offset. */
+ /* Read the data. The address of the eeprom internally
+ * increments with each byte (spi) being read, saving on the
+ * overhead of eeprom setup and tear-down. The address counter
+ * will roll over if reading beyond the size of the eeprom, thus
+ * allowing the entire memory to be read starting from any
+ * offset.
+ */
for (i = 0; i < words; i++) {
word_in = e1000_shift_in_ee_bits(hw, 16);
data[i] = (word_in >> 8) | (word_in << 8);
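
[Editor's sketch] The SPI read sequence in this hunk, shown stand-alone: send the opcode (folding in the A8 bit when the byte address needs a ninth bit), send one byte address, then clock in auto-incremented words and swap their bytes. The spi_shift_out()/spi_shift_in() stubs are hypothetical stand-ins for the driver's bit-bang helpers, and the opcode values are assumed from the standard SPI command set:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_READ_OPCODE_SPI 0x03
#define EEPROM_A8_OPCODE_SPI   0x08

static void spi_shift_out(uint16_t data, uint16_t bits) { (void)data; (void)bits; }
static uint16_t spi_shift_in(uint16_t bits) { (void)bits; return 0x3412; }

/* Read 'words' 16-bit words starting at word 'offset' from a part with
 * 8 address bits, relying on the EEPROM's internal address counter so
 * the opcode and address are sent only once.
 */
static void spi_eeprom_read(uint16_t offset, uint16_t words, uint16_t *data)
{
    uint8_t opcode = EEPROM_READ_OPCODE_SPI;

    if (offset >= 128)
        opcode |= EEPROM_A8_OPCODE_SPI;

    spi_shift_out(opcode, 8);
    spi_shift_out(offset * 2, 8);    /* byte address of the first word */

    for (uint16_t i = 0; i < words; i++) {
        uint16_t w = spi_shift_in(16);

        data[i] = (w >> 8) | (w << 8);    /* wire order is high byte first */
    }
}

int main(void)
{
    uint16_t buf[2];

    spi_eeprom_read(0, 2, buf);
    printf("0x%04x 0x%04x\n", (unsigned int)buf[0], (unsigned int)buf[1]);
    return 0;
}
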
@@ -4003,8 +4049,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset + i),
eeprom->address_bits);
- /* Read the data. For microwire, each word requires the overhead
- * of eeprom setup and tear-down. */
+ /* Read the data. For microwire, each word requires the
+ * overhead of eeprom setup and tear-down.
+ */
data[i] = e1000_shift_in_ee_bits(hw, 16);
e1000_standby_eeprom(hw);
}
@@ -4119,8 +4166,8 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
+ /* A check for invalid values: offset too large, too many words, and
+ * not enough words.
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
@@ -4174,7 +4221,9 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw);
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
if ((eeprom->address_bits == 8) && (offset >= 128))
write_opcode |= EEPROM_A8_OPCODE_SPI;
@@ -4186,16 +4235,19 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
/* Send the data */
- /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+ /* Loop to allow for up to whole page write (32 bytes) of
+ * eeprom
+ */
while (widx < words) {
u16 word_out = data[widx];
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_ee_bits(hw, word_out, 16);
widx++;
- /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
- * operation, while the smaller eeproms are capable of an 8-byte
- * PAGE WRITE operation. Break the inner loop to pass new address
+ /* Some larger eeprom sizes are capable of a 32-byte
+ * PAGE WRITE operation, while the smaller eeproms are
+ * capable of an 8-byte PAGE WRITE operation. Break the
+ * inner loop to pass new address
*/
if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
e1000_standby_eeprom(hw);
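
[Editor's sketch] The inner-loop break above hinges on one modulo test: the running page write must stop when the byte address of the next word lands on a page boundary. The same arithmetic stand-alone (spi_page_boundary() is an invented name):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 'offset' and 'widx' count 16-bit words; 'page_size' is in bytes
 * (8 or 32 depending on the part).
 */
static bool spi_page_boundary(uint16_t offset, uint16_t widx,
                              uint16_t page_size)
{
    return (((offset + widx) * 2) % page_size) == 0;
}

int main(void)
{
    /* a write starting at word 14 with 32-byte pages breaks after two words */
    for (uint16_t widx = 1; widx <= 4; widx++)
        printf("widx %u: %s\n", (unsigned int)widx,
               spi_page_boundary(14, widx, 32) ? "new page" : "same page");
    return 0;
}
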
@@ -4249,14 +4301,15 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
/* Send the data */
e1000_shift_out_ee_bits(hw, data[words_written], 16);
- /* Toggle the CS line. This in effect tells the EEPROM to execute
- * the previous command.
+ /* Toggle the CS line. This in effect tells the EEPROM to
+ * execute the previous command.
*/
e1000_standby_eeprom(hw);
- /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
- * signal that the command has been completed by raising the DO signal.
- * If DO does not go high in 10 milliseconds, then error out.
+ /* Read DO repeatedly until it is high (equal to '1'). The
+ * EEPROM will signal that the command has been completed by
+ * raising the DO signal. If DO does not go high in 10
+ * milliseconds, then error out.
*/
for (i = 0; i < 200; i++) {
eecd = er32(EECD);
@@ -4483,7 +4536,8 @@ static void e1000_clear_vfta(struct e1000_hw *hw)
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of the
- * manageability unit */
+ * manageability unit
+ */
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
E1000_WRITE_FLUSH();
@@ -4911,12 +4965,12 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
* counters overcount this packet as a CRC error and undercount
* the packet as a good packet
*/
- /* This packet should not be counted as a CRC error. */
+ /* This packet should not be counted as a CRC error. */
stats->crcerrs--;
- /* This packet does count as a Good Packet Received. */
+ /* This packet does count as a Good Packet Received. */
stats->gprc++;
- /* Adjust the Good Octets received counters */
+ /* Adjust the Good Octets received counters */
carry_bit = 0x80000000 & stats->gorcl;
stats->gorcl += frame_len;
/* If the high bit of Gorcl (the low 32 bits of the Good Octets
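
[Editor's sketch] The hunk is cut off mid-comment by the diff context, but the arithmetic it introduces is worth seeing whole: the good-octets count lives in two 32-bit halves, and a wrap of the low half is detected from its top bit before and after the add. A stand-alone version (struct and function names invented):

#include <stdint.h>
#include <stdio.h>

struct octet_count {
    uint32_t lo;    /* gorcl: low 32 bits */
    uint32_t hi;    /* gorch: high 32 bits */
};

/* Add a frame length to the split 64-bit counter.  If the top bit of
 * the low half was set before the add and clear afterwards, the low
 * half wrapped, so carry into the high half (frame lengths are far
 * smaller than 2^31, which is what makes this test sufficient).
 */
static void add_octets(struct octet_count *c, uint32_t frame_len)
{
    uint32_t carry_bit = 0x80000000u & c->lo;

    c->lo += frame_len;
    if (carry_bit && !(c->lo & 0x80000000u))
        c->hi++;
}

int main(void)
{
    struct octet_count c = { .lo = 0xFFFFFF00u, .hi = 0 };

    add_octets(&c, 0x200);
    printf("hi=%u lo=0x%08x\n", (unsigned int)c.hi, (unsigned int)c.lo);
    return 0;
}
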
@@ -5196,8 +5250,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
if (ret_val)
return ret_val;
- /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
- * find the polarity status */
+ /* If speed is 1000 Mbps, must read the
+ * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status
+ */
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -5213,8 +5268,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
e1000_rev_polarity_reversed :
e1000_rev_polarity_normal;
} else {
- /* For 10 Mbps, read the polarity bit in the status register. (for
- * 100 Mbps this bit is always 0) */
+ /* For 10 Mbps, read the polarity bit in the status
+ * register. (for 100 Mbps this bit is always 0)
+ */
*polarity =
(phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
e1000_rev_polarity_reversed :
@@ -5374,8 +5430,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
} else {
if (hw->dsp_config_state == e1000_dsp_config_activated) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
+ /* Save off the current value of register 0x2F5B to be
+ * restored at the end of the routines.
+ */
ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
@@ -5391,7 +5448,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
+ IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val)
return ret_val;
for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5412,7 +5469,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
return ret_val;
@@ -5429,8 +5486,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
if (hw->ffe_config_state == e1000_ffe_config_active) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
+ /* Save off the current value of register 0x2F5B to be
+ * restored at the end of the routines.
+ */
ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
@@ -5446,7 +5504,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
+ IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val)
return ret_val;
ret_val =
@@ -5456,7 +5514,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
return ret_val;
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
return ret_val;
@@ -5542,8 +5600,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return E1000_SUCCESS;
/* During driver activity LPLU should not be used or it will attain link
- * from the lowest speeds starting from 10Mbps. The capability is used for
- * Dx transitions and states */
+ * from the lowest speeds starting from 10Mbps. The capability is used
+ * for Dx transitions and states
+ */
if (hw->mac_type == e1000_82541_rev_2
|| hw->mac_type == e1000_82547_rev_2) {
ret_val =
@@ -5563,10 +5622,11 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return ret_val;
}
- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
- * Dx states where the power conservation is most important. During
- * driver activity we should enable SmartSpeed, so performance is
- * maintained. */
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
if (hw->smart_speed == e1000_smart_speed_on) {
ret_val =
e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 294da56b824c..8502c625dbef 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
* e1000_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
**/
-
static int __init e1000_init_module(void)
{
int ret;
@@ -266,7 +265,6 @@ module_init(e1000_init_module);
* e1000_exit_module is called just before the driver is removed
* from memory.
**/
-
static void __exit e1000_exit_module(void)
{
pci_unregister_driver(&e1000_driver);
@@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
* e1000_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
-
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
* e1000_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
-
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -398,11 +394,12 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_configure_rx(adapter);
/* call E1000_DESC_UNUSED which always leaves
* at least 1 descriptor unused to make sure
- * next_to_use != next_to_clean */
+ * next_to_use != next_to_clean
+ */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct e1000_rx_ring *ring = &adapter->rx_ring[i];
adapter->alloc_rx_buf(adapter, ring,
- E1000_DESC_UNUSED(ring));
+ E1000_DESC_UNUSED(ring));
}
}
@@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter)
* The phy may be powered down to save power and turn off link when the
* driver is unloaded and wake on lan is not enabled (among others)
* *** this routine MUST be followed by a call to e1000_reset ***
- *
**/
-
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
/* Just clear the power down bit to wake the phy back up */
if (hw->media_type == e1000_media_type_copper) {
/* according to the manual, the phy will retain its
- * settings across a power-down/up cycle */
+ * settings across a power-down/up cycle
+ */
e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
@@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
* The PHY cannot be powered down if any of the following is true *
* (a) WoL is enabled
* (b) AMT is active
- * (c) SoL/IDER session is active */
+ * (c) SoL/IDER session is active
+ */
if (!adapter->wol && hw->mac_type >= e1000_82540 &&
hw->media_type == e1000_media_type_copper) {
u16 mii_reg = 0;
@@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
- /*
- * Setting DOWN must be after irq_disable to prevent
+ /* Setting DOWN must be after irq_disable to prevent
* a screaming interrupt. Setting DOWN also prevents
* tasks from rescheduling.
*/
@@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter)
* rounded up to the next 1KB and expressed in KB. Likewise,
* the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and
- * expressed in KB. */
+ * expressed in KB.
+ */
pba = er32(PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
- /*
- * the tx fifo also stores 16 bytes of information about the tx
+ /* the Tx fifo also stores 16 bytes of information about the Tx
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (hw->max_frame_size +
@@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter)
/* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
- * allocation, take space away from current Rx allocation */
+ * allocation, take space away from current Rx allocation
+ */
if (tx_space < min_tx_space &&
((min_tx_space - tx_space) < pba)) {
pba = pba - (min_tx_space - tx_space);
@@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter)
break;
}
- /* if short on rx space, rx wins and must trump tx
- * adjustment or use Early Receive if available */
+ /* if short on Rx space, Rx wins and must trump Tx
+ * adjustment or use Early Receive if available
+ */
if (pba < min_rx_space)
pba = min_rx_space;
}
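
[Editor's sketch] The rebalancing above reads the PBA register as a Tx share (upper 16 bits, in KB) and an Rx share (lower 16 bits), takes space from Rx when Tx falls below its minimum, then lets Rx win back up to its own minimum. A simplified stand-alone sketch of just that arithmetic; rebalance_pba() is an invented name, and the early-receive and buffer-length handling is left out:

#include <stdint.h>
#include <stdio.h>

static uint32_t rebalance_pba(uint32_t pba_reg, uint32_t min_tx_kb,
                              uint32_t min_rx_kb)
{
    uint32_t tx_kb = pba_reg >> 16;        /* upper 16 bits: Tx KB */
    uint32_t rx_kb = pba_reg & 0xffff;     /* lower 16 bits: Rx KB */

    if (tx_kb < min_tx_kb && (min_tx_kb - tx_kb) < rx_kb)
        rx_kb -= min_tx_kb - tx_kb;        /* give the shortfall to Tx */

    if (rx_kb < min_rx_kb)                 /* Rx still trumps Tx if short */
        rx_kb = min_rx_kb;

    return rx_kb;                          /* Rx share written back to PBA */
}

int main(void)
{
    /* 20 KB Tx / 28 KB Rx, Tx needs 24 KB, Rx needs at least 10 KB */
    printf("new Rx share = %u KB\n",
           (unsigned int)rebalance_pba((20u << 16) | 28u, 24, 10));
    return 0;
}
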
@@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
- /*
- * flow control settings:
+ /* flow control settings:
* The high water mark must be low enough to fit one full frame
* (or the size used for early receive) above it in the Rx FIFO.
* Set it to the lower of:
@@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* clear phy power management bit if we are in gig only mode,
* which if enabled will attempt negotiation to 100Mb, which
- * can cause a loss of link at power off or driver unload */
+ * can cause a loss of link at power off or driver unload
+ */
ctrl &= ~E1000_CTRL_SWDPIN3;
ew32(CTRL, ctrl);
}
@@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
static netdev_features_t e1000_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable make sure Tx flag is always in same state as Rx.
*/
if (features & NETIF_F_HW_VLAN_RX)
features |= NETIF_F_HW_VLAN_TX;
@@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /*
- * there is a workaround being applied below that limits
+ /* there is a workaround being applied below that limits
* 64-bit DMA addresses to 64-bit hardware. There are some
* 32-bit adapters that Tx hang when given 64-bit DMA addresses
*/
pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) &&
!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /*
- * according to DMA-API-HOWTO, coherent calls will always
+ /* according to DMA-API-HOWTO, coherent calls will always
* succeed if the set call did
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
@@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* before reading the EEPROM, reset the controller to
- * put the device in a known good starting state */
+ * put the device in a known good starting state
+ */
e1000_reset_hw(hw);
@@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (e1000_validate_eeprom_checksum(hw) < 0) {
e_err(probe, "The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
- /*
- * set MAC address to all zeroes to invalidate and temporary
+ /* set MAC address to all zeroes to invalidate and temporary
* disable this device for the user. This blocks regular
* traffic while still permitting ethtool ioctls from reaching
* the hardware as well as allowing the user to run the
@@ -1123,9 +1118,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* don't block initalization here due to bad MAC address */
memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
e_err(probe, "Invalid MAC Address\n");
@@ -1170,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* now that we have the eeprom settings, apply the special cases
* where the eeprom may be wrong or the board simply won't support
- * wake on lan on a particular port */
+ * wake on lan on a particular port
+ */
switch (pdev->device) {
case E1000_DEV_ID_82546GB_PCIE:
adapter->eeprom_wol = 0;
@@ -1178,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
/* Wake events only supported on port A for dual fiber
- * regardless of eeprom setting */
+ * regardless of eeprom setting
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
@@ -1271,7 +1267,6 @@ err_pci_reg:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-
static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -1307,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev)
* e1000_sw_init initializes the Adapter private data structure.
* e1000_init_hw_struct MUST be called before this function
**/
-
static int e1000_sw_init(struct e1000_adapter *adapter)
{
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -1338,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
* We allocate one ring per queue at run-time since we don't know the
* number of queues at compile-time.
**/
-
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
@@ -1368,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready.
**/
-
static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1402,7 +1394,8 @@ static int e1000_open(struct net_device *netdev)
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
- * clean_rx handler before we do so. */
+ * clean_rx handler before we do so.
+ */
e1000_configure(adapter);
err = e1000_request_irq(adapter);
@@ -1445,7 +1438,6 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-
static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1460,10 +1452,11 @@ static int e1000_close(struct net_device *netdev)
e1000_free_all_rx_resources(adapter);
/* kill manageability vlan ID if supported, but not if a vlan with
- * the same ID is registered on the host OS (let 8021q kill it) */
+ * the same ID is registered on the host OS (let 8021q kill it)
+ */
if ((hw->mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
- !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
}
@@ -1484,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
unsigned long end = begin + len;
/* First rev 82545 and 82546 need to not allow any memory
- * write location to cross 64k boundary due to errata 23 */
+ * write location to cross 64k boundary due to errata 23
+ */
if (hw->mac_type == e1000_82545 ||
hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) {
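
[Editor's sketch] Errata 23 means a buffer may not cross a 64 KiB boundary on the affected parts, so the helper has to decide whether [start, start + len) stays inside one 64 KiB region. A generic test along those lines, for illustration only and not the driver's exact expression:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool within_64k(uintptr_t start, size_t len)
{
    if (len == 0)
        return true;
    /* same 64 KiB region iff the addresses agree above bit 15 */
    return (start >> 16) == ((start + len - 1) >> 16);
}

int main(void)
{
    printf("%d\n", within_64k(0xFF00, 0x100));    /* 1: ends at 0xFFFF */
    printf("%d\n", within_64k(0xFF00, 0x101));    /* 0: crosses 0x10000 */
    return 0;
}
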
@@ -1501,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
*
* Return 0 on success, negative on failure
**/
-
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *txdr)
{
@@ -1510,11 +1503,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * txdr->count;
txdr->buffer_info = vzalloc(size);
- if (!txdr->buffer_info) {
- e_err(probe, "Unable to allocate memory for the Tx descriptor "
- "ring\n");
+ if (!txdr->buffer_info)
return -ENOMEM;
- }
/* round up to nearest 4K */
@@ -1578,7 +1568,6 @@ setup_tx_desc_die:
*
* Return 0 on success, negative on failure
**/
-
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1603,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
*
* Configure the Tx unit of the MAC after a reset.
**/
-
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
u64 tdba;
@@ -1624,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
ew32(TDT, 0);
ew32(TDH, 0);
- adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
- adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
+ adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDH : E1000_82542_TDH);
+ adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDT : E1000_82542_TDT);
break;
}
@@ -1680,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
adapter->txd_cmd |= E1000_TXD_CMD_RS;
/* Cache if we're 82544 running in PCI-X because we'll
- * need this to apply a workaround later in the send path. */
+ * need this to apply a workaround later in the send path.
+ */
if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix)
adapter->pcix_82544 = true;
@@ -1696,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rxdr)
{
@@ -1705,11 +1695,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * rxdr->count;
rxdr->buffer_info = vzalloc(size);
- if (!rxdr->buffer_info) {
- e_err(probe, "Unable to allocate memory for the Rx descriptor "
- "ring\n");
+ if (!rxdr->buffer_info)
return -ENOMEM;
- }
desc_len = sizeof(struct e1000_rx_desc);
@@ -1778,7 +1765,6 @@ setup_rx_desc_die:
*
* Return 0 on success, negative on failure
**/
-
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1847,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* This is useful for sniffing bad packets. */
if (adapter->netdev->features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
- * in e1000e_set_rx_mode */
+ * in e1000e_set_rx_mode
+ */
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -1869,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
*
* Configure the Rx unit of the MAC after a reset.
**/
-
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
u64 rdba;
@@ -1902,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
/* Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring */
+ * the Base and Length of the Rx Descriptor Ring
+ */
switch (adapter->num_rx_queues) {
case 1:
default:
@@ -1912,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
ew32(RDT, 0);
ew32(RDH, 0);
- adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
- adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+ adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDH : E1000_82542_RDH);
+ adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDT : E1000_82542_RDT);
break;
}
@@ -1939,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
*
* Free all transmit software resources
**/
-
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@@ -1962,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
*
* Free all transmit software resources
**/
-
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -1997,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
* @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@@ -2033,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_tx_rings - Free Tx Buffers for all queues
* @adapter: board private structure
**/
-
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2049,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
*
* Free all receive software resources
**/
-
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@@ -2072,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
*
* Free all receive software resources
**/
-
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -2086,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
* @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@@ -2145,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_rx_rings - Free Rx Buffers for all queues
* @adapter: board private structure
**/
-
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2205,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_set_mac(struct net_device *netdev, void *p)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2240,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
* responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
-
static void e1000_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2253,10 +2232,8 @@ static void e1000_set_rx_mode(struct net_device *netdev)
int mta_reg_count = E1000_NUM_MTA_REGISTERS;
u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
- if (!mcarray) {
- e_err(probe, "memory allocation failed\n");
+ if (!mcarray)
return;
- }
/* Check for Promiscuous and All Multicast modes */
@@ -2326,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev)
}
/* write the hash table completely, write from bottom to avoid
- * both stupid write combining chipsets, and flushing each write */
+ * both stupid write combining chipsets, and flushing each write
+ */
for (i = mta_reg_count - 1; i >= 0 ; i--) {
- /*
- * If we are on an 82544 has an errata where writing odd
+ /* The 82544 has an errata where writing odd
* offsets overwrites the previous even offset, but writing
* backwards over the range solves the issue by always
* writing the odd offset first
@@ -2467,8 +2444,8 @@ static void e1000_watchdog(struct work_struct *work)
bool txb2b = true;
/* update snapshot of PHY registers on LSC */
e1000_get_speed_and_duplex(hw,
- &adapter->link_speed,
- &adapter->link_duplex);
+ &adapter->link_speed,
+ &adapter->link_duplex);
ctrl = er32(CTRL);
pr_info("%s NIC Link is Up %d Mbps %s, "
@@ -2542,7 +2519,8 @@ link_up:
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context). */
+ * (Do the reset outside of interrupt context).
+ */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
/* exit immediately since reset is imminent */
@@ -2552,8 +2530,7 @@ link_up:
/* Simple mode for Interrupt Throttle Rate (ITR) */
if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
- /*
- * Symmetric Tx/Rx gets a reduced ITR=2000;
+ /* Symmetric Tx/Rx gets a reduced ITR=2000;
* Total asymmetrical Tx or Rx gets ITR=8000;
* everyone else is between 2000-8000.
*/
@@ -2668,18 +2645,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
goto set_itr_now;
}
- adapter->tx_itr = e1000_update_itr(adapter,
- adapter->tx_itr,
- adapter->total_tx_packets,
- adapter->total_tx_bytes);
+ adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
adapter->tx_itr = low_latency;
- adapter->rx_itr = e1000_update_itr(adapter,
- adapter->rx_itr,
- adapter->total_rx_packets,
- adapter->total_rx_bytes);
+ adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
adapter->rx_itr = low_latency;
@@ -2705,10 +2680,11 @@ set_itr_now:
if (new_itr != adapter->itr) {
/* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
- * increasing */
+ * increasing
+ */
new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
- new_itr;
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
adapter->itr = new_itr;
ew32(ITR, 1000000000 / (new_itr * 256));
}
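The ew32(ITR, ...) write just above converts a target interrupt rate into register units; on this hardware the ITR interval is counted in 256 ns increments, so a rate of N interrupts per second becomes 10^9 / (N * 256). A minimal sketch of that conversion (the helper name is illustrative, not part of the driver):

	/* Sketch: convert an interrupts-per-second target into the ITR
	 * register's 256 ns interval units, mirroring the expression
	 * ew32(ITR, 1000000000 / (new_itr * 256)) in the hunk above.
	 */
	static inline u32 itr_rate_to_reg(u32 ints_per_sec)
	{
		return 1000000000u / (ints_per_sec * 256u);
	}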
@@ -2870,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* Workaround for Controller erratum --
* descriptor for non-tso packet in a linear SKB that follows a
* tso gets written back prematurely before the data is fully
- * DMA'd to the controller */
+ * DMA'd to the controller
+ */
if (!skb->data_len && tx_ring->last_tx_tso &&
!skb_is_gso(skb)) {
tx_ring->last_tx_tso = false;
@@ -2878,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
+ * in TSO mode. Append 4-byte sentinel desc
+ */
if (unlikely(mss && !nr_frags && size == len && size > 8))
size -= 4;
/* work-around for errata 10 and it applies
@@ -2891,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size = 2015;
/* Workaround for potential 82544 hang in PCI-X. Avoid
- * terminating buffers within evenly-aligned dwords. */
+ * terminating buffers within evenly-aligned dwords.
+ */
if (unlikely(adapter->pcix_82544 &&
!((unsigned long)(skb->data + offset + size - 1) & 4) &&
size > 4))
@@ -2903,7 +2882,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, DMA_TO_DEVICE);
+ size, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
@@ -2934,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && f == (nr_frags-1) &&
+ size == len && size > 8))
size -= 4;
/* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned
- * dwords. */
+ * dwords.
+ */
bufend = (unsigned long)
page_to_phys(skb_frag_page(frag));
bufend += offset + size - 1;
@@ -3003,7 +2985,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
- E1000_TXD_CMD_TSE;
+ E1000_TXD_CMD_TSE;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
@@ -3044,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
tx_ring->next_to_use = i;
writel(i, hw->hw_addr + tx_ring->tdt);
/* we need this if more than one processor can write to our tail
- * at a time, it syncronizes IO on IA64/Altix systems */
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
mmiowb();
}
@@ -3099,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
netif_stop_queue(netdev);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
+ * but since that doesn't exist yet, just open code it.
+ */
smp_mb();
/* We need to check again in a case another CPU has just
- * made room available. */
+ * made room available.
+ */
if (likely(E1000_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
@@ -3114,7 +3100,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
}
static int e1000_maybe_stop_tx(struct net_device *netdev,
- struct e1000_tx_ring *tx_ring, int size)
+ struct e1000_tx_ring *tx_ring, int size)
{
if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -3138,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int tso;
unsigned int f;
- /* This goes back to the question of how to logically map a tx queue
+ /* This goes back to the question of how to logically map a Tx queue
* to a flow. Right now, performance is impacted slightly negatively
- * if using multiple tx queues. If the stack breaks away from a
- * single qdisc implementation, we can look at this again. */
+ * if using multiple Tx queues. If the stack breaks away from a
+ * single qdisc implementation, we can look at this again.
+ */
tx_ring = adapter->tx_ring;
if (unlikely(skb->len <= 0)) {
@@ -3166,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
* overrun the FIFO, adjust the max buffer len if mss
- * drops. */
+ * drops.
+ */
if (mss) {
u8 hdr_len;
max_per_txd = min(mss << 2, max_per_txd);
@@ -3182,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* this hardware's requirements
* NOTE: this is a TSO only workaround
* if end byte alignment not correct move us
- * into the next dword */
- if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+ * into the next dword
+ */
+ if ((unsigned long)(skb_tail_pointer(skb) - 1)
+ & 4)
break;
/* fall through */
pull_size = min((unsigned int)4, skb->data_len);
@@ -3231,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count += nr_frags;
/* need: count + 2 desc gap to keep tail from touching
- * head, otherwise try next time */
+ * head, otherwise try next time
+ */
if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
return NETDEV_TX_BUSY;
@@ -3270,7 +3261,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
tx_flags |= E1000_TX_FLAGS_NO_FCS;
count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
- nr_frags, mss);
+ nr_frags, mss);
if (count) {
netdev_sent_queue(netdev, skb->len);
@@ -3372,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
/* Print Registers */
e1000_regdump(adapter);
- /*
- * transmit dump
- */
+ /* transmit dump */
pr_info("TX Desc ring0 dump\n");
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
@@ -3435,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
}
rx_ring_summary:
- /*
- * receive dump
- */
+ /* receive dump */
pr_info("\nRX Desc ring dump\n");
/* Legacy Receive Descriptor Format
@@ -3502,7 +3489,6 @@ exit:
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-
static void e1000_tx_timeout(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3530,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the watchdog.
**/
-
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
/* only return the current stats */
@@ -3544,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3581,8 +3565,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
* i.e. RXBUFFER_2048 --> size-4096 slab
- * however with the new *_jumbo_rx* routines, jumbo receives will use
- * fragmented skbs */
+ * however with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs
+ */
if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
@@ -3617,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* e1000_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
-
void e1000_update_stats(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3628,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter)
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
- /*
- * Prevent stats update while adapter is being reset, or if the pci
+ /* Prevent stats update while adapter is being reset, or if the pci
* connection is down.
*/
if (adapter->link_speed == 0)
@@ -3719,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter)
/* Rx Errors */
/* RLEC on some newer hardware can be incorrect so build
- * our own version based on RUC and ROC */
+ * our own version based on RUC and ROC
+ */
netdev->stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
@@ -3773,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-
static irqreturn_t e1000_intr(int irq, void *data)
{
struct net_device *netdev = data;
@@ -3784,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
if (unlikely((!icr)))
return IRQ_NONE; /* Not our interrupt */
- /*
- * we might have caused the interrupt, but the above
+ /* we might have caused the interrupt, but the above
* read cleared it, and just in case the driver is
* down there is nothing to do so return handled
*/
@@ -3811,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
__napi_schedule(&adapter->napi);
} else {
/* this really should not happen! if it does it is basically a
- * bug, but not a hard error, so enable ints and continue */
+ * bug, but not a hard error, so enable ints and continue
+ */
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
@@ -3825,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
**/
static int e1000_clean(struct napi_struct *napi, int budget)
{
- struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
+ napi);
int tx_clean_complete = 0, work_done = 0;
tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
@@ -3916,11 +3900,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
+ * check with the clearing of time_stamp and movement of i
+ */
adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
- (adapter->tx_timeout_factor * HZ)) &&
+ (adapter->tx_timeout_factor * HZ)) &&
!(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
@@ -3963,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @csum: receive descriptor csum field
* @sk_buff: socket buffer with received data
**/
-
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
u32 csum, struct sk_buff *skb)
{
@@ -3999,7 +3983,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
* e1000_consume_page - helper function
**/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
- u16 length)
+ u16 length)
{
bi->page = NULL;
skb->len += length;
@@ -4095,11 +4079,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock,
- irq_flags);
+ irq_flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
length, mapped);
spin_unlock_irqrestore(&adapter->stats_lock,
- irq_flags);
+ irq_flags);
length--;
} else {
if (netdev->features & NETIF_F_RXALL)
@@ -4107,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* recycle both page and skb */
buffer_info->skb = skb;
/* an error means any chain goes out the window
- * too */
+ * too
+ */
if (rx_ring->rx_skb_top)
dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
@@ -4123,7 +4108,7 @@ process_skb:
/* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
- 0, length);
+ 0, length);
} else {
/* this is the middle of a chain */
skb_fill_page_desc(rxtop,
@@ -4141,38 +4126,42 @@ process_skb:
skb_shinfo(rxtop)->nr_frags,
buffer_info->page, 0, length);
/* re-use the current skb, we only consumed the
- * page */
+ * page
+ */
buffer_info->skb = skb;
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
/* no chain, got EOP, this buf is the packet
- * copybreak to save the put_page/alloc_page */
+ * copybreak to save the put_page/alloc_page
+ */
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
u8 *vaddr;
vaddr = kmap_atomic(buffer_info->page);
- memcpy(skb_tail_pointer(skb), vaddr, length);
+ memcpy(skb_tail_pointer(skb), vaddr,
+ length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
- * buffer_info->page */
+ * buffer_info->page
+ */
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
- length);
+ buffer_info->page, 0,
+ length);
e1000_consume_page(buffer_info, skb,
- length);
+ length);
}
}
}
/* Receive Checksum Offload XXX recompute due to CRC strip? */
e1000_rx_checksum(adapter,
- (u32)(status) |
- ((u32)(rx_desc->errors) << 24),
- le16_to_cpu(rx_desc->csum), skb);
+ (u32)(status) |
+ ((u32)(rx_desc->errors) << 24),
+ le16_to_cpu(rx_desc->csum), skb);
total_rx_bytes += (skb->len - 4); /* don't count FCS */
if (likely(!(netdev->features & NETIF_F_RXFCS)))
@@ -4214,8 +4203,7 @@ next_desc:
return cleaned;
}
-/*
- * this should improve performance for small packets with large amounts
+/* this should improve performance for small packets with large amounts
* of reassembly being done in the stack
*/
static void e1000_check_copybreak(struct net_device *netdev,
@@ -4319,9 +4307,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
- length, skb->data);
+ length, skb->data);
spin_unlock_irqrestore(&adapter->stats_lock,
- flags);
+ flags);
length--;
} else {
if (netdev->features & NETIF_F_RXALL)
@@ -4386,10 +4374,9 @@ next_desc:
* @rx_ring: pointer to receive ring structure
* @cleaned_count: number of buffers to allocate this pass
**/
-
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring, int cleaned_count)
+ struct e1000_rx_ring *rx_ring, int cleaned_count)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -4430,7 +4417,7 @@ check_page:
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
- buffer_info->page, 0,
+ buffer_info->page, 0,
buffer_info->length,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
@@ -4460,7 +4447,8 @@ check_page:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, adapter->hw.hw_addr + rx_ring->rdt);
}
@@ -4470,7 +4458,6 @@ check_page:
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
**/
-
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count)
@@ -4541,8 +4528,7 @@ map_skb:
break; /* while !buffer_info->skb */
}
- /*
- * XXX if it was allocated cleanly it will never map to a
+ /* XXX if it was allocated cleanly it will never map to a
* boundary crossing
*/
@@ -4580,7 +4566,8 @@ map_skb:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, hw->hw_addr + rx_ring->rdt);
}
@@ -4590,7 +4577,6 @@ map_skb:
* e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
* @adapter:
**/
-
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -4603,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
if (adapter->smartspeed == 0) {
/* If Master/Slave config fault is asserted twice,
- * we assume back-to-back */
+ * we assume back-to-back
+ */
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
@@ -4616,7 +4603,7 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
adapter->smartspeed++;
if (!e1000_phy_setup_autoneg(hw) &&
!e1000_read_phy_reg(hw, PHY_CTRL,
- &phy_ctrl)) {
+ &phy_ctrl)) {
phy_ctrl |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
e1000_write_phy_reg(hw, PHY_CTRL,
@@ -4647,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
* @ifreq:
* @cmd:
**/
-
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -4666,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* @ifreq:
* @cmd:
**/
-
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
@@ -4928,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
hw->autoneg = 0;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work */
+ * for the switch() below to work
+ */
if ((spd & 1) || (dplx & ~1))
goto err_inval;
@@ -5131,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
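Several of the hunks above reflow the same descriptor-ring handoff: finish writing the descriptors in memory, issue a write barrier, then bump the tail register so the hardware starts fetching. A condensed sketch of that ordering, with placeholder structure and field names rather than the driver's own:

	/* Sketch of the tail-bump ordering used in e1000_tx_queue() and the
	 * Rx buffer allocators above (kernel-style pseudo-code).
	 */
	struct ring {
		unsigned int next_to_use;	/* software producer index */
		void __iomem *tail;		/* mapped tail register */
	};

	static void ring_publish(struct ring *r, unsigned int i)
	{
		/* make descriptor writes visible before the doorbell; this
		 * only matters on weakly-ordered archs such as IA-64
		 */
		wmb();
		r->next_to_use = i;
		writel(i, r->tail);
	}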
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 750fc0194f37..c9cde352b1c8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
-
void e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
@@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
.def = E1000_DEFAULT_RXD,
.arg = { .r = {
.min = E1000_MIN_RXD,
- .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+ .max = mac_type < e1000_82544 ? E1000_MAX_RXD :
+ E1000_MAX_82544_RXD
}}
};
@@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
@@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->rx_int_delay = opt.def;
}
@@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
@@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter)
break;
case 4:
e_dev_info("%s set to simplified "
- "(2000-8000) ints mode\n", opt.name);
+ "(2000-8000) ints mode\n", opt.name);
adapter->itr_setting = adapter->itr;
break;
default:
e1000_validate_option(&adapter->itr, &opt,
- adapter);
+ adapter);
/* save the setting, because the dynamic bits
* change itr.
* clear the lower two bits because they are
- * used as control */
+ * used as control
+ */
adapter->itr_setting = adapter->itr & ~3;
break;
}
@@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on fiber adapters
**/
-
static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
@@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on copper adapters
**/
-
static void e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
@@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_100_HALF;
+ ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
e_dev_info("Full Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
- ADVERTISE_100_FULL |
- ADVERTISE_1000_FULL;
+ ADVERTISE_100_FULL |
+ ADVERTISE_1000_FULL;
break;
case SPEED_10:
e_dev_info("10 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_10_FULL;
+ ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
e_dev_info("Forcing to 10 Mbps Half Duplex\n");
@@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
- ADVERTISE_100_FULL;
+ ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
e_dev_info("Forcing to 100 Mbps Half Duplex\n");
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index e73c2c355993..e0991388664c 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -32,69 +32,6 @@
#include "e1000.h"
-#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
-#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
-#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
-#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
-
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
-#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
-
-#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
-#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
-#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
-
-#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
-#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
-
-#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
-#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
-
-#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
-#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
-
-/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
-#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */
-#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
-#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
-#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
-#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
-
-/* PHY Specific Control Register 2 (Page 0, Register 26) */
-#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000
- /* 1=Reverse Auto-Negotiation */
-
-/* MAC Specific Control Register (Page 2, Register 21) */
-/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
-#define GG82563_MSCR_TX_CLK_MASK 0x0007
-#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
-#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
-#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
-
-#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
-
-/* DSP Distance Register (Page 5, Register 26) */
-#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M
- 1 = 50-80M
- 2 = 80-110M
- 3 = 110-140M
- 4 = >140M
- */
-
-/* Kumeran Mode Control Register (Page 193, Register 16) */
-#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
-
-/* Max number of times Kumeran read/write should be validated */
-#define GG82563_MAX_KMRN_RETRY 0x5
-
-/* Power Management Control Register (Page 193, Register 20) */
-#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
- /* 1=Enable SERDES Electrical Idle */
-
-/* In-Band Control Register (Page 194, Register 18) */
-#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
-
/* A table for the GG82563 cable length where the range is defined
* with a lower bound at "index" and the upper bound at
* "index + 5".
@@ -111,11 +48,10 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
-static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
-static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
- u16 *data);
-static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
- u16 data);
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data);
static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
/**
@@ -625,16 +561,16 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
e_dbg("GG82563 PSCR: %X\n", phy_data);
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
/* Reset the phy to commit changes. */
- phy_data |= MII_CR_RESET;
+ phy_data |= BMCR_RESET;
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
@@ -696,7 +632,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val;
u16 phy_data, index;
ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
@@ -774,6 +710,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
e_dbg("Issuing a global reset to MAC\n");
ew32(CTRL, ctrl | E1000_CTRL_RST);
e1000_release_phy_80003es2lan(hw);
@@ -833,6 +772,8 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Setup link and flow control */
ret_val = mac->ops.setup_link(hw);
+ if (ret_val)
+ return ret_val;
/* Disable IBIST slave mode (far-end loopback) */
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
@@ -1006,7 +947,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
return ret_val;
/* SW Reset the PHY so all changes take effect */
- ret_val = e1000e_commit_phy(hw);
+ ret_val = hw->phy.ops.commit(hw);
if (ret_val) {
e_dbg("Error Resetting the PHY\n");
return ret_val;
@@ -1272,7 +1213,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 *data)
{
u32 kmrnctrlsta;
- s32 ret_val = 0;
+ s32 ret_val;
ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
if (ret_val)
@@ -1307,7 +1248,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 data)
{
u32 kmrnctrlsta;
- s32 ret_val = 0;
+ s32 ret_val;
ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
if (ret_val)
@@ -1331,7 +1272,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
**/
static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val;
/* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
@@ -1434,18 +1375,18 @@ static const struct e1000_phy_operations es2_phy_ops = {
.acquire = e1000_acquire_phy_80003es2lan,
.check_polarity = e1000_check_polarity_m88,
.check_reset_block = e1000e_check_reset_block_generic,
- .commit = e1000e_phy_sw_reset,
- .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
- .get_cfg_done = e1000_get_cfg_done_80003es2lan,
- .get_cable_length = e1000_get_cable_length_80003es2lan,
- .get_info = e1000e_get_phy_info_m88,
- .read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
+ .commit = e1000e_phy_sw_reset,
+ .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
+ .get_cfg_done = e1000_get_cfg_done_80003es2lan,
+ .get_cable_length = e1000_get_cable_length_80003es2lan,
+ .get_info = e1000e_get_phy_info_m88,
+ .read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
.release = e1000_release_phy_80003es2lan,
- .reset = e1000e_phy_hw_reset_generic,
- .set_d0_lplu_state = NULL,
- .set_d3_lplu_state = e1000e_set_d3_lplu_state,
- .write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
- .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = NULL,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
+ .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
};
static const struct e1000_nvm_operations es2_nvm_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
new file mode 100644
index 000000000000..90d363b2d280
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -0,0 +1,95 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_80003ES2LAN_H_
+#define _E1000E_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26)
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY 0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+/* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index c77d010d5c59..2faffbde179e 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -44,21 +44,6 @@
#include "e1000.h"
-#define ID_LED_RESERVED_F746 0xF746
-#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_OFF1_ON2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_DEF1_DEF2))
-
-#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
-#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
-#define E1000_BASE1000T_STATUS 10
-#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
-#define E1000_RECEIVE_ERROR_COUNTER 21
-#define E1000_RECEIVE_ERROR_MAX 0xFFFF
-
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
-
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
@@ -67,9 +52,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
-static s32 e1000_setup_link_82571(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
-static void e1000_clear_vfta_82571(struct e1000_hw *hw);
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
static s32 e1000_led_on_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
@@ -449,13 +432,13 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
break;
case e1000_82574:
case e1000_82583:
- ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id);
if (ret_val)
return ret_val;
phy->id = (u32)(phy_id << 16);
udelay(20);
- ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -556,16 +539,14 @@ static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
s32 i = 0;
extcnf_ctrl = er32(EXTCNF_CTRL);
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
do {
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
ew32(EXTCNF_CTRL, extcnf_ctrl);
extcnf_ctrl = er32(EXTCNF_CTRL);
if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
break;
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
usleep_range(2000, 4000);
i++;
} while (i < MDIO_OWNERSHIP_TIMEOUT);
@@ -937,6 +918,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
if (ret_val)
@@ -1329,9 +1312,10 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
*/
vfta_offset = (hw->mng_cookie.vlan_id >>
E1000_VFTA_ENTRY_SHIFT) &
- E1000_VFTA_ENTRY_MASK;
- vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
- E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ E1000_VFTA_ENTRY_MASK;
+ vfta_bit_in_reg =
+ 1 << (hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
break;
default:
@@ -1399,7 +1383,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
{
u16 status_1kbt = 0;
u16 receive_errors = 0;
- s32 ret_val = 0;
+ s32 ret_val;
/* Read PHY Receive Error counter first, if its is max - all F's then
* read the Base1000T status register If both are max then PHY is hung.
@@ -1544,13 +1528,12 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
ctrl = er32(CTRL);
status = er32(STATUS);
- rxcw = er32(RXCW);
+ er32(RXCW);
/* SYNCH bit and IV bit are sticky */
udelay(10);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
-
/* Receiver is synchronized with no invalid bits. */
switch (mac->serdes_link_state) {
case e1000_serdes_link_autoneg_complete:
@@ -1799,6 +1782,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
if (ret_val)
return ret_val;
ret_val = e1000e_update_nvm_checksum(hw);
+ if (ret_val)
+ return ret_val;
}
}
@@ -1812,7 +1797,7 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
{
if (hw->mac.type == e1000_82571) {
- s32 ret_val = 0;
+ s32 ret_val;
/* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
@@ -1931,7 +1916,7 @@ static const struct e1000_phy_operations e82_phy_ops_igp = {
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_igp,
- .cfg_on_link_up = NULL,
+ .cfg_on_link_up = NULL,
};
static const struct e1000_phy_operations e82_phy_ops_m88 = {
@@ -1940,7 +1925,7 @@ static const struct e1000_phy_operations e82_phy_ops_m88 = {
.check_reset_block = e1000e_check_reset_block_generic,
.commit = e1000e_phy_sw_reset,
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
- .get_cfg_done = e1000e_get_cfg_done,
+ .get_cfg_done = e1000e_get_cfg_done_generic,
.get_cable_length = e1000e_get_cable_length_m88,
.get_info = e1000e_get_phy_info_m88,
.read_reg = e1000e_read_phy_reg_m88,
@@ -1949,7 +1934,7 @@ static const struct e1000_phy_operations e82_phy_ops_m88 = {
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_m88,
- .cfg_on_link_up = NULL,
+ .cfg_on_link_up = NULL,
};
static const struct e1000_phy_operations e82_phy_ops_bm = {
@@ -1958,7 +1943,7 @@ static const struct e1000_phy_operations e82_phy_ops_bm = {
.check_reset_block = e1000e_check_reset_block_generic,
.commit = e1000e_phy_sw_reset,
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
- .get_cfg_done = e1000e_get_cfg_done,
+ .get_cfg_done = e1000e_get_cfg_done_generic,
.get_cable_length = e1000e_get_cable_length_m88,
.get_info = e1000e_get_phy_info_m88,
.read_reg = e1000e_read_phy_reg_bm2,
@@ -1967,7 +1952,7 @@ static const struct e1000_phy_operations e82_phy_ops_bm = {
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_bm2,
- .cfg_on_link_up = NULL,
+ .cfg_on_link_up = NULL,
};
static const struct e1000_nvm_operations e82571_nvm_ops = {
@@ -2044,6 +2029,7 @@ const struct e1000_info e1000_82574_info = {
| FLAG_HAS_MSIX
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_APME_IN_CTRL3
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
@@ -2065,6 +2051,7 @@ const struct e1000_info e1000_82583_info = {
.mac = e1000_82583,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_APME_IN_CTRL3
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
new file mode 100644
index 000000000000..85cb1a3b7cd4
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -0,0 +1,58 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_82571_H_
+#define _E1000E_82571_H_
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
+
+/* Intr Throttling - RW */
+#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n)))
+
+#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAC_MASK_82574 0x01F00000
+
+/* Manageability Operation Mode mask */
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
+
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
+bool e1000_check_phy_82574(struct e1000_hw *hw);
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 591b71324505..c2dcfcc10857 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Copyright(c) 1999 - 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -34,5 +34,5 @@ obj-$(CONFIG_E1000E) += e1000e.o
e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
mac.o manage.o nvm.o phy.o \
- param.o ethtool.o netdev.o
+ param.o ethtool.o netdev.o ptp.o
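The ptp.o object added here pairs with the IEEE 1588 defines introduced in defines.h below (the TSYNCTXCTL/TSYNCRXCTL enable bits, the E1000_RXDEXT_STATERR_TST status bit, and the TIMINCA shift/mask). As a rough illustration of how such a shift/mask pair is typically combined — an assumption based on the values, not code from this patch:

	/* Sketch: pack a SYSTIM increment period and increment value into a
	 * TIMINCA word using the defines added below; the actual period and
	 * value are part-specific and chosen by the driver's PTP code.
	 */
	static u32 timinca_pack(u32 incperiod, u32 incvalue)
	{
		return (incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
		       (incvalue & E1000_TIMINCA_INCVALUE_MASK);
	}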
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 02a12b69555f..fc3a4fe1ac71 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,25 +29,6 @@
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
-#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
-#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
-#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
-#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
-#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
-#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
-#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
-#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
-#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
-
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
@@ -86,7 +67,6 @@
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
-#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_LSECCK 0x00001000
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
@@ -107,6 +87,7 @@
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
@@ -115,19 +96,19 @@
/* mask to determine if packets should be dropped due to frame errors */
#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
- E1000_RXD_ERR_CE | \
- E1000_RXD_ERR_SE | \
- E1000_RXD_ERR_SEQ | \
- E1000_RXD_ERR_CXE | \
- E1000_RXD_ERR_RXE)
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
@@ -232,6 +213,7 @@
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -241,9 +223,9 @@
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
-/* Bit definitions for the Management Data IO (MDIO) and Management Data
- * Clock (MDC) pins in the Device Control Register.
- */
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
/* Device Status */
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
@@ -259,8 +241,6 @@
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
-/* Constants used to interpret the masked PCI-X bus speed. */
-
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
@@ -273,14 +253,15 @@
#define ADVERTISE_1000_FULL 0x0020
/* 1000/H is not supported, nor spec-compliant. */
-#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
- ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
- ADVERTISE_1000_FULL)
-#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
- ADVERTISE_100_HALF | ADVERTISE_100_FULL)
-#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
-#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
-#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+#define E1000_ALL_SPEED_DUPLEX ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
@@ -318,6 +299,7 @@
#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
/* Transmit Control */
#define E1000_TCTL_EN 0x00000002 /* enable Tx */
@@ -327,8 +309,6 @@
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
-/* Transmit Arbitration Count */
-
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
@@ -383,12 +363,23 @@
#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+/* Low Power IDLE Control */
+#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */
+
/* PBA constants */
#define E1000_PBA_8K 0x0008 /* 8KB */
#define E1000_PBA_16K 0x0010 /* 16KB */
+#define E1000_PBA_RXA_MASK 0xFFFF
+
#define E1000_PBS_16K E1000_PBA_16K
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
+#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
+
#define IFS_MAX 80
#define IFS_MIN 40
#define IFS_RATIO 4
@@ -408,6 +399,7 @@
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
@@ -431,11 +423,11 @@
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC)
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -443,6 +435,7 @@
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
@@ -533,6 +526,28 @@
#define E1000_RXCW_C 0x20000000 /* Receive config */
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+
+#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000
+
+#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000
+
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
+
/* PCI Express Control */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
@@ -548,66 +563,6 @@
E1000_GCR_TXDSCW_NO_SNOOP | \
E1000_GCR_TXDSCR_NO_SNOOP)
-/* PHY Control Register */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_1000 0x0040
-#define MII_CR_SPEED_100 0x2000
-#define MII_CR_SPEED_10 0x0000
-
-/* PHY Status Register */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-
-/* Autoneg Advertisement Register */
-#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
-#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
-#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
-#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
-#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
-#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
-
-/* Link Partner Ability Register (Base Page) */
-#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
-#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
-#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
-
-/* Autoneg Expansion Register */
-#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
-
-/* 1000BASE-T Control Register */
-#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
-#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
- /* 0=DTE device */
-#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
- /* 0=Configure PHY as Slave */
-#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
- /* 0=Automatic Master/Slave config */
-
-/* 1000BASE-T Status Register */
-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
-
-
-/* PHY 1000 MII Register/Bit Definitions */
-/* PHY Registers defined by IEEE */
-#define PHY_CONTROL 0x00 /* Control Register */
-#define PHY_STATUS 0x01 /* Status Register */
-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
-#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
-#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
-
-#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
-
/* NVM Control */
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
@@ -639,6 +594,10 @@
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_FUTURE_INIT_WORD1 0x0019
+#define NVM_COMPAT_VALID_CSUM 0x0001
+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
+
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_3GIO_3 0x001A
@@ -647,8 +606,6 @@
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
-
#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
@@ -757,9 +714,6 @@
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
-/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
- * 0=Normal 10BASE-T Rx Threshold
- */
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
/* M88E1000 PHY Specific Status Register */
@@ -795,11 +749,6 @@
/* BME1000 PHY Specific Control Register */
#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
-
-#define PHY_PAGE_SHIFT 5
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
- ((reg) & MAX_PHY_REG_ADDRESS))
-
/* Bits...
* 15-5: page
* 4-0: register offset
@@ -846,8 +795,4 @@
/* SerDes Control */
#define E1000_GEN_POLL_TIMEOUT 640
-/* FW Semaphore */
-#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
-#define E1000_FWSM_WLOCK_MAC_SHIFT 7
-
#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6782a2eea1bc..fcc758138b8a 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -41,7 +41,11 @@
#include <linux/pci-aspm.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
-
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/mii.h>
#include "hw.h"
struct e1000_info;
@@ -75,9 +79,6 @@ struct e1000_info;
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
-/* Early Receive defines */
-#define E1000_ERT_2048 0x100
-
#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
@@ -94,70 +95,6 @@ struct e1000_info;
#define DEFAULT_JUMBO 9234
-/* BM/HV Specific Registers */
-#define BM_PORT_CTRL_PAGE 769
-
-#define PHY_UPPER_SHIFT 21
-#define BM_PHY_REG(page, reg) \
- (((reg) & MAX_PHY_REG_ADDRESS) |\
- (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
- (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
-
-/* PHY Wakeup Registers and defines */
-#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
-#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
-#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
-#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
-#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
-#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
-#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
-
-#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
-#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
-#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
-#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
-#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
-#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
-#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
-
-#define HV_STATS_PAGE 778
-#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
-#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
-#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
-#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
-#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
-#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
-#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
-#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
-#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
-#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
-#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
-#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
-#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
-#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
-
-#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
-
-/* BM PHY Copper Specific Status */
-#define BM_CS_STATUS 17
-#define BM_CS_STATUS_LINK_UP 0x0400
-#define BM_CS_STATUS_RESOLVED 0x0800
-#define BM_CS_STATUS_SPEED_MASK 0xC000
-#define BM_CS_STATUS_SPEED_1000 0x8000
-
-/* 82577 Mobile Phy Status Register */
-#define HV_M_STATUS 26
-#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
-#define HV_M_STATUS_SPEED_MASK 0x0300
-#define HV_M_STATUS_SPEED_1000 0x0200
-#define HV_M_STATUS_LINK_UP 0x0040
-
-#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
-#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
-
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
@@ -309,6 +246,8 @@ struct e1000_adapter {
struct napi_struct napi;
+ unsigned int uncorr_errors; /* uncorrectable ECC errors */
+ unsigned int corr_errors; /* correctable ECC errors */
unsigned int restart_queue;
u32 txd_cmd;
@@ -353,6 +292,7 @@ struct e1000_adapter {
u64 gorc_old;
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
+ u32 rx_hwtstamp_cleared;
unsigned int rx_ps_pages;
u16 rx_ps_bsize0;
@@ -366,7 +306,7 @@ struct e1000_adapter {
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
- spinlock_t stats64_lock;
+ spinlock_t stats64_lock; /* protects statistics counters */
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
@@ -402,6 +342,16 @@ struct e1000_adapter {
u16 tx_ring_count;
u16 rx_ring_count;
+
+ struct hwtstamp_config hwtstamp_config;
+ struct delayed_work systim_overflow_work;
+ struct sk_buff *tx_hwtstamp_skb;
+ struct work_struct tx_hwtstamp_work;
+ spinlock_t systim_lock; /* protects SYSTIML/H registers */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
};
struct e1000_info {
@@ -416,6 +366,40 @@ struct e1000_info {
const struct e1000_nvm_operations *nvm_ops;
};
+s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
+
+/* The system time is maintained by a 64-bit counter comprised of the 32-bit
+ * SYSTIMH and SYSTIML registers. How the counter increments (and therefore
+ * its resolution) is based on the contents of the TIMINCA register - it
+ * increments every incperiod (bits 31:24) clock ticks by incvalue (bits 23:0).
+ * For the best accuracy, the incperiod should be as small as possible. The
+ * incvalue is scaled by a factor as large as possible (while still fitting
+ * in bits 23:0) so that relatively small clock corrections can be made.
+ *
+ * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
+ * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
+ * bits to count nanoseconds, leaving the rest for fractional nanoseconds.
+ */
+#define INCVALUE_96MHz 125
+#define INCVALUE_SHIFT_96MHz 17
+#define INCPERIOD_SHIFT_96MHz 2
+#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz)
+
+#define INCVALUE_25MHz 40
+#define INCVALUE_SHIFT_25MHz 18
+#define INCPERIOD_25MHz 1
+
+/* Another drawback of scaling the incvalue by a large factor is the
+ * 64-bit SYSTIM register overflows more quickly. This is dealt with
+ * by simply reading the clock before it overflows.
+ *
+ * Clock ns bits Overflows after
+ * ~~~~~~ ~~~~~~~ ~~~~~~~~~~~~~~~
+ * 96MHz 47-bit 2^(47-INCPERIOD_SHIFT_96MHz) / 10^9 / 3600 = 9.77 hrs
+ * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours
+ */
+#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4)
+
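As a worked example of the scheme described above, the 96 MHz constants compose into a TIMINCA value roughly like this (a sketch under those assumptions; the helper name is made up and the real e1000e_get_base_timinca() body is not part of this header):

	static u32 e1000e_sketch_timinca_96mhz(void)
	{
		u32 incvalue = INCVALUE_96MHz << INCVALUE_SHIFT_96MHz;	/* 125 << 17 */

		/* With these values SYSTIM wraps after about 2^45 ns, i.e. the
		 * 9.77 hours in the table above, so the 4-hour overflow work
		 * reads the clock well before the wrap.
		 */
		return (INCPERIOD_96MHz << E1000_TIMINCA_INCPERIOD_SHIFT) |
		       (incvalue & E1000_TIMINCA_INCVALUE_MASK);
	}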
/* hardware capability, feature, and workaround flags */
#define FLAG_HAS_AMT (1 << 0)
#define FLAG_HAS_FLASH (1 << 1)
@@ -431,7 +415,7 @@ struct e1000_info {
#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
#define FLAG_IS_QUAD_PORT_A (1 << 12)
#define FLAG_IS_QUAD_PORT (1 << 13)
-/* reserved bit14 */
+#define FLAG_HAS_HW_TIMESTAMP (1 << 14)
#define FLAG_APME_IN_WUC (1 << 15)
#define FLAG_APME_IN_CTRL3 (1 << 16)
#define FLAG_APME_CHECK_PORT_B (1 << 17)
@@ -447,7 +431,7 @@ struct e1000_info {
#define FLAG_MSI_ENABLED (1 << 27)
/* reserved (1 << 28) */
#define FLAG_TSO_FORCE (1 << 29)
-#define FLAG_RX_RESTART_NOW (1 << 30)
+#define FLAG_RESTART_NOW (1 << 30)
#define FLAG_MSI_TEST_FAILED (1 << 31)
#define FLAG2_CRC_STRIPPING (1 << 0)
@@ -463,6 +447,7 @@ struct e1000_info {
#define FLAG2_NO_DISABLE_RX (1 << 10)
#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
#define FLAG2_DFLT_CRC_STRIPPING (1 << 12)
+#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -512,8 +497,6 @@ extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
-extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
-
extern const struct e1000_info e1000_82571_info;
extern const struct e1000_info e1000_82572_info;
extern const struct e1000_info e1000_82573_info;
@@ -527,138 +510,8 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_es2_info;
-extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-
-extern s32 e1000e_commit_phy(struct e1000_hw *hw);
-
-extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
-
-extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
-extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
-
-extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
-extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
- bool state);
-extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
-extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
-extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
-extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
-extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
-
-extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
-extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
-extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
-extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
-extern s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
-extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
-extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
-extern s32 e1000e_setup_link_generic(struct e1000_hw *hw);
-extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
-extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
-extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count);
-extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
-extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
-extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
-extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
-extern void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
-extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
-extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
-extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
-extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
-extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
-extern void e1000e_reset_adaptive(struct e1000_hw *hw);
-extern void e1000e_update_adaptive(struct e1000_hw *hw);
-
-extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
-extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
-extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
-extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
-extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
-extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
-extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
-extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
- u16 *phy_reg);
-extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
- u16 *phy_reg);
-extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
-extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
-extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success);
-extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
-extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
-extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_check_downshift(struct e1000_hw *hw);
-extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
-extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
-extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
-
-extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
-extern bool e1000_check_phy_82574(struct e1000_hw *hw);
+extern void e1000e_ptp_init(struct e1000_adapter *adapter);
+extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
@@ -685,20 +538,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
return hw->phy.ops.write_reg_locked(hw, offset, data);
}
-static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
-{
- return hw->phy.ops.get_cable_length(hw);
-}
-
-extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
-extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
-extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
-extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
-extern void e1000e_release_nvm(struct e1000_hw *hw);
extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
-extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
@@ -733,10 +573,6 @@ static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
return hw->phy.ops.get_info(hw);
}
-extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
-extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
-extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
-
static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
{
return readl(hw->hw_addr + reg);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index f95bc6ee1c22..2c1813737f6d 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
+#include <linux/mdio.h>
#include "e1000.h"
@@ -98,7 +99,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
E1000_STAT("tx_flow_control_xon", stats.xontxc),
E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
- E1000_STAT("rx_long_byte_count", stats.gorc),
E1000_STAT("rx_csum_offload_good", hw_csum_good),
E1000_STAT("rx_csum_offload_errors", hw_csum_err),
E1000_STAT("rx_header_split", rx_hdr_split),
@@ -108,6 +108,9 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("dropped_smbus", stats.mgpdc),
E1000_STAT("rx_dma_failed", rx_dma_failed),
E1000_STAT("tx_dma_failed", tx_dma_failed),
+ E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ E1000_STAT("uncorr_ecc_errors", uncorr_errors),
+ E1000_STAT("corr_ecc_errors", corr_errors),
};
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
@@ -127,7 +130,6 @@ static int e1000_get_settings(struct net_device *netdev,
u32 speed;
if (hw->phy.media_type == e1000_media_type_copper) {
-
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -325,12 +327,12 @@ static int e1000_set_settings(struct net_device *netdev,
}
/* reset the link */
-
if (netif_running(adapter->netdev)) {
e1000e_down(adapter);
e1000e_up(adapter);
- } else
+ } else {
e1000e_reset(adapter);
+ }
clear_bit(__E1000_RESETTING, &adapter->state);
return 0;
@@ -415,7 +417,7 @@ static void e1000_set_msglevel(struct net_device *netdev, u32 data)
adapter->msg_enable = data;
}
-static int e1000_get_regs_len(struct net_device *netdev)
+static int e1000_get_regs_len(struct net_device __always_unused *netdev)
{
#define E1000_REGS_LEN 32 /* overestimate */
return E1000_REGS_LEN * sizeof(u32);
@@ -469,10 +471,10 @@ static void e1000_get_regs(struct net_device *netdev,
regs_buff[22] = adapter->phy_stats.receive_errors;
regs_buff[23] = regs_buff[13]; /* mdix mode */
}
- regs_buff[21] = 0; /* was idle_errors */
- e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
- regs_buff[24] = (u32)phy_data; /* phy local receiver status */
- regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+ regs_buff[21] = 0; /* was idle_errors */
+ e1e_rphy(hw, MII_STAT1000, &phy_data);
+ regs_buff[24] = (u32)phy_data; /* phy local receiver status */
+ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
}
static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -759,8 +761,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
(test[pat] & write));
val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
if (val != (test[pat] & write & mask)) {
- e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
- reg + offset, val, (test[pat] & write & mask));
+ e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
+ reg + (offset << 2), val,
+ (test[pat] & write & mask));
*data = reg;
return 1;
}
@@ -775,7 +778,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
__ew32(&adapter->hw, reg, write & mask);
val = __er32(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
- e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
reg, (val & mask), (write & mask));
*data = reg;
return 1;
@@ -883,12 +886,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
E1000_FWSM_WLOCK_MAC_SHIFT;
for (i = 0; i < mac->rar_entry_count; i++) {
- /* Cannot test write-protected SHRAL[n] registers */
- if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
- continue;
+ if (mac->type == e1000_pch_lpt) {
+ /* Cannot test write-protected SHRAL[n] registers */
+ if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
+ continue;
- REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
- mask, 0xFFFFFFFF);
+ /* SHRAH[9] different than the others */
+ if (i == 10)
+ mask |= (1 << 30);
+ else
+ mask &= ~(1 << 30);
+ }
+
+ REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
+ 0xFFFFFFFF);
}
for (i = 0; i < mac->mta_reg_count; i++)
@@ -922,7 +933,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
return *data;
}
-static irqreturn_t e1000_test_intr(int irq, void *data)
+static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
{
struct net_device *netdev = (struct net_device *) data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1272,7 +1283,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
if (hw->phy.type == e1000_phy_ife) {
/* force 100, set loopback */
- e1e_wphy(hw, PHY_CONTROL, 0x6100);
+ e1e_wphy(hw, MII_BMCR, 0x6100);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = er32(CTRL);
@@ -1295,9 +1306,9 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
/* Auto-MDI/MDIX Off */
e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
/* reset to update Auto-MDI/MDIX */
- e1e_wphy(hw, PHY_CONTROL, 0x9140);
+ e1e_wphy(hw, MII_BMCR, 0x9140);
/* autoneg off */
- e1e_wphy(hw, PHY_CONTROL, 0x8140);
+ e1e_wphy(hw, MII_BMCR, 0x8140);
break;
case e1000_phy_gg82563:
e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
@@ -1309,7 +1320,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
phy_reg |= 0x006;
e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
/* Assert SW reset for above settings to take effect */
- e1000e_commit_phy(hw);
+ hw->phy.ops.commit(hw);
mdelay(1);
/* Force Full Duplex */
e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
@@ -1343,7 +1354,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
/* Enable loopback on the PHY */
-#define I82577_PHY_LBK_CTRL 19
e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
break;
default:
@@ -1351,7 +1361,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
}
/* force 1000, set loopback */
- e1e_wphy(hw, PHY_CONTROL, 0x4140);
+ e1e_wphy(hw, MII_BMCR, 0x4140);
mdelay(250);
/* Now set up the MAC to the same speed/duplex as the PHY. */
@@ -1393,7 +1403,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl = er32(CTRL);
- int link = 0;
+ int link;
/* special requirements for 82571/82572 fiber adapters */
@@ -1526,11 +1536,12 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
hw->mac.autoneg = 1;
if (hw->phy.type == e1000_phy_gg82563)
e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
- e1e_rphy(hw, PHY_CONTROL, &phy_reg);
- if (phy_reg & MII_CR_LOOPBACK) {
- phy_reg &= ~MII_CR_LOOPBACK;
- e1e_wphy(hw, PHY_CONTROL, phy_reg);
- e1000e_commit_phy(hw);
+ e1e_rphy(hw, MII_BMCR, &phy_reg);
+ if (phy_reg & BMCR_LOOPBACK) {
+ phy_reg &= ~BMCR_LOOPBACK;
+ e1e_wphy(hw, MII_BMCR, phy_reg);
+ if (hw->phy.ops.commit)
+ hw->phy.ops.commit(hw);
}
break;
}
@@ -1692,7 +1703,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
return *data;
}
-static int e1000e_get_sset_count(struct net_device *netdev, int sset)
+static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
+ int sset)
{
switch (sset) {
case ETH_SS_TEST:
@@ -1955,7 +1967,7 @@ static int e1000_nway_reset(struct net_device *netdev)
}
static void e1000_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats,
+ struct ethtool_stats __always_unused *stats,
u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1984,8 +1996,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
}
}
-static void e1000_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
+static void e1000_get_strings(struct net_device __always_unused *netdev,
+ u32 stringset, u8 *data)
{
u8 *p = data;
int i;
@@ -2005,7 +2017,8 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
static int e1000_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info, u32 *rule_locs)
+ struct ethtool_rxnfc *info,
+ u32 __always_unused *rule_locs)
{
info->data = 0;
@@ -2051,6 +2064,171 @@ static int e1000_get_rxnfc(struct net_device *netdev,
}
}
+static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 cap_addr, adv_addr, lpa_addr, pcs_stat_addr, phy_data, lpi_ctrl;
+ u32 status, ret_val;
+
+ if (!(adapter->flags & FLAG_IS_ICH) ||
+ !(adapter->flags2 & FLAG2_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ cap_addr = I82579_EEE_CAPABILITY;
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ lpa_addr = I82579_EEE_LP_ABILITY;
+ pcs_stat_addr = I82579_EEE_PCS_STATUS;
+ break;
+ case e1000_phy_i217:
+ cap_addr = I217_EEE_CAPABILITY;
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ lpa_addr = I217_EEE_LP_ABILITY;
+ pcs_stat_addr = I217_EEE_PCS_STATUS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return -EBUSY;
+
+ /* EEE Capability */
+ ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
+
+ /* EEE Advertised */
+ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ edata->advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ /* EEE Link Partner Advertised */
+ ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ /* EEE PCS Status */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
+ if (hw->phy.type == e1000_phy_82579)
+ phy_data <<= 8;
+
+release:
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return -ENODATA;
+
+ e1e_rphy(hw, I82579_LPI_CTRL, &lpi_ctrl);
+ status = er32(STATUS);
+
+ /* Result of the EEE auto negotiation - there is no register that
+ * reports the status of the EEE negotiation, so make a best guess
+ * based on whether both Tx and Rx LPI indications have been
+ * received or, failing that, on the link speed, the EEE speeds
+ * advertised on both ends and the speeds on which EEE is enabled
+ * locally.
+ */
+ if (((phy_data & E1000_EEE_TX_LPI_RCVD) &&
+ (phy_data & E1000_EEE_RX_LPI_RCVD)) ||
+ ((status & E1000_STATUS_SPEED_100) &&
+ (edata->advertised & ADVERTISED_100baseT_Full) &&
+ (edata->lp_advertised & ADVERTISED_100baseT_Full) &&
+ (lpi_ctrl & I82579_LPI_CTRL_100_ENABLE)) ||
+ ((status & E1000_STATUS_SPEED_1000) &&
+ (edata->advertised & ADVERTISED_1000baseT_Full) &&
+ (edata->lp_advertised & ADVERTISED_1000baseT_Full) &&
+ (lpi_ctrl & I82579_LPI_CTRL_1000_ENABLE)))
+ edata->eee_active = true;
+
+ edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable;
+ edata->tx_lpi_enabled = true;
+ edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
+
+ return 0;
+}
+
+static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ if (!(adapter->flags & FLAG_IS_ICH) ||
+ !(adapter->flags2 & FLAG2_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ ret_val = e1000e_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.advertised != edata->advertised) {
+ e_err("Setting EEE advertisement is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ e_err("Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) {
+ e_err("Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (hw->dev_spec.ich8lan.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
+
+ /* reset the link */
+ if (netif_running(netdev))
+ e1000e_reinit_locked(adapter);
+ else
+ e1000e_reset(adapter);
+ }
+
+ return 0;
+}
+
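These handlers service the standard ethtool EEE ioctls; a small userspace sketch of querying them (interface name, socket setup and error handling are illustrative only):

	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int query_eee(int fd, const char *ifname, struct ethtool_eee *eee)
	{
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

		memset(eee, 0, sizeof(*eee));
		eee->cmd = ETHTOOL_GEEE;	/* dispatched to the driver's .get_eee */
		ifr.ifr_data = (void *)eee;

		return ioctl(fd, SIOCETHTOOL, &ifr);	/* fd: any AF_INET socket */
	}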
+static int e1000e_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ ethtool_op_get_ts_info(netdev, info);
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return 0;
+
+ info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL));
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+
+ return 0;
+}
+
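The tx_types and rx_filters reported here are the modes an application may request through the SIOCSHWTSTAMP ioctl; a small illustrative sketch (device name and error handling omitted):

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int enable_hw_timestamps(int fd, const char *ifname)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* one of the filters above */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		return ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* fd: any AF_INET socket */
	}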
static const struct ethtool_ops e1000_ethtool_ops = {
.get_settings = e1000_get_settings,
.set_settings = e1000_set_settings,
@@ -2078,7 +2256,9 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
.get_rxnfc = e1000_get_rxnfc,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = e1000e_get_ts_info,
+ .get_eee = e1000e_get_eee,
+ .set_eee = e1000e_set_eee,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index cf217777586c..1e6b889aee87 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,331 +29,10 @@
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
-#include <linux/types.h>
-
-struct e1000_hw;
-struct e1000_adapter;
-
+#include "regs.h"
#include "defines.h"
-enum e1e_registers {
- E1000_CTRL = 0x00000, /* Device Control - RW */
- E1000_STATUS = 0x00008, /* Device Status - RO */
- E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */
- E1000_EERD = 0x00014, /* EEPROM Read - RW */
- E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */
- E1000_FLA = 0x0001C, /* Flash Access - RW */
- E1000_MDIC = 0x00020, /* MDI Control - RW */
- E1000_SCTL = 0x00024, /* SerDes Control - RW */
- E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
- E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
- E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
- E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
- E1000_FCT = 0x00030, /* Flow Control Type - RW */
- E1000_VET = 0x00038, /* VLAN Ether Type - RW */
- E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */
- E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */
- E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */
- E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
- E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
- E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
- E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
- E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
- E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */
- E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
-#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
- E1000_RCTL = 0x00100, /* Rx Control - RW */
- E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
- E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */
- E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */
- E1000_TCTL = 0x00400, /* Tx Control - RW */
- E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
- E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */
- E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
- E1000_LEDCTL = 0x00E00, /* LED Control - RW */
- E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
- E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
- E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
-#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
- E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
- E1000_PBS = 0x01008, /* Packet Buffer Size */
- E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
- E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
- E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
- E1000_PBA_ECC = 0x01100, /* PBA ECC Register */
- E1000_ERT = 0x02008, /* Early Rx Threshold - RW */
- E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
- E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
- E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
-/* Convenience macros
- *
- * Note: "_n" is the queue number of the register to be written to.
- *
- * Example usage:
- * E1000_RDBAL(current_rx_queue)
- */
- E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */
-#define E1000_RDBAL(_n) (E1000_RDBAL_BASE + (_n << 8))
- E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */
-#define E1000_RDBAH(_n) (E1000_RDBAH_BASE + (_n << 8))
- E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */
-#define E1000_RDLEN(_n) (E1000_RDLEN_BASE + (_n << 8))
- E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */
-#define E1000_RDH(_n) (E1000_RDH_BASE + (_n << 8))
- E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */
-#define E1000_RDT(_n) (E1000_RDT_BASE + (_n << 8))
- E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
- E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
-#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
- E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
-
- E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
- E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */
-#define E1000_TDBAL(_n) (E1000_TDBAL_BASE + (_n << 8))
- E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */
-#define E1000_TDBAH(_n) (E1000_TDBAH_BASE + (_n << 8))
- E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */
-#define E1000_TDLEN(_n) (E1000_TDLEN_BASE + (_n << 8))
- E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */
-#define E1000_TDH(_n) (E1000_TDH_BASE + (_n << 8))
- E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */
-#define E1000_TDT(_n) (E1000_TDT_BASE + (_n << 8))
- E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
- E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
-#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
- E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
- E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
-#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8))
- E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */
- E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
- E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */
- E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */
- E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */
- E1000_SCC = 0x04014, /* Single Collision Count - R/clr */
- E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */
- E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */
- E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */
- E1000_COLC = 0x04028, /* Collision Count - R/clr */
- E1000_DC = 0x04030, /* Defer Count - R/clr */
- E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */
- E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */
- E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */
- E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */
- E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */
- E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */
- E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */
- E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */
- E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
- E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
- E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
- E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
- E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
- E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
- E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
- E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */
- E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */
- E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */
- E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */
- E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */
- E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */
- E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */
- E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */
- E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */
- E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */
- E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */
- E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */
- E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */
- E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */
- E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */
- E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */
- E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */
- E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */
- E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */
- E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */
- E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */
- E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */
- E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
- E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
- E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
- E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
- E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
- E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
- E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */
- E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
- E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
- E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
- E1000_IAC = 0x04100, /* Interrupt Assertion Count */
- E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
- E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
- E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */
- E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */
- E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */
- E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
- E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
- E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */
- E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
- E1000_RFCTL = 0x05008, /* Receive Filter Control */
- E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
- E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
-#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8))
-#define E1000_RA (E1000_RAL(0))
- E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
-#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
- E1000_SHRAL_PCH_LPT_BASE = 0x05408,
-#define E1000_SHRAL_PCH_LPT(_n) (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8))
- E1000_SHRAH_PCH_LTP_BASE = 0x0540C,
-#define E1000_SHRAH_PCH_LPT(_n) (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8))
- E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */
-#define E1000_SHRAL(_n) (E1000_SHRAL_BASE + ((_n) * 8))
- E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */
-#define E1000_SHRAH(_n) (E1000_SHRAH_BASE + ((_n) * 8))
- E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
- E1000_WUC = 0x05800, /* Wakeup Control - RW */
- E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
- E1000_WUS = 0x05810, /* Wakeup Status - RO */
- E1000_MRQC = 0x05818, /* Multiple Receive Control - RW */
- E1000_MANC = 0x05820, /* Management Control - RW */
- E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */
- E1000_HOST_IF = 0x08800, /* Host Interface */
-
- E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
- E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
- E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
-#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4))
- E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
- E1000_GCR = 0x05B00, /* PCI-Ex Control */
- E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
- E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
- E1000_SWSM = 0x05B50, /* SW Semaphore */
- E1000_FWSM = 0x05B54, /* FW Semaphore */
- E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
- E1000_RETA_BASE = 0x05C00, /* Redirection Table - RW */
-#define E1000_RETA(_n) (E1000_RETA_BASE + ((_n) * 4))
- E1000_RSSRK_BASE = 0x05C80, /* RSS Random Key - RW */
-#define E1000_RSSRK(_n) (E1000_RSSRK_BASE + ((_n) * 4))
- E1000_FFLT_DBG = 0x05F04, /* Debug Register */
- E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
-#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4))
-#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE
- E1000_HICR = 0x08F00, /* Host Interface Control */
-};
-
-#define E1000_MAX_PHY_ADDR 4
-
-/* IGP01E1000 Specific Registers */
-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
-#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
-#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
-#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
-#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
-#define IGP_PAGE_SHIFT 5
-#define PHY_REG_MASK 0x1F
-
-#define BM_WUC_PAGE 800
-#define BM_WUC_ADDRESS_OPCODE 0x11
-#define BM_WUC_DATA_OPCODE 0x12
-#define BM_WUC_ENABLE_PAGE 769
-#define BM_WUC_ENABLE_REG 17
-#define BM_WUC_ENABLE_BIT (1 << 2)
-#define BM_WUC_HOST_WU_BIT (1 << 4)
-#define BM_WUC_ME_WU_BIT (1 << 5)
-
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
-
-#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
-#define IGP01E1000_PHY_POLARITY_MASK 0x0078
-
-#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
-
-#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
-
-#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
-#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
-#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
-
-#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
-
-#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
-#define IGP01E1000_PSSR_MDIX 0x0800
-#define IGP01E1000_PSSR_SPEED_MASK 0xC000
-#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
-
-#define IGP02E1000_PHY_CHANNEL_NUM 4
-#define IGP02E1000_PHY_AGC_A 0x11B1
-#define IGP02E1000_PHY_AGC_B 0x12B1
-#define IGP02E1000_PHY_AGC_C 0x14B1
-#define IGP02E1000_PHY_AGC_D 0x18B1
-
-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */
-#define IGP02E1000_AGC_LENGTH_MASK 0x7F
-#define IGP02E1000_AGC_RANGE 15
-
-/* manage.c */
-#define E1000_VFTA_ENTRY_SHIFT 5
-#define E1000_VFTA_ENTRY_MASK 0x7F
-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
-
-#define E1000_HICR_EN 0x01 /* Enable bit - RO */
-/* Driver sets this bit when done to put command in RAM */
-#define E1000_HICR_C 0x02
-#define E1000_HICR_FW_RESET_ENABLE 0x40
-#define E1000_HICR_FW_RESET 0x80
-
-#define E1000_FWSM_MODE_MASK 0xE
-#define E1000_FWSM_MODE_SHIFT 1
-
-#define E1000_MNG_IAMT_MODE 0x3
-#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
-#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
-#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
-#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-
-/* nvm.c */
-#define E1000_STM_OPCODE 0xDB00
-
-#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
-#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
-#define E1000_KMRNCTRLSTA_REN 0x00200000
-#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
-#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
-#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
-#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
-#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
-#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
-#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
-#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
-
-#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
-#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
-#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
-#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
-
-/* IFE PHY Extended Status Control */
-#define IFE_PESC_POLARITY_REVERSED 0x0100
-
-/* IFE PHY Special Control */
-#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
-#define IFE_PSC_FORCE_POLARITY 0x0020
-
-/* IFE PHY Special Control and LED Control */
-#define IFE_PSCL_PROBE_MODE 0x0020
-#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
-#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
-
-/* IFE PHY MDIX Control */
-#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
-#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
-#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
-
-#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+struct e1000_hw;
#define E1000_DEV_ID_82571EB_COPPER 0x105E
#define E1000_DEV_ID_82571EB_FIBER 0x105F
@@ -373,13 +52,11 @@ enum e1e_registers {
#define E1000_DEV_ID_82573L 0x109A
#define E1000_DEV_ID_82574L 0x10D3
#define E1000_DEV_ID_82574LA 0x10F6
-#define E1000_DEV_ID_82583V 0x150C
-
+#define E1000_DEV_ID_82583V 0x150C
#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
-
#define E1000_DEV_ID_ICH8_82567V_3 0x1501
#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
@@ -414,12 +91,12 @@ enum e1e_registers {
#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
-#define E1000_REVISION_4 4
+#define E1000_REVISION_4 4
-#define E1000_FUNC_1 1
+#define E1000_FUNC_1 1
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
enum e1000_mac_type {
e1000_82571,
@@ -524,16 +201,6 @@ enum e1000_serdes_link_state {
e1000_serdes_link_forced_up
};
-/* Receive Descriptor */
-struct e1000_rx_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- __le16 length; /* Length of data DMAed into data buffer */
- __le16 csum; /* Packet checksum */
- u8 status; /* Descriptor status */
- u8 errors; /* Descriptor Errors */
- __le16 special;
-};
-
/* Receive Descriptor - Extended */
union e1000_rx_desc_extended {
struct {
@@ -656,7 +323,7 @@ struct e1000_data_desc {
struct {
u8 status; /* Descriptor status */
u8 popts; /* Packet Options */
- __le16 special; /* */
+ __le16 special;
} fields;
} upper;
};
@@ -752,7 +419,7 @@ struct e1000_host_command_header {
u8 checksum;
};
-#define E1000_HI_MAX_DATA_LENGTH 252
+#define E1000_HI_MAX_DATA_LENGTH 252
struct e1000_host_command_info {
struct e1000_host_command_header command_header;
u8 command_data[E1000_HI_MAX_DATA_LENGTH];
@@ -767,13 +434,18 @@ struct e1000_host_mng_command_header {
u16 command_length;
};
-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
struct e1000_host_mng_command_info {
struct e1000_host_mng_command_header command_header;
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
};
-/* Function pointers and static data for the MAC. */
+#include "mac.h"
+#include "phy.h"
+#include "nvm.h"
+#include "manage.h"
+
+/* Function pointers for the MAC. */
struct e1000_mac_operations {
s32 (*id_led_init)(struct e1000_hw *);
s32 (*blink_led)(struct e1000_hw *);
@@ -1002,4 +674,8 @@ struct e1000_hw {
} dev_spec;
};
+#include "82571.h"
+#include "80003es2lan.h"
+#include "ich8lan.h"
+
#endif
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 976336547607..dff7bff8b8e0 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -57,147 +57,6 @@
#include "e1000.h"
-#define ICH_FLASH_GFPREG 0x0000
-#define ICH_FLASH_HSFSTS 0x0004
-#define ICH_FLASH_HSFCTL 0x0006
-#define ICH_FLASH_FADDR 0x0008
-#define ICH_FLASH_FDATA0 0x0010
-#define ICH_FLASH_PR0 0x0074
-
-#define ICH_FLASH_READ_COMMAND_TIMEOUT 500
-#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
-#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000
-#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
-#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
-
-#define ICH_CYCLE_READ 0
-#define ICH_CYCLE_WRITE 2
-#define ICH_CYCLE_ERASE 3
-
-#define FLASH_GFPREG_BASE_MASK 0x1FFF
-#define FLASH_SECTOR_ADDR_SHIFT 12
-
-#define ICH_FLASH_SEG_SIZE_256 256
-#define ICH_FLASH_SEG_SIZE_4K 4096
-#define ICH_FLASH_SEG_SIZE_8K 8192
-#define ICH_FLASH_SEG_SIZE_64K 65536
-
-
-#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
-/* FW established a valid mode */
-#define E1000_ICH_FWSM_FW_VALID 0x00008000
-
-#define E1000_ICH_MNG_IAMT_MODE 0x2
-
-#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_OFF2 << 8) | \
- (ID_LED_DEF1_ON2 << 4) | \
- (ID_LED_DEF1_DEF2))
-
-#define E1000_ICH_NVM_SIG_WORD 0x13
-#define E1000_ICH_NVM_SIG_MASK 0xC000
-#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
-#define E1000_ICH_NVM_SIG_VALUE 0x80
-
-#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
-
-#define E1000_FEXTNVM_SW_CONFIG 1
-#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
-
-#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
-#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
-
-#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
-#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
-#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
-
-#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
-
-#define E1000_ICH_RAR_ENTRIES 7
-#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
-#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
-
-#define PHY_PAGE_SHIFT 5
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
- ((reg) & MAX_PHY_REG_ADDRESS))
-#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
-#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
-
-#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
-#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
-#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
-
-#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
-
-#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
-
-/* SMBus Control Phy Register */
-#define CV_SMB_CTRL PHY_REG(769, 23)
-#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
-
-/* SMBus Address Phy Register */
-#define HV_SMB_ADDR PHY_REG(768, 26)
-#define HV_SMB_ADDR_MASK 0x007F
-#define HV_SMB_ADDR_PEC_EN 0x0200
-#define HV_SMB_ADDR_VALID 0x0080
-#define HV_SMB_ADDR_FREQ_MASK 0x1100
-#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
-#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
-
-/* PHY Power Management Control */
-#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
-
-/* PHY Low Power Idle Control */
-#define I82579_LPI_CTRL PHY_REG(772, 20)
-#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
-#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
-
-/* EMI Registers */
-#define I82579_EMI_ADDR 0x10
-#define I82579_EMI_DATA 0x11
-#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
-#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
-#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
-#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
-#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
-#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
-
-/* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
-#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
-#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
-#define I217_CGFREG PHY_REG(772, 29)
-#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
-#define I217_MEMPWR PHY_REG(772, 26)
-#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
-
-/* Strapping Option Register - RO */
-#define E1000_STRAP 0x0000C
-#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
-#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
-#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
-#define E1000_STRAP_SMT_FREQ_SHIFT 12
-
-/* OEM Bits Phy Register */
-#define HV_OEM_BITS PHY_REG(768, 25)
-#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
-#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
-#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
-
-#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
-#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
-
-/* KMRN Mode Control */
-#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
-#define HV_KMRN_MDIO_SLOW 0x0400
-
-/* KMRN FIFO Control and Status */
-#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
-
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -252,7 +111,6 @@ union ich8_flash_protected_range {
u32 regval;
};
-static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
@@ -264,9 +122,7 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
u16 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
u8 size, u16 *data);
-static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
-static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
@@ -278,7 +134,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
-static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
@@ -330,12 +186,12 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
u16 retry_count;
for (retry_count = 0; retry_count < 2; retry_count++) {
- ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+ ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
if (ret_val || (phy_reg == 0xFFFF))
continue;
phy_id = (u32)(phy_reg << 16);
- ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+ ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
if (ret_val || (phy_reg == 0xFFFF)) {
phy_id = 0;
continue;
@@ -378,10 +234,15 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
s32 ret_val;
u16 phy_reg;
+ /* Gate automatic PHY configuration by hardware on managed and
+ * non-managed 82579 and newer adapters.
+ */
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
e_dbg("Failed to initialize PHY flow\n");
- return ret_val;
+ goto out;
}
/* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
@@ -402,13 +263,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch2lan:
- /* Gate automatic PHY configuration by hardware on
- * non-managed 82579
- */
- if ((hw->mac.type == e1000_pch2lan) &&
- !(fwsm & E1000_ICH_FWSM_FW_VALID))
- e1000_gate_hw_phy_config_ich8lan(hw, true);
-
if (e1000_phy_is_accessible_pchlan(hw)) {
if (hw->mac.type == e1000_pch_lpt) {
/* Unforce SMBus mode in PHY */
@@ -443,6 +297,15 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
ew32(FEXTNVM3, mac_reg);
+ if (hw->mac.type == e1000_pch_lpt) {
+ /* Toggling LANPHYPC brings the PHY out of SMBus mode
+ * So ensure that the MAC is also out of SMBus mode
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+ }
+
/* Toggle LANPHYPC Value bit */
mac_reg = er32(CTRL);
mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
@@ -476,6 +339,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
ret_val = e1000e_phy_hw_reset_generic(hw);
+out:
/* Ungate automatic PHY configuration on non-managed 82579 */
if ((hw->mac.type == e1000_pch2lan) &&
!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
@@ -495,7 +359,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val;
phy->addr = 1;
phy->reset_delay_us = 100;
@@ -778,68 +642,143 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
- /* Gate automatic PHY configuration by hardware on managed
- * 82579 and i217
- */
- if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
- (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
- e1000_gate_hw_phy_config_ich8lan(hw, true);
-
return 0;
}
/**
+ * __e1000_access_emi_reg_locked - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ *
+ * This helper function assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
+ else
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_emi_reg_locked - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+ return __e1000_access_emi_reg_locked(hw, addr, data, true);
+}
+
+/**
+ * e1000_write_emi_reg_locked - Write Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be written to the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
+{
+ return __e1000_access_emi_reg_locked(hw, addr, &data, false);
+}
+
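The two EMI helpers added here wrap the indirect access sequence used throughout this file: the EMI address is first written to I82579_EMI_ADDR, then the value is moved through I82579_EMI_DATA. A minimal, hypothetical caller (not part of the patch) would follow the same acquire / locked-access / release pattern the rest of ich8lan.c uses:

static s32 example_read_mse_threshold(struct e1000_hw *hw, u16 *mse)
{
	s32 ret_val;

	/* take the SW/FW/HW semaphore before any *_locked PHY access */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* internally: write I82579_MSE_THRESHOLD to I82579_EMI_ADDR,
	 * then read the value back through I82579_EMI_DATA
	 */
	ret_val = e1000_read_emi_reg_locked(hw, I82579_MSE_THRESHOLD, mse);

	hw->phy.ops.release(hw);
	return ret_val;
}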
+/**
* e1000_set_eee_pchlan - Enable/disable EEE support
* @hw: pointer to the HW structure
*
- * Enable/disable EEE based on setting in dev_spec structure. The bits in
- * the LPI Control register will remain set only if/when link is up.
+ * Enable/disable EEE based on setting in dev_spec structure, the duplex of
+ * the link and the EEE capabilities of the link partner. The LPI Control
+ * register bits will remain set only if/when link is up.
**/
static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
- s32 ret_val = 0;
- u16 phy_reg;
+ s32 ret_val;
+ u16 lpi_ctrl;
if ((hw->phy.type != e1000_phy_82579) &&
(hw->phy.type != e1000_phy_i217))
return 0;
- ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+ ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- if (dev_spec->eee_disable)
- phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
- else
- phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
-
- ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+ ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
if (ret_val)
- return ret_val;
+ goto release;
+
+ /* Clear bits that enable EEE in various speeds */
+ lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
+
+ /* Enable EEE if not disabled by user */
+ if (!dev_spec->eee_disable) {
+ u16 lpa, pcs_status, data;
- if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
/* Save off link partner's EEE ability */
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
- I217_EEE_LP_ABILITY);
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ lpa = I82579_EEE_LP_ABILITY;
+ pcs_status = I82579_EEE_PCS_STATUS;
+ break;
+ case e1000_phy_i217:
+ lpa = I217_EEE_LP_ABILITY;
+ pcs_status = I217_EEE_PCS_STATUS;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto release;
+ }
+ ret_val = e1000_read_emi_reg_locked(hw, lpa,
+ &dev_spec->eee_lp_ability);
if (ret_val)
goto release;
- e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
- /* EEE is not supported in 100Half, so ignore partner's EEE
- * in 100 ability if full-duplex is not advertised.
+ /* Enable EEE only for speeds in which the link partner is
+ * EEE capable.
*/
- e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
- if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
- dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
-release:
- hw->phy.ops.release(hw);
+ if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+ if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+ e1e_rphy_locked(hw, MII_LPA, &data);
+ if (data & LPA_100FULL)
+ lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+ else
+ /* EEE is not supported in 100Half, so ignore
+ * partner's EEE in 100 ability if full-duplex
+ * is not advertised.
+ */
+ dev_spec->eee_lp_ability &=
+ ~I82579_EEE_100_SUPPORTED;
+ }
+
+ /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+ if (ret_val)
+ goto release;
}
- return 0;
+ ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
}
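The rewritten e1000_set_eee_pchlan() now enables EEE per speed only when the link partner advertises it (and, for 100 Mb/s, only when 100Full is negotiated, since EEE is undefined for 100Half). The bit relationship between the partner-ability word and the LPI Control enables reduces to this hypothetical helper, shown purely for illustration:

/* Hypothetical helper, not in the patch: map link-partner EEE ability
 * bits onto the corresponding I82579 LPI Control enable bits.
 */
static u16 eee_lpa_to_lpi_ctrl(u16 lp_ability)
{
	u16 lpi_ctrl = 0;

	if (lp_ability & I82579_EEE_1000_SUPPORTED)
		lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
	if (lp_ability & I82579_EEE_100_SUPPORTED)
		lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;

	return lpi_ctrl;
}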
/**
@@ -1017,7 +956,7 @@ static DEFINE_MUTEX(nvm_mutex);
*
* Acquires the mutex for performing NVM operations.
**/
-static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
mutex_lock(&nvm_mutex);
@@ -1030,7 +969,7 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
*
* Releases the mutex used while performing NVM operations.
**/
-static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
mutex_unlock(&nvm_mutex);
}
@@ -1322,7 +1261,7 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
u32 strap = er32(STRAP);
u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
E1000_STRAP_SMT_FREQ_SHIFT;
- s32 ret_val = 0;
+ s32 ret_val;
strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
@@ -1558,7 +1497,7 @@ release:
**/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
- s32 ret_val = 0;
+ s32 ret_val;
u32 ctrl_reg = 0;
u32 ctrl_ext = 0;
u32 reg = 0;
@@ -1727,7 +1666,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
*/
if (hw->phy.revision < 2) {
e1000e_phy_sw_reset(hw);
- ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
+ ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
}
}
@@ -1757,6 +1696,11 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
+ if (ret_val)
+ goto release;
+
+ /* set MSE higher to enable link to stay up when noise is high */
+ ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
release:
hw->phy.ops.release(hw);
@@ -1983,22 +1927,18 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
/* Set MDIO slow mode before any other MDIO access */
ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
- if (ret_val)
- goto release;
/* set MSE higher to enable link to stay up when noise is high */
- ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
- if (ret_val)
- goto release;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
if (ret_val)
goto release;
/* drop link after 5 times MSE threshold was reached */
- ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
release:
hw->phy.ops.release(hw);
@@ -2172,10 +2112,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
- I82579_LPI_UPDATE_TIMER);
- if (!ret_val)
- ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
+ ret_val = e1000_write_emi_reg_locked(hw,
+ I82579_LPI_UPDATE_TIMER,
+ 0x1387);
hw->phy.ops.release(hw);
}
@@ -2219,7 +2158,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
{
- s32 ret_val = 0;
+ s32 ret_val;
u16 oem_reg;
ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
@@ -2277,6 +2216,8 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
if (ret_val)
@@ -2949,19 +2890,32 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
s32 ret_val;
u16 data;
+ u16 word;
+ u16 valid_csum_mask;
- /* Read 0x19 and check bit 6. If this bit is 0, the checksum
- * needs to be fixed. This bit is an indication that the NVM
- * was prepared by OEM software and did not calculate the
- * checksum...a likely scenario.
+ /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
+ * the checksum needs to be fixed. This bit is an indication that
+ * the NVM was prepared by OEM software and did not calculate
+ * the checksum...a likely scenario.
*/
- ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
+ switch (hw->mac.type) {
+ case e1000_pch_lpt:
+ word = NVM_COMPAT;
+ valid_csum_mask = NVM_COMPAT_VALID_CSUM;
+ break;
+ default:
+ word = NVM_FUTURE_INIT_WORD1;
+ valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
+ break;
+ }
+
+ ret_val = e1000_read_nvm(hw, word, 1, &data);
if (ret_val)
return ret_val;
- if (!(data & 0x40)) {
- data |= 0x40;
- ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
+ if (!(data & valid_csum_mask)) {
+ data |= valid_csum_mask;
+ ret_val = e1000_write_nvm(hw, word, 1, &data);
if (ret_val)
return ret_val;
ret_val = e1000e_update_nvm_checksum(hw);
@@ -3624,6 +3578,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
if (hw->mac.type == e1000_ich8lan)
reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
ew32(RFCTL, reg);
+
+ /* Enable ECC on Lynxpoint */
+ if (hw->mac.type == e1000_pch_lpt) {
+ reg = er32(PBECCSTS);
+ reg |= E1000_PBECCSTS_ECC_ENABLE;
+ ew32(PBECCSTS, reg);
+
+ reg = er32(CTRL);
+ reg |= E1000_CTRL_MEHE;
+ ew32(CTRL, reg);
+ }
}
/**
@@ -3964,8 +3929,7 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
if (ret_val)
return;
reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
- ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
- reg_data);
+ e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
}
/**
@@ -4000,19 +3964,20 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
if (!dev_spec->eee_disable) {
u16 eee_advert;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
- I217_EEE_ADVERTISEMENT);
+ ret_val =
+ e1000_read_emi_reg_locked(hw,
+ I217_EEE_ADVERTISEMENT,
+ &eee_advert);
if (ret_val)
goto release;
- e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
/* Disable LPLU if both link partners support 100BaseT
* EEE and 100Full is advertised on both ends of the
* link.
*/
- if ((eee_advert & I217_EEE_100_SUPPORTED) &&
+ if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
(dev_spec->eee_lp_ability &
- I217_EEE_100_SUPPORTED) &&
+ I82579_EEE_100_SUPPORTED) &&
(hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
E1000_PHY_CTRL_NOND0A_LPLU);
@@ -4026,7 +3991,6 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
* The SMBus release must also be disabled on LCD reset.
*/
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
-
/* Enable proxy to reset only on power good. */
e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
@@ -4287,7 +4251,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
u32 bank = 0;
u32 status;
- e1000e_get_cfg_done(hw);
+ e1000e_get_cfg_done_generic(hw);
/* Wait for indication from h/w that it has completed basic config */
if (hw->mac.type >= e1000_ich10lan) {
@@ -4416,7 +4380,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
.reset_hw = e1000_reset_hw_ich8lan,
.init_hw = e1000_init_hw_ich8lan,
.setup_link = e1000_setup_link_ich8lan,
- .setup_physical_interface= e1000_setup_copper_link_ich8lan,
+ .setup_physical_interface = e1000_setup_copper_link_ich8lan,
/* id_led_init dependent on mac type */
.config_collision_dist = e1000e_config_collision_dist_generic,
.rar_set = e1000e_rar_set_generic,
@@ -4438,7 +4402,7 @@ static const struct e1000_phy_operations ich8_phy_ops = {
static const struct e1000_nvm_operations ich8_nvm_ops = {
.acquire = e1000_acquire_nvm_ich8lan,
- .read = e1000_read_nvm_ich8lan,
+ .read = e1000_read_nvm_ich8lan,
.release = e1000_release_nvm_ich8lan,
.reload = e1000e_reload_nvm_generic,
.update = e1000_update_nvm_checksum_ich8lan,
@@ -4520,6 +4484,7 @@ const struct e1000_info e1000_pch2_info = {
.mac = e1000_pch2lan,
.flags = FLAG_IS_ICH
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_FLASH
@@ -4528,7 +4493,7 @@ const struct e1000_info e1000_pch2_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = DEFAULT_JUMBO,
+ .max_hw_frame_size = 9018,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
@@ -4539,6 +4504,7 @@ const struct e1000_info e1000_pch_lpt_info = {
.mac = e1000_pch_lpt,
.flags = FLAG_IS_ICH
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_FLASH
@@ -4547,7 +4513,7 @@ const struct e1000_info e1000_pch_lpt_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = DEFAULT_JUMBO,
+ .max_hw_frame_size = 9018,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
new file mode 100644
index 000000000000..b6d3174d7d2d
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -0,0 +1,268 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_ICH8LAN_H_
+#define _E1000E_ICH8LAN_H_
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_PR0 0x0074
+
+/* Requires up to 10 seconds when MNG might be accessing part. */
+#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
+
+#define ICH_CYCLE_READ 0
+#define ICH_CYCLE_WRITE 2
+#define ICH_CYCLE_ERASE 3
+
+#define FLASH_GFPREG_BASE_MASK 0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT 12
+
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_8K 8192
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
+#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
+
+#define E1000_ICH_MNG_IAMT_MODE 0x2
+
+#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
+#define E1000_FWSM_WLOCK_MAC_SHIFT 7
+
+/* Shared Receive Address Registers */
+#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
+#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
+
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_OFF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC000
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
+#define E1000_ICH_NVM_SIG_VALUE 0x80
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+
+#define E1000_FEXTNVM_SW_CONFIG 1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
+
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
+
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
+#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES 7
+#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+ ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
+
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
+
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
+#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
+
+#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
+#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
+#define HV_STATS_PAGE 778
+/* Half-duplex collision counts */
+#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */
+#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */
+#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */
+#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */
+#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+
+/* SMBus Control Phy Register */
+#define CV_SMB_CTRL PHY_REG(769, 23)
+#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
+#define HV_SMB_ADDR_PEC_EN 0x0200
+#define HV_SMB_ADDR_VALID 0x0080
+#define HV_SMB_ADDR_FREQ_MASK 0x1100
+#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
+#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP 0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
+#define E1000_STRAP_SMT_FREQ_SHIFT 12
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
+
+/* PHY Power Management Control */
+#define HV_PM_CTRL PHY_REG(770, 17)
+#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+
+#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_100_ENABLE 0x2000
+#define I82579_LPI_CTRL_1000_ENABLE 0x4000
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
+
+/* Extended Management Interface (EMI) Registers */
+#define I82579_EMI_ADDR 0x10
+#define I82579_EMI_DATA 0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
+#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
+#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
+#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
+#define I82579_EEE_PCS_STATUS 0x182D /* IEEE MMD Register 3.1 >> 8 */
+#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
+#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
+#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
+#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
+#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
+#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
+#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
+#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
+#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
+
+#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
+#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
+
+/* Intel Rapid Start Technology Support */
+#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
+#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
+#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
+#define I217_CGFREG PHY_REG(772, 29)
+#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
+#define I217_MEMPWR PHY_REG(772, 26)
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
+
+/* Receive Address Initial CRC Calculation */
+#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
+
+/* Latency Tolerance Reporting */
+#define E1000_LTRV 0x000F8
+#define E1000_LTRV_SCALE_MAX 5
+#define E1000_LTRV_SCALE_FACTOR 5
+#define E1000_LTRV_REQ_SHIFT 15
+#define E1000_LTRV_NOSNOOP_SHIFT 16
+#define E1000_LTRV_SEND (1 << 30)
+
+/* Proprietary Latency Tolerance Reporting PCI Capability */
+#define E1000_PCI_LTR_CAP_LPT 0xA8
+
+/* OBFF Control & Threshold Defines */
+#define E1000_SVCR_OFF_EN 0x00000001
+#define E1000_SVCR_OFF_MASKINT 0x00001000
+#define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000
+#define E1000_SVCR_OFF_TIMER_SHIFT 16
+#define E1000_SVT_OFF_HWM_MASK 0x0000001F
+
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state);
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
+#endif /* _E1000E_ICH8LAN_H_ */
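Most PHY register names in the new header are built with PHY_REG(page, reg), which packs the page number above the 5-bit register address. Working the expansion by hand (illustrative only, assuming MAX_PHY_REG_ADDRESS is 0x1F as elsewhere in the driver):

/*
 * PHY_REG(page, reg) = ((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)
 *
 *   HV_OEM_BITS     = PHY_REG(768, 25) = (768 << 5) | 25 = 0x6019
 *   I82579_LPI_CTRL = PHY_REG(772, 20) = (772 << 5) | 20 = 0x6094
 *
 * i.e. the page lands in bits 15:5 and the register address in bits 4:0.
 */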
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 54d9dafaf126..b78e02174601 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -165,7 +165,7 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
{
u32 i;
- s32 ret_val = 0;
+ s32 ret_val;
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ALEN];
@@ -1021,6 +1021,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = 0;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
@@ -1052,14 +1053,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
- ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+ ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+ ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
if (ret_val)
return ret_val;
- if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) {
e_dbg("Copper PHY and Auto Neg has not completed.\n");
return ret_val;
}
@@ -1070,11 +1071,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* Page Ability Register (Address 5) to determine how
* flow control was negotiated.
*/
- ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
+ ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg);
if (ret_val)
return ret_val;
- ret_val =
- e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+ ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg);
if (ret_val)
return ret_val;
@@ -1111,8 +1111,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* 1 | DC | 1 | DC | E1000_fc_full
*
*/
- if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) {
/* Now we need to check if the user selected Rx ONLY
* of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise Rx
@@ -1134,10 +1134,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
*-------|---------|-------|---------|--------------------
* 0 | 1 | 1 | 1 | e1000_fc_tx_pause
*/
- else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
hw->fc.current_mode = e1000_fc_tx_pause;
e_dbg("Flow Control = Tx PAUSE frames only.\n");
}
@@ -1148,10 +1148,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
*-------|---------|-------|---------|--------------------
* 1 | 1 | 0 | 1 | e1000_fc_rx_pause
*/
- else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
+ !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
hw->fc.current_mode = e1000_fc_rx_pause;
e_dbg("Flow Control = Rx PAUSE frames only.\n");
} else {
@@ -1185,6 +1185,130 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
}
}
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
+ mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = er32(PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ e_dbg("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto_Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = er32(PCS_ANADV);
+ pcs_lp_ability_reg = er32(PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ e_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ e_dbg("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ e_dbg("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = er32(PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ ew32(PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = e1000e_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
return 0;
}
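The SerDes branch added above resolves flow control from the PAUSE/ASM_DIR bits exactly as the copper path does, only sourced from PCS_ANADV and PCS_LPAB instead of the MII advertisement registers. Condensed into a hypothetical helper (the e1000_fc_* enumerators are the ones used above; the helper itself is illustration only), the resolution table is:

static int resolve_fc_mode(bool loc_pause, bool loc_asm,
			   bool lp_pause, bool lp_asm, bool want_rx_only)
{
	/* both sides PAUSE: symmetric flow control, unless the user asked
	 * for Rx-only and FULL was only advertised because Rx-only cannot
	 * be advertised on its own
	 */
	if (loc_pause && lp_pause)
		return want_rx_only ? e1000_fc_rx_pause : e1000_fc_full;
	/* 0/1 vs 1/1: we may only transmit PAUSE frames */
	if (!loc_pause && loc_asm && lp_pause && lp_asm)
		return e1000_fc_tx_pause;
	/* 1/1 vs 0/1: we may only receive PAUSE frames */
	if (loc_pause && loc_asm && !lp_pause && lp_asm)
		return e1000_fc_rx_pause;
	/* everything else: flow control disabled per the IEEE spec */
	return e1000_fc_none;
}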
@@ -1231,8 +1355,8 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
* Sets the speed and duplex to gigabit full duplex (the only possible option)
* for fiber/serdes links.
**/
-s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed,
- u16 *duplex)
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused
+ *hw, u16 *speed, u16 *duplex)
{
*speed = SPEED_1000;
*duplex = FULL_DUPLEX;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
new file mode 100644
index 000000000000..a61fee404ebe
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -0,0 +1,74 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_MAC_H_
+#define _E1000E_MAC_H_
+
+s32 e1000e_blink_led_generic(struct e1000_hw *hw);
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
+s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
+s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
+s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
+s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
+s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
+s32 e1000e_force_mac_fc(struct e1000_hw *hw);
+s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
+s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
+s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
+s32 e1000e_led_on_generic(struct e1000_hw *hw);
+s32 e1000e_led_off_generic(struct e1000_hw *hw);
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
+s32 e1000e_setup_led_generic(struct e1000_hw *hw);
+s32 e1000e_setup_link_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
+
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void e1000e_put_hw_semaphore(struct e1000_hw *hw);
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000e_reset_adaptive(struct e1000_hw *hw);
+void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+void e1000e_update_adaptive(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 6dc47beb3adc..e4b0f1ef92f6 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -28,19 +28,6 @@
#include "e1000.h"
-enum e1000_mng_mode {
- e1000_mng_mode_none = 0,
- e1000_mng_mode_asf,
- e1000_mng_mode_pt,
- e1000_mng_mode_ipmi,
- e1000_mng_mode_host_if_only
-};
-
-#define E1000_FACTPS_MNGCG 0x20000000
-
-/* Intel(R) Active Management Technology signature */
-#define E1000_IAMT_SIGNATURE 0x544D4149
-
/**
* e1000_calculate_checksum - Calculate checksum for buffer
* @buffer: pointer to EEPROM
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
new file mode 100644
index 000000000000..326897c29ea8
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -0,0 +1,72 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_MANAGE_H_
+#define _E1000E_MANAGE_H_
+
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+#define E1000_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C 0x02
+#define E1000_HICR_SV 0x04 /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET 0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fbf75fdca994..a177b8b65c44 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -42,7 +42,6 @@
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
@@ -56,7 +55,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
+#define DRV_VERSION "2.2.14" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -87,20 +86,7 @@ struct e1000_reg_info {
char *name;
};
-#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
-
-#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
-
static const struct e1000_reg_info e1000_reg_info_tbl[] = {
-
/* General Registers */
{E1000_CTRL, "CTRL"},
{E1000_STATUS, "STATUS"},
@@ -488,20 +474,87 @@ static int e1000_desc_unused(struct e1000_ring *ring)
}
/**
+ * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
+ * @adapter: board private structure
+ * @hwtstamps: time stamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * Convert the system time value stored in the RX/TXSTMP registers into a
+ * hwtstamp which can be used by the upper level time stamping functions.
+ *
+ * The 'systim_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two 32 bit registers. The first read latches the
+ * value.
+ **/
+static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
+{
+ u64 ns;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ ns = timecounter_cyc2time(&adapter->tc, systim);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
+ * @adapter: board private structure
+ * @status: descriptor extended error and status field
+ * @skb: particular skb to include time stamp
+ *
+ * If the time stamp is valid, convert it into the timecounter ns value
+ * and store that result into the shhwtstamps structure which is passed
+ * up the network stack.
+ **/
+static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
+ struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u64 rxstmp;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
+ !(status & E1000_RXDEXT_STATERR_TST) ||
+ !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
+
+ /* The Rx time stamp registers contain the time stamp. No other
+ * received packet will be time stamped until the Rx time stamp
+ * registers are read. Because only one packet can be time stamped
+ * at a time, the register values must belong to this packet and
+ * therefore none of the other additional attributes need to be
+ * compared.
+ */
+ rxstmp = (u64)er32(RXSTMPL);
+ rxstmp |= (u64)er32(RXSTMPH) << 32;
+ e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);
+
+ adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
+}
+
+/**
* e1000_receive_skb - helper function to handle Rx indications
* @adapter: board private structure
- * @status: descriptor status field as written by hardware
+ * @staterr: descriptor extended error and status field as written by hardware
* @vlan: descriptor vlan field as written by hardware (no le/be conversion)
* @skb: pointer to sk_buff to be indicated to stack
**/
static void e1000_receive_skb(struct e1000_adapter *adapter,
struct net_device *netdev, struct sk_buff *skb,
- u8 status, __le16 vlan)
+ u32 staterr, __le16 vlan)
{
u16 tag = le16_to_cpu(vlan);
+
+ e1000e_rx_hwtstamp(adapter, staterr, skb);
+
skb->protocol = eth_type_trans(skb, netdev);
- if (status & E1000_RXD_STAT_VP)
+ if (staterr & E1000_RXD_STAT_VP)
__vlan_hwaccel_put_tag(skb, tag);
napi_gro_receive(&adapter->napi, skb);
@@ -765,7 +818,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
struct e1000_buffer *buffer_info;
struct sk_buff *skb;
unsigned int i;
- unsigned int bufsz = 256 - 16 /* for skb_reserve */;
+ unsigned int bufsz = 256 - 16; /* for skb_reserve */
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
@@ -1050,9 +1103,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
adapter->tx_hang_recheck = false;
netif_stop_queue(netdev);
- e1e_rphy(hw, PHY_STATUS, &phy_status);
- e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
- e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
+ e1e_rphy(hw, MII_BMSR, &phy_status);
+ e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
+ e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);
pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
@@ -1092,6 +1145,41 @@ static void e1000_print_hw_hang(struct work_struct *work)
}
/**
+ * e1000e_tx_hwtstamp_work - check for Tx time stamp
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb. The timestamp must
+ * be for this skb because only one such packet is allowed in the queue.
+ */
+static void e1000e_tx_hwtstamp_work(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
+ tx_hwtstamp_work);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!adapter->tx_hwtstamp_skb)
+ return;
+
+ if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 txstmp;
+
+ txstmp = er32(TXSTMPL);
+ txstmp |= (u64)er32(TXSTMPH) << 32;
+
+ e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
+
+ skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ } else {
+ /* reschedule to check later */
+ schedule_work(&adapter->tx_hwtstamp_work);
+ }
+}
+
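e1000e_tx_hwtstamp_work() consumes the timestamp for a single outstanding skb, so the transmit path has to park that skb in adapter->tx_hwtstamp_skb and kick the work item. The xmit-side hook is not part of this hunk; the following is only a hedged sketch of that producer side, using the generic skb_shinfo() timestamp flags:

/* Illustrative producer side (assumed, not shown in this patch): only one
 * skb may have a hardware timestamp pending at a time.
 */
if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
    !adapter->tx_hwtstamp_skb) {
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	adapter->tx_hwtstamp_skb = skb_get(skb);	/* hold a reference */
	schedule_work(&adapter->tx_hwtstamp_work);	/* polls TSYNCTXCTL */
}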
+/**
* e1000_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: Tx descriptor ring
*
@@ -1345,8 +1433,8 @@ copydone:
cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
adapter->rx_hdr_split++;
- e1000_receive_skb(adapter, netdev, skb,
- staterr, rx_desc->wb.middle.vlan);
+ e1000_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.middle.vlan);
next_desc:
rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
@@ -1645,7 +1733,7 @@ static void e1000e_downshift_workaround(struct work_struct *work)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-static irqreturn_t e1000_intr_msi(int irq, void *data)
+static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1671,13 +1759,30 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
/* disable receives */
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
- adapter->flags |= FLAG_RX_RESTART_NOW;
+ adapter->flags |= FLAG_RESTART_NOW;
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -1694,7 +1799,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-static irqreturn_t e1000_intr(int irq, void *data)
+static irqreturn_t e1000_intr(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1734,13 +1839,30 @@ static irqreturn_t e1000_intr(int irq, void *data)
/* disable receives */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
- adapter->flags |= FLAG_RX_RESTART_NOW;
+ adapter->flags |= FLAG_RESTART_NOW;
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -1752,7 +1874,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t e1000_msix_other(int irq, void *data)
+static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1784,8 +1906,7 @@ no_link_interrupt:
return IRQ_HANDLED;
}
-
-static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
+static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1803,7 +1924,7 @@ static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
+static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1890,7 +2011,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
/* Auto-Mask Other interrupts upon ICR read */
-#define E1000_EIAC_MASK_82574 0x01F00000
ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
ctrl_ext |= E1000_CTRL_EXT_EIAME;
ew32(CTRL_EXT, ctrl_ext);
@@ -2104,6 +2224,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+ } else if (hw->mac.type == e1000_pch_lpt) {
+ ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
ew32(IMS, IMS_ENABLE_MASK);
}
@@ -2358,9 +2480,7 @@ void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
* while increasing bulk throughput. This functionality is controlled
* by the InterruptThrottleRate module parameter.
**/
-static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
- u16 itr_setting, int packets,
- int bytes)
+static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
{
unsigned int retval = itr_setting;
@@ -2405,7 +2525,6 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
static void e1000_set_itr(struct e1000_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
u16 current_itr;
u32 new_itr = adapter->itr;
@@ -2421,18 +2540,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
goto set_itr_now;
}
- adapter->tx_itr = e1000_update_itr(adapter,
- adapter->tx_itr,
- adapter->total_tx_packets,
- adapter->total_tx_bytes);
+ adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
adapter->tx_itr = low_latency;
- adapter->rx_itr = e1000_update_itr(adapter,
- adapter->rx_itr,
- adapter->total_rx_packets,
- adapter->total_rx_bytes);
+ adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
adapter->rx_itr = low_latency;
@@ -2468,10 +2585,7 @@ set_itr_now:
if (adapter->msix_entries)
adapter->rx_ring->set_itr = 1;
else
- if (new_itr)
- ew32(ITR, 1000000000 / (new_itr * 256));
- else
- ew32(ITR, 0);
+ e1000e_write_itr(adapter, new_itr);
}
}
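A hedged aside, not part of this hunk: e1000e_write_itr() is assumed here to wrap the open-coded conversion removed above, turning an ITR value given in interrupts per second into the register's 256 ns units (MSI-X handling omitted). A minimal sketch under that assumption:

static void e1000e_write_itr_sketch(struct e1000_adapter *adapter, u32 itr)
{
	struct e1000_hw *hw = &adapter->hw;

	/* ITR counts in 256 ns increments; writing 0 disables throttling */
	if (itr)
		ew32(ITR, 1000000000 / (itr * 256));
	else
		ew32(ITR, 0);
}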
@@ -3013,7 +3127,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
ew32(RCTL, rctl);
/* just started the receive unit, no need to restart */
- adapter->flags &= ~FLAG_RX_RESTART_NOW;
+ adapter->flags &= ~FLAG_RESTART_NOW;
}
/**
@@ -3108,18 +3222,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
rxcsum &= ~E1000_RXCSUM_TUOFL;
ew32(RXCSUM, rxcsum);
- if (adapter->hw.mac.type == e1000_pch2lan) {
- /* With jumbo frames, excessive C-state transition
- * latencies result in dropped transactions.
- */
- if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ /* With jumbo frames, excessive C-state transition latencies result
+ * in dropped transactions.
+ */
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ u32 lat =
+ ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
+ adapter->max_frame_size) * 8 / 1000;
+
+ if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
ew32(RXDCTL(0), rxdctl | 0x3);
- pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
- } else {
- pm_qos_update_request(&adapter->netdev->pm_qos_req,
- PM_QOS_DEFAULT_VALUE);
}
+
+ pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
+ } else {
+ pm_qos_update_request(&adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
/* Enable Receives */
@@ -3308,6 +3427,241 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
}
/**
+ * e1000e_get_base_timinca - get default SYSTIM time increment attributes
+ * @adapter: board private structure
+ * @timinca: pointer to returned time increment attributes
+ *
+ * Get attributes for incrementing the System Time Register SYSTIML/H at
+ * the default base frequency, and set the cyclecounter shift value.
+ **/
+s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 incvalue, incperiod, shift;
+
+ /* Make sure clock is enabled on I217 before checking the frequency */
+ if ((hw->mac.type == e1000_pch_lpt) &&
+ !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
+ !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
+ u32 fextnvm7 = er32(FEXTNVM7);
+
+ if (!(fextnvm7 & (1 << 0))) {
+ ew32(FEXTNVM7, fextnvm7 | (1 << 0));
+ e1e_flush();
+ }
+ }
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ /* On I217, the clock frequency is 25MHz or 96MHz as
+ * indicated by the System Clock Frequency Indication
+ */
+ if ((hw->mac.type != e1000_pch_lpt) ||
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ /* Stable 96MHz frequency */
+ incperiod = INCPERIOD_96MHz;
+ incvalue = INCVALUE_96MHz;
+ shift = INCVALUE_SHIFT_96MHz;
+ adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ break;
+ }
+ /* fall-through */
+ case e1000_82574:
+ case e1000_82583:
+ /* Stable 25MHz frequency */
+ incperiod = INCPERIOD_25MHz;
+ incvalue = INCVALUE_25MHz;
+ shift = INCVALUE_SHIFT_25MHz;
+ adapter->cc.shift = shift;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
+ ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
+
+ return 0;
+}
+
+/**
+ * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable time stamping
+ * @adapter: board private structure
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware filters.
+ * Not all combinations are supported, in particular event type has to be
+ * specified. Matching the kind of event packet is not supported, with the
+ * exception of "all V2 events regardless of level 2 or 4".
+ **/
+static int e1000e_config_hwtstamp(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct hwtstamp_config *config = &adapter->hwtstamp_config;
+ u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ u32 rxmtrl = 0;
+ u16 rxudp = 0;
+ bool is_l4 = false;
+ bool is_l2 = false;
+ u32 regval;
+ s32 ret_val;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return -EINVAL;
+
+ /* flags reserved for future extensions - must be zero */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ /* Also time stamps V2 L2 Path Delay Request/Response */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
+ is_l2 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ /* Also time stamps V2 L2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ /* Hardware cannot filter just V2 L4 Sync messages;
+ * fall through to V2 (both L2 and L4) Sync.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ /* Also time stamps V2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ /* Hardware cannot filter just V2 L4 Delay Request messages;
+ * fall through to V2 (both L2 and L4) Delay Request.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* Also time stamps V2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ /* Hardware cannot filter just V2 L4 or L2 Event messages;
+ * fall through to all V2 (both L2 and L4) Events.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ /* For V1, the hardware can only filter Sync messages or
+ * Delay Request messages, but not both, so fall through to
+ * time stamp all packets.
+ */
+ case HWTSTAMP_FILTER_ALL:
+ is_l2 = true;
+ is_l4 = true;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* enable/disable Tx h/w time stamping */
+ regval = er32(TSYNCTXCTL);
+ regval &= ~E1000_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ ew32(TSYNCTXCTL, regval);
+ if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
+ (regval & E1000_TSYNCTXCTL_ENABLED)) {
+ e_err("Timesync Tx Control register not set as expected\n");
+ return -EAGAIN;
+ }
+
+ /* enable/disable Rx h/w time stamping */
+ regval = er32(TSYNCRXCTL);
+ regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ ew32(TSYNCRXCTL, regval);
+ if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
+ E1000_TSYNCRXCTL_TYPE_MASK)) !=
+ (regval & (E1000_TSYNCRXCTL_ENABLED |
+ E1000_TSYNCRXCTL_TYPE_MASK))) {
+ e_err("Timesync Rx Control register not set as expected\n");
+ return -EAGAIN;
+ }
+
+ /* L2: define ethertype filter for time stamped packets */
+ if (is_l2)
+ rxmtrl |= ETH_P_1588;
+
+ /* define which PTP packets get time stamped */
+ ew32(RXMTRL, rxmtrl);
+
+ /* Filter by destination port */
+ if (is_l4) {
+ rxudp = PTP_EV_PORT;
+ cpu_to_be16s(&rxudp);
+ }
+ ew32(RXUDP, rxudp);
+
+ e1e_flush();
+
+ /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bits */
+ er32(RXSTMPH);
+ er32(TXSTMPH);
+
+ /* Get and set the System Time Register SYSTIM base frequency */
+ ret_val = e1000e_get_base_timinca(adapter, &regval);
+ if (ret_val)
+ return ret_val;
+ ew32(TIMINCA, regval);
+
+ /* reset the ns time counter */
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+
+ return 0;
+}
+
+/**
* e1000_configure - configure the hardware for Rx and Tx
* @adapter: private board structure
**/
@@ -3473,14 +3827,17 @@ void e1000e_reset(struct e1000_adapter *adapter)
break;
case e1000_pch2lan:
case e1000_pch_lpt:
- fc->high_water = 0x05C20;
- fc->low_water = 0x05048;
- fc->pause_time = 0x0650;
fc->refresh_time = 0x0400;
- if (adapter->netdev->mtu > ETH_DATA_LEN) {
- pba = 14;
- ew32(PBA, pba);
+
+ if (adapter->netdev->mtu <= ETH_DATA_LEN) {
+ fc->high_water = 0x05C20;
+ fc->low_water = 0x05048;
+ fc->pause_time = 0x0650;
+ break;
}
+
+ fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
+ fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
break;
}
@@ -3533,6 +3890,9 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000e_reset_adaptive(hw);
+ /* initialize systim and reset the ns time counter */
+ e1000e_config_hwtstamp(adapter);
+
if (!netif_running(adapter->netdev) &&
!test_bit(__E1000_TESTING, &adapter->state)) {
e1000_power_down_phy(adapter);
@@ -3669,6 +4029,24 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
}
/**
+ * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
+ * @cc: cyclecounter structure
+ **/
+static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
+ cc);
+ struct e1000_hw *hw = &adapter->hw;
+ cycle_t systim;
+
+ /* latch SYSTIMH on read of SYSTIML */
+ systim = (cycle_t)er32(SYSTIML);
+ systim |= (cycle_t)er32(SYSTIMH) << 32;
+
+ return systim;
+}
+
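A hedged sketch, not part of the patch: the e1000e_systim_to_hwtstamp() helper referenced earlier presumably feeds such a raw SYSTIM latch through the timecounter built on this cyclecounter, roughly as follows (the _sketch name is purely illustrative).

static u64 e1000e_systim_to_ns_sketch(struct e1000_adapter *adapter, u64 systim)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&adapter->systim_lock, flags);
	/* nanoseconds since timecounter_init(), derived from the raw latch */
	ns = timecounter_cyc2time(&adapter->tc, systim);
	spin_unlock_irqrestore(&adapter->systim_lock, flags);

	return ns;
}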
+/**
* e1000_sw_init - Initialize general software structures (struct e1000_adapter)
* @adapter: board private structure to initialize
*
@@ -3694,6 +4072,17 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
if (e1000_alloc_queues(adapter))
return -ENOMEM;
+ /* Setup hardware time stamping cyclecounter */
+ if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
+ adapter->cc.read = e1000e_cyclecounter_read;
+ adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mult = 1;
+ /* cc.shift set in e1000e_get_base_timinca() */
+
+ spin_lock_init(&adapter->systim_lock);
+ INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
+ }
+
/* Explicitly disable IRQ since the NIC can be in any state. */
e1000_irq_disable(adapter);
@@ -3706,7 +4095,7 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-static irqreturn_t e1000_intr_msi_test(int irq, void *data)
+static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3877,10 +4266,8 @@ static int e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/* DMA latency requirement to work around jumbo issue */
- if (adapter->hw.mac.type == e1000_pch2lan)
- pm_qos_add_request(&adapter->netdev->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -3988,8 +4375,7 @@ static int e1000_close(struct net_device *netdev)
!test_bit(__E1000_TESTING, &adapter->state))
e1000e_release_hw_control(adapter);
- if (adapter->hw.mac.type == e1000_pch2lan)
- pm_qos_remove_request(&adapter->netdev->pm_qos_req);
+ pm_qos_remove_request(&adapter->netdev->pm_qos_req);
pm_runtime_put_sync(&pdev->dev);
@@ -4251,6 +4637,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.mgptc += er32(MGTPTC);
adapter->stats.mgprc += er32(MGTPRC);
adapter->stats.mgpdc += er32(MGTPDC);
+
+ /* Correctable ECC Errors */
+ if (hw->mac.type == e1000_pch_lpt) {
+ u32 pbeccsts = er32(PBECCSTS);
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ }
}
/**
@@ -4266,14 +4662,14 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
int ret_val;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
- ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
- ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
- ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
- ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
- ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
- ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
- ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
+ ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
+ ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
+ ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
+ ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
+ ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
+ ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
+ ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
if (ret_val)
e_warn("Error reading PHY register\n");
} else {
@@ -4300,9 +4696,8 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */
- printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
- adapter->netdev->name,
- adapter->link_speed,
+ pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->netdev->name, adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
(ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
(ctrl & E1000_CTRL_RFCE) ? "Rx" :
@@ -4355,11 +4750,11 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
{
/* make sure the receive unit is started */
if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
- (adapter->flags & FLAG_RX_RESTART_NOW)) {
+ (adapter->flags & FLAG_RESTART_NOW)) {
struct e1000_hw *hw = &adapter->hw;
u32 rctl = er32(RCTL);
ew32(RCTL, rctl | E1000_RCTL_EN);
- adapter->flags &= ~FLAG_RX_RESTART_NOW;
+ adapter->flags &= ~FLAG_RESTART_NOW;
}
}
@@ -4435,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work)
&adapter->link_speed,
&adapter->link_duplex);
e1000_print_link_info(adapter);
+
+ /* check if SmartSpeed worked */
+ e1000e_check_downshift(hw);
+ if (phy->speed_downgraded)
+ netdev_warn(netdev,
+ "Link Speed was downgraded by SmartSpeed\n");
+
/* On supported PHYs, check for duplex mismatch only
* if link has autonegotiated at 10/100 half
*/
@@ -4446,9 +4848,9 @@ static void e1000_watchdog_task(struct work_struct *work)
(adapter->link_duplex == HALF_DUPLEX)) {
u16 autoneg_exp;
- e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
+ e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
- if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
+ if (!(autoneg_exp & EXPANSION_NWAY))
e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
}
@@ -4521,15 +4923,22 @@ static void e1000_watchdog_task(struct work_struct *work)
adapter->link_speed = 0;
adapter->link_duplex = 0;
/* Link status message must follow this format */
- printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
- adapter->netdev->name);
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name);
netif_carrier_off(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
- if (adapter->flags & FLAG_RX_NEEDS_RESTART)
- schedule_work(&adapter->reset_task);
+ /* The link is lost, so the controller stops DMA.
+ * If there is queued Tx work that can no longer be done, or
+ * if this is an 80003ES2LAN (which needs an Rx packet buffer
+ * work-around on a link-down event), reset the controller to
+ * flush the Tx/Rx packet buffers.
+ * (Do the reset outside of interrupt context.)
+ */
+ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
LINK_TIMEOUT);
@@ -4551,20 +4960,14 @@ link_up:
adapter->gotc_old = adapter->stats.gotc;
spin_unlock(&adapter->stats64_lock);
- e1000e_update_adaptive(&adapter->hw);
-
- if (!netif_carrier_ok(netdev) &&
- (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
- /* We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context).
- */
+ if (adapter->flags & FLAG_RESTART_NOW) {
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
return;
}
+ e1000e_update_adaptive(&adapter->hw);
+
/* Simple mode for Interrupt Throttle Rate (ITR) */
if (adapter->itr_setting == 4) {
/* Symmetric Tx/Rx gets a reduced ITR=2000;
@@ -4601,6 +5004,17 @@ link_up:
if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
e1000e_check_82574_phy_workaround(adapter);
+ /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */
+ if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
+ er32(RXSTMPH);
+ adapter->rx_hwtstamp_cleared++;
+ } else {
+ adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
+ }
+ }
+
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer,
@@ -4612,6 +5026,7 @@ link_up:
#define E1000_TX_FLAGS_TSO 0x00000004
#define E1000_TX_FLAGS_IPV4 0x00000008
#define E1000_TX_FLAGS_NO_FCS 0x00000010
+#define E1000_TX_FLAGS_HWTSTAMP 0x00000020
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
@@ -4870,6 +5285,11 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
txd_lower &= ~(E1000_TXD_CMD_IFCS);
+ if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
+ }
+
i = tx_ring->next_to_use;
do {
@@ -4918,12 +5338,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
struct e1000_hw *hw = &adapter->hw;
u16 length, offset;
- if (vlan_tx_tag_present(skb)) {
- if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
- (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
- return 0;
- }
+ if (vlan_tx_tag_present(skb) &&
+ !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+ (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
+ return 0;
if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
return 0;
@@ -5094,7 +5513,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
nr_frags);
if (count) {
- skb_tx_timestamp(skb);
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ !adapter->tx_hwtstamp_skb)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
+ adapter->tx_hwtstamp_skb = skb_get(skb);
+ schedule_work(&adapter->tx_hwtstamp_work);
+ } else {
+ skb_tx_timestamp(skb);
+ }
netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(tx_ring, tx_flags, count);
@@ -5134,10 +5561,9 @@ static void e1000_reset_task(struct work_struct *work)
if (test_bit(__E1000_DOWN, &adapter->state))
return;
- if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
- (adapter->flags & FLAG_RX_RESTART_NOW))) {
+ if (!(adapter->flags & FLAG_RESTART_NOW)) {
e1000e_dump(adapter);
- e_err("Reset adapter\n");
+ e_err("Reset adapter unexpectedly\n");
}
e1000e_reinit_locked(adapter);
}
@@ -5323,6 +5749,61 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
return 0;
}
+/**
+ * e1000e_hwtstamp_ioctl - control hardware time stamping
+ * @netdev: network interface device structure
+ * @ifr: interface request
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware filters.
+ * Not all combinations are supported, in particular event type has to be
+ * specified. Matching the kind of event packet is not supported, with the
+ * exception of "all V2 events regardless of level 2 or 4".
+ **/
+static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int ret_val;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ adapter->hwtstamp_config = config;
+
+ ret_val = e1000e_config_hwtstamp(adapter);
+ if (ret_val)
+ return ret_val;
+
+ config = adapter->hwtstamp_config;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* With V2 type filters which specify a Sync or Delay Request,
+ * Path Delay Request/Response messages are also time stamped
+ * by hardware, so notify the caller that the requested packets
+ * plus some others are time stamped.
+ */
+ config.rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ default:
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
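For illustration only, a hedged user-space sketch (not part of the patch) of driving the new SIOCSHWTSTAMP path; the interface name "eth0" and the chosen filter are placeholders.

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int request_hwtstamp(int sock)
{
	struct hwtstamp_config cfg = {
		.flags = 0,				/* must be zero */
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;

	/* cfg.rx_filter now holds what was actually enabled, e.g.
	 * HWTSTAMP_FILTER_SOME for the V2 Sync/Delay_Req cases above
	 */
	return 0;
}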
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -5330,6 +5811,8 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return e1000_mii_ioctl(netdev, ifr, cmd);
+ case SIOCSHWTSTAMP:
+ return e1000e_hwtstamp_ioctl(netdev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -5340,7 +5823,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
struct e1000_hw *hw = &adapter->hw;
u32 i, mac_reg;
u16 phy_reg, wuc_enable;
- int retval = 0;
+ int retval;
/* copy MAC RARs to PHY RARs */
e1000_copy_rx_addrs_to_phy_ich8lan(hw);
@@ -5554,14 +6037,21 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
+ u16 aspm_ctl = 0;
+
+ if (state & PCIE_LINK_STATE_L0S)
+ aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S;
+ if (state & PCIE_LINK_STATE_L1)
+ aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1;
+
/* Both device and parent should have the same ASPM setting.
* Disable ASPM in downstream component first and then upstream.
*/
- pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl);
if (pdev->bus->self)
pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
- state);
+ aspm_ctl);
}
#endif
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
@@ -5746,7 +6236,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
#ifdef CONFIG_NET_POLL_CONTROLLER
-static irqreturn_t e1000_intr_msix(int irq, void *data)
+static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5910,7 +6400,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
*/
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
-
}
static void e1000_print_device_info(struct e1000_adapter *adapter)
@@ -6068,8 +6557,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
err = pci_request_selected_regions_exclusive(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM),
- e1000e_driver_name);
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ e1000e_driver_name);
if (err)
goto err_pci_reg;
@@ -6228,11 +6717,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"NVM Read Error while reading MAC address\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
- netdev->perm_addr);
+ netdev->dev_addr);
err = -EIO;
goto err_eeprom;
}
@@ -6318,6 +6806,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ /* init PTP hardware clock */
+ e1000e_ptp_init(adapter);
+
e1000_print_device_info(adapter);
if (pci_dev_run_wake(pdev))
@@ -6366,6 +6857,8 @@ static void e1000_remove(struct pci_dev *pdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
bool down = test_bit(__E1000_DOWN, &adapter->state);
+ e1000e_ptp_remove(adapter);
+
/* The timers may be rescheduled, so explicitly disable them
* from being rescheduled.
*/
@@ -6380,6 +6873,14 @@ static void e1000_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->update_phy_task);
cancel_work_sync(&adapter->print_hang_task);
+ if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
+ cancel_work_sync(&adapter->tx_hwtstamp_work);
+ if (adapter->tx_hwtstamp_skb) {
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ }
+ }
+
if (!(netdev->flags & IFF_UP))
e1000_power_down_phy(adapter);
@@ -6532,7 +7033,7 @@ static int __init e1000_init_module(void)
int ret;
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index b6468804cb2e..84fecc268162 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -359,7 +359,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
- s32 ret_val;
+ s32 ret_val = -E1000_ERR_NVM;
u16 widx = 0;
/* A check for invalid values: offset too large, too many words,
@@ -371,16 +371,18 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
return -E1000_ERR_NVM;
}
- ret_val = nvm->ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
while (widx < words) {
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
- ret_val = e1000_ready_nvm_eeprom(hw);
+ ret_val = nvm->ops.acquire(hw);
if (ret_val)
- goto release;
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
e1000_standby_nvm(hw);
@@ -413,12 +415,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
break;
}
}
+ usleep_range(10000, 20000);
+ nvm->ops.release(hw);
}
- usleep_range(10000, 20000);
-release:
- nvm->ops.release(hw);
-
return ret_val;
}
@@ -464,8 +464,8 @@ s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
if (nvm_data != NVM_PBA_PTR_GUARD) {
e_dbg("NVM PBA number is not stored as string\n");
- /* we will need 11 characters to store the PBA */
- if (pba_num_size < 11) {
+ /* make sure the caller's buffer is big enough to store the PBA */
+ if (pba_num_size < E1000_PBANUM_LENGTH) {
e_dbg("PBA string buffer too small\n");
return E1000_ERR_NO_SPACE;
}
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
new file mode 100644
index 000000000000..45fc69561627
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -0,0 +1,47 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_NVM_H_
+#define _E1000E_NVM_H_
+
+s32 e1000e_acquire_nvm(struct e1000_hw *hw);
+
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000e_release_nvm(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE 0xDB00
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 89d536dd7ff5..98da75dff936 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -53,8 +53,7 @@ MODULE_PARM_DESC(copybreak,
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
- static int X[E1000_MAX_NIC+1] \
- = E1000_PARAM_INIT; \
+ static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
@@ -447,8 +446,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
e1000_validate_option(&spd, &opt, adapter);
- if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
- && spd)
+ if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 28b38ff37e84..0930c136aa31 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -28,16 +28,12 @@
#include "e1000.h"
-static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
-static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
static s32 e1000_wait_autoneg(struct e1000_hw *hw);
-static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
u16 *data, bool read, bool page_set);
static u32 e1000_get_phy_addr_for_hv_page(u32 page);
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
- u16 *data, bool read);
+ u16 *data, bool read);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
@@ -57,48 +53,6 @@ static const u16 e1000_igp_2_cable_length_table[] = {
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_igp_2_cable_length_table)
-#define BM_PHY_REG_PAGE(offset) \
- ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
-#define BM_PHY_REG_NUM(offset) \
- ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
- (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
- ~MAX_PHY_REG_ADDRESS)))
-
-#define HV_INTC_FC_PAGE_START 768
-#define I82578_ADDR_REG 29
-#define I82577_ADDR_REG 16
-#define I82577_CFG_REG 22
-#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
-#define I82577_CTRL_REG 23
-
-/* 82577 specific PHY registers */
-#define I82577_PHY_CTRL_2 18
-#define I82577_PHY_STATUS_2 26
-#define I82577_PHY_DIAG_STATUS 31
-
-/* I82577 PHY Status 2 */
-#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
-#define I82577_PHY_STATUS2_MDIX 0x0800
-#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
-#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
-
-/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
-#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
-#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
-
-/* I82577 PHY Diagnostics Status */
-#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
-#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
-
-/* BM PHY Copper Specific Control 1 */
-#define BM_CS_CTRL1 16
-
-#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
-#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
-#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
-
/**
* e1000e_check_reset_block_generic - Check if PHY reset is blocked
* @hw: pointer to the HW structure
@@ -135,13 +89,13 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
return 0;
while (retry_count < 2) {
- ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id);
if (ret_val)
return ret_val;
phy->id = (u32)(phy_id << 16);
udelay(20);
- ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -645,31 +599,31 @@ static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
u16 phy_data;
/* Resolve Master/Slave mode */
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &phy_data);
if (ret_val)
return ret_val;
/* load defaults for future use */
- hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
- ((phy_data & CR_1000T_MS_VALUE) ?
+ hw->phy.original_ms_type = (phy_data & CTL1000_ENABLE_MASTER) ?
+ ((phy_data & CTL1000_AS_MASTER) ?
e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto;
switch (hw->phy.ms_type) {
case e1000_ms_force_master:
- phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ phy_data |= (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
break;
case e1000_ms_force_slave:
- phy_data |= CR_1000T_MS_ENABLE;
- phy_data &= ~(CR_1000T_MS_VALUE);
+ phy_data |= CTL1000_ENABLE_MASTER;
+ phy_data &= ~(CTL1000_AS_MASTER);
break;
case e1000_ms_auto:
- phy_data &= ~CR_1000T_MS_ENABLE;
+ phy_data &= ~CTL1000_ENABLE_MASTER;
/* fall-through */
default:
break;
}
- return e1e_wphy(hw, PHY_1000T_CTRL, phy_data);
+ return e1e_wphy(hw, MII_CTRL1000, phy_data);
}
/**
@@ -792,7 +746,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Commit the changes. */
- ret_val = e1000e_commit_phy(hw);
+ ret_val = phy->ops.commit(hw);
if (ret_val) {
e_dbg("Error committing the PHY changes\n");
return ret_val;
@@ -848,10 +802,12 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
}
/* Commit the changes. */
- ret_val = e1000e_commit_phy(hw);
- if (ret_val) {
- e_dbg("Error committing the PHY changes\n");
- return ret_val;
+ if (phy->ops.commit) {
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ e_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
}
if (phy->type == e1000_phy_82578) {
@@ -895,10 +851,12 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
msleep(100);
/* disable lplu d0 during driver init */
- ret_val = e1000_set_d0_lplu_state(hw, false);
- if (ret_val) {
- e_dbg("Error Disabling LPLU D0\n");
- return ret_val;
+ if (hw->phy.ops.set_d0_lplu_state) {
+ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
+ if (ret_val) {
+ e_dbg("Error Disabling LPLU D0\n");
+ return ret_val;
+ }
}
/* Configure mdi-mdix settings */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data);
@@ -943,12 +901,12 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
return ret_val;
/* Set auto Master/Slave resolution process */
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &data);
if (ret_val)
return ret_val;
- data &= ~CR_1000T_MS_ENABLE;
- ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
+ data &= ~CTL1000_ENABLE_MASTER;
+ ret_val = e1e_wphy(hw, MII_CTRL1000, data);
if (ret_val)
return ret_val;
}
@@ -978,13 +936,13 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
phy->autoneg_advertised &= phy->autoneg_mask;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
- ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
/* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
}
@@ -1000,36 +958,35 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9).
*/
- mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
- NWAY_AR_100TX_HD_CAPS |
- NWAY_AR_10T_FD_CAPS |
- NWAY_AR_10T_HD_CAPS);
- mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+ mii_autoneg_adv_reg &= ~(ADVERTISE_100FULL |
+ ADVERTISE_100HALF |
+ ADVERTISE_10FULL | ADVERTISE_10HALF);
+ mii_1000t_ctrl_reg &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
/* Do we want to advertise 10 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
e_dbg("Advertise 10mb Half duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10HALF;
}
/* Do we want to advertise 10 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
e_dbg("Advertise 10mb Full duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10FULL;
}
/* Do we want to advertise 100 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
e_dbg("Advertise 100mb Half duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100HALF;
}
/* Do we want to advertise 100 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
e_dbg("Advertise 100mb Full duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100FULL;
}
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
@@ -1039,14 +996,14 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
/* Do we want to advertise 1000 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
e_dbg("Advertise 1000mb Full duplex\n");
- mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
}
/* Check for a software override of the flow control settings, and
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
- * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+ * Advertisement Register (MII_ADVERTISE) and re-start auto-
* negotiation.
*
* The possible values of the "fc" parameter are:
@@ -1064,7 +1021,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
/* Flow control (Rx & Tx) is completely disabled by a
* software over-ride.
*/
- mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ mii_autoneg_adv_reg &=
+ ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
break;
case e1000_fc_rx_pause:
/* Rx Flow control is enabled, and Tx Flow control is
@@ -1076,34 +1034,36 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* (in e1000e_config_fc_after_link_up) we will disable the
* hw's ability to send PAUSE frames.
*/
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ mii_autoneg_adv_reg |=
+ (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
break;
case e1000_fc_tx_pause:
/* Tx Flow control is enabled, and Rx Flow control is
* disabled, by a software over-ride.
*/
- mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
- mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM;
+ mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP;
break;
case e1000_fc_full:
/* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ mii_autoneg_adv_reg |=
+ (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
break;
default:
e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
}
- ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ ret_val = e1e_wphy(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
if (phy->autoneg_mask & ADVERTISE_1000_FULL)
- ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+ ret_val = e1e_wphy(hw, MII_CTRL1000, mii_1000t_ctrl_reg);
return ret_val;
}
@@ -1145,12 +1105,12 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
/* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
*/
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl);
if (ret_val)
return ret_val;
- phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+ phy_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl);
if (ret_val)
return ret_val;
@@ -1196,7 +1156,7 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
* depending on user settings.
*/
e_dbg("Forcing Speed and Duplex\n");
- ret_val = e1000_phy_force_speed_duplex(hw);
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
if (ret_val) {
e_dbg("Error Forcing Speed and Duplex\n");
return ret_val;
@@ -1237,13 +1197,13 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
u16 phy_data;
bool link;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
@@ -1315,20 +1275,22 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
e_dbg("M88E1000 PSCR: %X\n", phy_data);
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
/* Reset the phy to commit changes. */
- ret_val = e1000e_commit_phy(hw);
- if (ret_val)
- return ret_val;
+ if (hw->phy.ops.commit) {
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+ return ret_val;
+ }
if (phy->autoneg_wait_to_complete) {
e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
@@ -1406,13 +1368,13 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
u16 data;
bool link;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, data);
+ ret_val = e1e_wphy(hw, MII_BMCR, data);
if (ret_val)
return ret_val;
@@ -1456,13 +1418,13 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
/**
* e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
* @hw: pointer to the HW structure
- * @phy_ctrl: pointer to current value of PHY_CONTROL
+ * @phy_ctrl: pointer to current value of MII_BMCR
*
* Forces speed and duplex on the PHY by doing the following: disable flow
* control, force speed/duplex on the MAC, disable auto speed detection,
* disable auto-negotiation, configure duplex, configure speed, configure
* the collision distance, write configuration to CTRL register. The
- * caller must write to the PHY_CONTROL register for these settings to
+ * caller must write to the MII_BMCR register for these settings to
* take effect.
**/
void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
@@ -1482,29 +1444,28 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
ctrl &= ~E1000_CTRL_ASDE;
/* Disable autoneg on the phy */
- *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+ *phy_ctrl &= ~BMCR_ANENABLE;
/* Forcing Full or Half Duplex? */
if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
ctrl &= ~E1000_CTRL_FD;
- *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+ *phy_ctrl &= ~BMCR_FULLDPLX;
e_dbg("Half Duplex\n");
} else {
ctrl |= E1000_CTRL_FD;
- *phy_ctrl |= MII_CR_FULL_DUPLEX;
+ *phy_ctrl |= BMCR_FULLDPLX;
e_dbg("Full Duplex\n");
}
/* Forcing 10mb or 100mb? */
if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
ctrl |= E1000_CTRL_SPD_100;
- *phy_ctrl |= MII_CR_SPEED_100;
- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ *phy_ctrl |= BMCR_SPEED100;
+ *phy_ctrl &= ~BMCR_SPEED1000;
e_dbg("Forcing 100mb\n");
} else {
ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
- *phy_ctrl |= MII_CR_SPEED_10;
- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ *phy_ctrl &= ~(BMCR_SPEED1000 | BMCR_SPEED100);
e_dbg("Forcing 10mb\n");
}
@@ -1745,13 +1706,13 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ if (phy_status & BMSR_ANEGCOMPLETE)
break;
msleep(100);
}
@@ -1778,21 +1739,21 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u16 i, phy_status;
for (i = 0; i < iterations; i++) {
- /* Some PHYs require the PHY_STATUS register to be read
+ /* Some PHYs require the MII_BMSR register to be read
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
udelay(usec_interval);
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- if (phy_status & MII_SR_LINK_STATUS)
+ if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
mdelay(usec_interval/1000);
@@ -1962,21 +1923,19 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
- ret_val = e1000_get_cable_length(hw);
+ ret_val = hw->phy.ops.get_cable_length(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
+ ret_val = e1e_rphy(hw, MII_STAT1000, &phy_data);
if (ret_val)
return ret_val;
- phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->local_rx = (phy_data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->remote_rx = (phy_data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
} else {
/* Set values to "undefined" */
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
@@ -2026,21 +1985,19 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
- ret_val = e1000_get_cable_length(hw);
+ ret_val = phy->ops.get_cable_length(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+ ret_val = e1e_rphy(hw, MII_STAT1000, &data);
if (ret_val)
return ret_val;
- phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->local_rx = (data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->remote_rx = (data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
} else {
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined;
@@ -2114,12 +2071,12 @@ s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
s32 ret_val;
u16 phy_ctrl;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl);
if (ret_val)
return ret_val;
- phy_ctrl |= MII_CR_RESET;
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+ phy_ctrl |= BMCR_RESET;
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl);
if (ret_val)
return ret_val;
@@ -2166,17 +2123,17 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
phy->ops.release(hw);
- return e1000_get_phy_cfg_done(hw);
+ return phy->ops.get_cfg_done(hw);
}
/**
- * e1000e_get_cfg_done - Generic configuration done
+ * e1000e_get_cfg_done_generic - Generic configuration done
* @hw: pointer to the HW structure
*
* Generic function to wait 10 milli-seconds for configuration to complete
* and return success.
**/
-s32 e1000e_get_cfg_done(struct e1000_hw *hw)
+s32 e1000e_get_cfg_done_generic(struct e1000_hw __always_unused *hw)
{
mdelay(10);
@@ -2266,38 +2223,6 @@ s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
return 0;
}
-/* Internal function pointers */
-
-/**
- * e1000_get_phy_cfg_done - Generic PHY configuration done
- * @hw: pointer to the HW structure
- *
- * Return success if silicon family did not implement a family specific
- * get_cfg_done function.
- **/
-static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
-{
- if (hw->phy.ops.get_cfg_done)
- return hw->phy.ops.get_cfg_done(hw);
-
- return 0;
-}
-
-/**
- * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
- * @hw: pointer to the HW structure
- *
- * When the silicon family has not implemented a forced speed/duplex
- * function for the PHY, simply return 0.
- **/
-static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
-{
- if (hw->phy.ops.force_speed_duplex)
- return hw->phy.ops.force_speed_duplex(hw);
-
- return 0;
-}
-
/**
* e1000e_get_phy_type_from_id - Get PHY type from id
* @phy_id: phy_id read from the phy
@@ -2549,7 +2474,6 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
hw->phy.addr = 1;
if (offset > MAX_PHY_MULTI_PAGE_REG) {
-
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
page);
@@ -2672,7 +2596,7 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
**/
s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
{
- s32 ret_val = 0;
+ s32 ret_val;
/* Select Port Control Registers page */
ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
@@ -2781,9 +2705,9 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw)
u16 mii_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, PHY_CONTROL, &mii_reg);
- mii_reg &= ~MII_CR_POWER_DOWN;
- e1e_wphy(hw, PHY_CONTROL, mii_reg);
+ e1e_rphy(hw, MII_BMCR, &mii_reg);
+ mii_reg &= ~BMCR_PDOWN;
+ e1e_wphy(hw, MII_BMCR, mii_reg);
}
/**
@@ -2799,50 +2723,13 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
u16 mii_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, PHY_CONTROL, &mii_reg);
- mii_reg |= MII_CR_POWER_DOWN;
- e1e_wphy(hw, PHY_CONTROL, mii_reg);
+ e1e_rphy(hw, MII_BMCR, &mii_reg);
+ mii_reg |= BMCR_PDOWN;
+ e1e_wphy(hw, MII_BMCR, mii_reg);
usleep_range(1000, 2000);
}
/**
- * e1000e_commit_phy - Soft PHY reset
- * @hw: pointer to the HW structure
- *
- * Performs a soft PHY reset on those that apply. This is a function pointer
- * entry point called by drivers.
- **/
-s32 e1000e_commit_phy(struct e1000_hw *hw)
-{
- if (hw->phy.ops.commit)
- return hw->phy.ops.commit(hw);
-
- return 0;
-}
-
-/**
- * e1000_set_d0_lplu_state - Sets low power link up state for D0
- * @hw: pointer to the HW structure
- * @active: boolean used to enable/disable lplu
- *
- * Success returns 0, Failure returns 1
- *
- * The low power link up (lplu) state is set to the power management level D0
- * and SmartSpeed is disabled when active is true, else clear lplu for D0
- * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
- * is used during Dx states where the power conservation is most important.
- * During driver activity, SmartSpeed should be enabled so performance is
- * maintained. This is a function pointer entry point called by drivers.
- **/
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
-{
- if (hw->phy.ops.set_d0_lplu_state)
- return hw->phy.ops.set_d0_lplu_state(hw, active);
-
- return 0;
-}
-
-/**
* __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
@@ -3104,8 +2991,8 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
u16 *data, bool read)
{
s32 ret_val;
- u32 addr_reg = 0;
- u32 data_reg = 0;
+ u32 addr_reg;
+ u32 data_reg;
/* This takes care of the difference with desktop vs mobile phy */
addr_reg = (hw->phy.type == e1000_phy_82578) ?
@@ -3154,8 +3041,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
return 0;
/* Do not apply workaround if in PHY loopback bit 14 set */
- e1e_rphy(hw, PHY_CONTROL, &data);
- if (data & PHY_CONTROL_LB)
+ e1e_rphy(hw, MII_BMCR, &data);
+ if (data & BMCR_LOOPBACK)
return 0;
/* check if link is up and at 1Gbps */
@@ -3173,8 +3060,9 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
msleep(200);
/* flush the packets in the fifo buffer */
- ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
- HV_MUX_DATA_CTRL_FORCE_SPEED);
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL,
+ (HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED));
if (ret_val)
return ret_val;
@@ -3218,13 +3106,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
u16 phy_data;
bool link;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
@@ -3292,17 +3180,15 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+ ret_val = e1e_rphy(hw, MII_STAT1000, &data);
if (ret_val)
return ret_val;
- phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->local_rx = (data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->remote_rx = (data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
} else {
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined;
@@ -3333,7 +3219,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
I82577_DSTATUS_CABLE_LENGTH_SHIFT;
if (length == E1000_CABLE_LENGTH_UNDEFINED)
- ret_val = -E1000_ERR_PHY;
+ return -E1000_ERR_PHY;
phy->cable_length = length;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
new file mode 100644
index 000000000000..f4f71b9991e3
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -0,0 +1,242 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_PHY_H_
+#define _E1000E_PHY_H_
+
+s32 e1000e_check_downshift(struct e1000_hw *hw);
+s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
+s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw);
+s32 e1000e_get_phy_id(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
+void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
+s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000e_setup_copper_link(struct e1000_hw *hw);
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR 8
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE 769
+#define BM_WUC_PAGE 800
+#define BM_WUC_ADDRESS_OPCODE 0x11
+#define BM_WUC_DATA_OPCODE 0x12
+#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
+#define BM_WUC_ENABLE_REG 17
+#define BM_WUC_ENABLE_BIT (1 << 2)
+#define BM_WUC_HOST_WU_BIT (1 << 4)
+#define BM_WUC_ME_WU_BIT (1 << 5)
+
+#define PHY_UPPER_SHIFT 21
+#define BM_PHY_REG(page, reg) \
+ (((reg) & MAX_PHY_REG_ADDRESS) |\
+ (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+ (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
+#define BM_PHY_REG_PAGE(offset) \
+ ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+ ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+ (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+ ~MAX_PHY_REG_ADDRESS)))
+
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CTRL_REG 23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_LBK_CTRL 19
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1 16
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS 17
+#define BM_CS_STATUS_LINK_UP 0x0400
+#define BM_CS_STATUS_RESOLVED 0x0800
+#define BM_CS_STATUS_SPEED_MASK 0xC000
+#define BM_CS_STATUS_SPEED_1000 0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS 26
+#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
+#define HV_M_STATUS_SPEED_MASK 0x0300
+#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_LINK_UP 0x0040
+
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse=15:13, Fine=12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */
+#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED 0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
+#define IFE_PSC_FORCE_POLARITY 0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE 0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
+
+#endif
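
The BM_PHY_REG()/BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() macros in the header above pack a PHY page number and a register number into one 32-bit offset. A worked round-trip example follows; PHY_PAGE_SHIFT and MAX_PHY_REG_ADDRESS are defined in other driver headers, so the usual values 5 and 0x1F are assumed here for the sake of a self-contained snippet:

#include <stdint.h>
#include <stdio.h>

/* Assumed values; the real definitions come from the driver's other headers. */
#define MAX_PHY_REG_ADDRESS	0x1F
#define PHY_PAGE_SHIFT		5
#define PHY_UPPER_SHIFT		21

#define BM_PHY_REG(page, reg) \
	(((reg) & MAX_PHY_REG_ADDRESS) | \
	 (((page) & 0xFFFF) << PHY_PAGE_SHIFT) | \
	 (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
#define BM_PHY_REG_PAGE(offset) \
	((uint16_t)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
#define BM_PHY_REG_NUM(offset) \
	((uint16_t)(((offset) & MAX_PHY_REG_ADDRESS) | \
		    (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) & \
		     ~MAX_PHY_REG_ADDRESS)))

int main(void)
{
	/* Page 800 (BM_WUC_PAGE), register 0x11 (BM_WUC_ADDRESS_OPCODE). */
	uint32_t offset = BM_PHY_REG(800, 0x11);

	/* Bits 4:0 carry the register, bits 20:5 the page; the value
	 * round-trips back to (800, 0x11), i.e. offset == 0x6411.
	 */
	printf("offset=0x%x page=%u reg=0x%x\n", (unsigned)offset,
	       (unsigned)BM_PHY_REG_PAGE(offset),
	       (unsigned)BM_PHY_REG_NUM(offset));
	return 0;
}
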
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
new file mode 100644
index 000000000000..b477fa53ec94
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -0,0 +1,277 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* PTP 1588 Hardware Clock (PHC)
+ * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
+ * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
+ */
+
+#include "e1000.h"
+
+/**
+ * e1000e_phc_adjfreq - adjust the frequency of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired frequency change in parts per billion
+ *
+ * Adjust the frequency of the PHC cycle counter by the indicated delta from
+ * the base frequency.
+ **/
+static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ struct e1000_hw *hw = &adapter->hw;
+ bool neg_adj = false;
+ u64 adjustment;
+ u32 timinca, incvalue;
+ s32 ret_val;
+
+ if ((delta > ptp->max_adj) || (delta <= -1000000000))
+ return -EINVAL;
+
+ if (delta < 0) {
+ neg_adj = true;
+ delta = -delta;
+ }
+
+ /* Get the System Time Register SYSTIM base frequency */
+ ret_val = e1000e_get_base_timinca(adapter, &timinca);
+ if (ret_val)
+ return ret_val;
+
+ incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
+
+ adjustment = incvalue;
+ adjustment *= delta;
+ adjustment = div_u64(adjustment, 1000000000);
+
+ incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment);
+
+ timinca &= ~E1000_TIMINCA_INCVALUE_MASK;
+ timinca |= incvalue;
+
+ ew32(TIMINCA, timinca);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_adjtime - Shift the time of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired change in nanoseconds
+ *
+ * Adjust the timer by resetting the timecounter structure.
+ **/
+static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+ s64 now;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ now = timecounter_read(&adapter->tc);
+ now += delta;
+ timecounter_init(&adapter->tc, &adapter->cc, now);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_gettime - Reads the current time from the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the correct value in ns after converting
+ * it into a struct timespec.
+ **/
+static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+ u32 remainder;
+ u64 ns;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ ns = timecounter_read(&adapter->tc);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec containing the new time for the cycle counter
+ *
+ * Reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ **/
+static int e1000e_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 ns;
+
+ ns = ts->tv_sec * NSEC_PER_SEC;
+ ns += ts->tv_nsec;
+
+ /* reset the timecounter */
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ timecounter_init(&adapter->tc, &adapter->cc, ns);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_enable - enable or disable an ancillary feature
+ * @ptp: ptp clock structure
+ * @request: Desired resource to enable or disable
+ * @on: Caller passes one to enable or zero to disable
+ *
+ * Enable (or disable) ancillary features of the PHC subsystem.
+ * Currently, no ancillary features are supported.
+ **/
+static int e1000e_phc_enable(struct ptp_clock_info __always_unused *ptp,
+ struct ptp_clock_request __always_unused *request,
+ int __always_unused on)
+{
+ return -EOPNOTSUPP;
+}
+
+static void e1000e_systim_overflow_work(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
+ systim_overflow_work.work);
+ struct e1000_hw *hw = &adapter->hw;
+ struct timespec ts;
+
+ adapter->ptp_clock_info.gettime(&adapter->ptp_clock_info, &ts);
+
+ e_dbg("SYSTIM overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+
+ schedule_delayed_work(&adapter->systim_overflow_work,
+ E1000_SYSTIM_OVERFLOW_PERIOD);
+}
+
+static const struct ptp_clock_info e1000e_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = e1000e_phc_adjfreq,
+ .adjtime = e1000e_phc_adjtime,
+ .gettime = e1000e_phc_gettime,
+ .settime = e1000e_phc_settime,
+ .enable = e1000e_phc_enable,
+};
+
+/**
+ * e1000e_ptp_init - initialize PTP for devices which support it
+ * @adapter: board private structure
+ *
+ * This function performs the required steps for enabling PTP support.
+ * If PTP support has already been loaded it simply calls the cyclecounter
+ * init routine and exits.
+ **/
+void e1000e_ptp_init(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->ptp_clock = NULL;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ adapter->ptp_clock_info = e1000e_ptp_clock_info;
+
+ snprintf(adapter->ptp_clock_info.name,
+ sizeof(adapter->ptp_clock_info.name), "%pm",
+ adapter->netdev->perm_addr);
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ if ((hw->mac.type != e1000_pch_lpt) ||
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ adapter->ptp_clock_info.max_adj = 24000000 - 1;
+ break;
+ }
+ /* fall-through */
+ case e1000_82574:
+ case e1000_82583:
+ adapter->ptp_clock_info.max_adj = 600000000 - 1;
+ break;
+ default:
+ break;
+ }
+
+ INIT_DELAYED_WORK(&adapter->systim_overflow_work,
+ e1000e_systim_overflow_work);
+
+ schedule_delayed_work(&adapter->systim_overflow_work,
+ E1000_SYSTIM_OVERFLOW_PERIOD);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ adapter->ptp_clock = NULL;
+ e_err("ptp_clock_register failed\n");
+ } else {
+ e_info("registered PHC clock\n");
+ }
+}
+
+/**
+ * e1000e_ptp_remove - disable PTP device and stop the overflow check
+ * @adapter: board private structure
+ *
+ * Stop the PTP support, and cancel the delayed work.
+ **/
+void e1000e_ptp_remove(struct e1000_adapter *adapter)
+{
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ cancel_delayed_work_sync(&adapter->systim_overflow_work);
+
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ e_info("removed PHC\n");
+ }
+}
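
e1000e_phc_adjfreq() in the new file above scales the TIMINCA increment value by the requested parts-per-billion delta using 64-bit intermediate math. A standalone sketch of that scaling; the base increment of 2^24 used below is only an illustrative figure, not a value read from the hardware:

#include <stdint.h>
#include <stdio.h>

/* new_inc = inc +/- inc * |ppb| / 1e9, mirroring the driver's div_u64() math. */
static uint32_t scale_incvalue(uint32_t incvalue, int32_t ppb)
{
	uint32_t mag = ppb < 0 ? (uint32_t)-ppb : (uint32_t)ppb;
	uint64_t adjustment = (uint64_t)incvalue * mag / 1000000000ULL;

	return ppb < 0 ? incvalue - (uint32_t)adjustment
		       : incvalue + (uint32_t)adjustment;
}

int main(void)
{
	/* Slow an illustrative increment of 2^24 by 100 ppm (100000 ppb):
	 * 16777216 * 100000 / 1e9 = 1677, giving 16775539.
	 */
	printf("%u\n", scale_incvalue(16777216, -100000));
	return 0;
}
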
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
new file mode 100644
index 000000000000..794fe1497666
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -0,0 +1,252 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_REGS_H_
+#define _E1000E_REGS_H_
+
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
+#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
+#define E1000_SVCR 0x000F0
+#define E1000_SVT 0x000F4
+#define E1000_LPIC 0x000FC /* Low Power IDLE control */
+#define E1000_RCTL 0x00100 /* Rx Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
+#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */
+#define E1000_TCTL 0x00400 /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
+#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
+#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */
+
+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
+#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control */
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+/* Management Decision Filters */
+#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
+#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+/* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_SWSM2 0x05B58
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
+#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
+
+#endif
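
The per-queue macros in the header above (E1000_RDBAL() through E1000_TXDCTL()) place queues 0-3 in the legacy register blocks with a 0x100 stride and later queues in the 0x0C000/0x0E000 blocks with a 0x40 stride. A quick check of the offsets the E1000_RDT() form produces:

#include <stdio.h>

/* Same shape as the E1000_RDT() macro defined in regs.h above. */
#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
		       (0x0C018 + ((_n) * 0x40)))

int main(void)
{
	int n;

	/* Queues 0-3: 0x02818, 0x02918, 0x02A18, 0x02B18.
	 * Queues 4-5: 0x0C118, 0x0C158.
	 */
	for (n = 0; n < 6; n++)
		printf("RDT(%d) = 0x%05X\n", n, E1000_RDT(n));
	return 0;
}
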
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 624476cfa727..f19700e285bb 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Copyright(c) 1999 - 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -34,4 +34,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o
+ e1000_i210.o igb_ptp.o igb_hwmon.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index fdaaf2709d0a..84e7e0909def 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -33,6 +33,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/i2c.h>
#include "e1000_mac.h"
#include "e1000_82575.h"
@@ -110,184 +111,168 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
return ext_mdio;
}
-static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+/**
+ * igb_init_phy_params_82575 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- struct e1000_nvm_info *nvm = &hw->nvm;
- struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
- u32 eecd;
- s32 ret_val;
- u16 size;
- u32 ctrl_ext = 0;
+ s32 ret_val = 0;
+ u32 ctrl_ext;
- switch (hw->device_id) {
- case E1000_DEV_ID_82575EB_COPPER:
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- mac->type = e1000_82575;
- break;
- case E1000_DEV_ID_82576:
- case E1000_DEV_ID_82576_NS:
- case E1000_DEV_ID_82576_NS_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- case E1000_DEV_ID_82576_SERDES_QUAD:
- mac->type = e1000_82576;
- break;
- case E1000_DEV_ID_82580_COPPER:
- case E1000_DEV_ID_82580_FIBER:
- case E1000_DEV_ID_82580_QUAD_FIBER:
- case E1000_DEV_ID_82580_SERDES:
- case E1000_DEV_ID_82580_SGMII:
- case E1000_DEV_ID_82580_COPPER_DUAL:
- case E1000_DEV_ID_DH89XXCC_SGMII:
- case E1000_DEV_ID_DH89XXCC_SERDES:
- case E1000_DEV_ID_DH89XXCC_BACKPLANE:
- case E1000_DEV_ID_DH89XXCC_SFP:
- mac->type = e1000_82580;
- break;
- case E1000_DEV_ID_I350_COPPER:
- case E1000_DEV_ID_I350_FIBER:
- case E1000_DEV_ID_I350_SERDES:
- case E1000_DEV_ID_I350_SGMII:
- mac->type = e1000_i350;
- break;
- case E1000_DEV_ID_I210_COPPER:
- case E1000_DEV_ID_I210_COPPER_OEM1:
- case E1000_DEV_ID_I210_COPPER_IT:
- case E1000_DEV_ID_I210_FIBER:
- case E1000_DEV_ID_I210_SERDES:
- case E1000_DEV_ID_I210_SGMII:
- mac->type = e1000_i210;
- break;
- case E1000_DEV_ID_I211_COPPER:
- mac->type = e1000_i211;
- break;
- default:
- return -E1000_ERR_MAC_INIT;
- break;
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ goto out;
}
- /* Set media type */
- /*
- * The 82575 uses bits 22:23 for link mode. The mode can be changed
- * based on the EEPROM. We cannot rely upon device ID. There
- * is no distinguishable difference between fiber and internal
- * SerDes mode on the 82575. There can be an external PHY attached
- * on the SGMII interface. For this, we'll set sgmii_active to true.
- */
- phy->media_type = e1000_media_type_copper;
- dev_spec->sgmii_active = false;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
ctrl_ext = rd32(E1000_CTRL_EXT);
- switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
- case E1000_CTRL_EXT_LINK_MODE_SGMII:
- dev_spec->sgmii_active = true;
- break;
- case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
- case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
- hw->phy.media_type = e1000_media_type_internal_serdes;
- break;
- default:
- break;
+
+ if (igb_sgmii_active_82575(hw)) {
+ phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ } else {
+ phy->ops.reset = igb_phy_hw_reset;
+ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
}
- /* Set mta register count */
- mac->mta_reg_count = 128;
- /* Set rar entry count */
- switch (mac->type) {
- case e1000_82576:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+ igb_reset_mdicnfg_82580(hw);
+
+ if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
+ phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
+ phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
+ } else {
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ phy->ops.read_reg = igb_read_phy_reg_82580;
+ phy->ops.write_reg = igb_write_phy_reg_82580;
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ phy->ops.read_reg = igb_read_phy_reg_gs40g;
+ phy->ops.write_reg = igb_write_phy_reg_gs40g;
+ break;
+ default:
+ phy->ops.read_reg = igb_read_phy_reg_igp;
+ phy->ops.write_reg = igb_write_phy_reg_igp;
+ }
+ }
+
+ /* set lan id */
+ hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+ E1000_STATUS_FUNC_SHIFT;
+
+ /* Set phy->phy_addr and phy->id. */
+ ret_val = igb_get_phy_id_82575(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ phy->type = e1000_phy_m88;
+ phy->ops.get_phy_info = igb_get_phy_info_m88;
+ if (phy->id == I347AT4_E_PHY_ID ||
+ phy->id == M88E1112_E_PHY_ID)
+ phy->ops.get_cable_length =
+ igb_get_cable_length_m88_gen2;
+ else
+ phy->ops.get_cable_length = igb_get_cable_length_m88;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break;
- case e1000_82580:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ case IGP03E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->ops.get_phy_info = igb_get_phy_info_igp;
+ phy->ops.get_cable_length = igb_get_cable_length_igp_2;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
break;
- case e1000_i350:
- mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+ case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
+ phy->type = e1000_phy_82580;
+ phy->ops.force_speed_duplex =
+ igb_phy_force_speed_duplex_82580;
+ phy->ops.get_cable_length = igb_get_cable_length_82580;
+ phy->ops.get_phy_info = igb_get_phy_info_82580;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
break;
- default:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+ case I210_I_PHY_ID:
+ phy->type = e1000_phy_i210;
+ phy->ops.check_polarity = igb_check_polarity_m88;
+ phy->ops.get_phy_info = igb_get_phy_info_m88;
+ phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
}
- /* reset */
- if (mac->type >= e1000_82580)
- mac->ops.reset_hw = igb_reset_hw_82580;
- else
- mac->ops.reset_hw = igb_reset_hw_82575;
- if (mac->type >= e1000_i210) {
- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
- mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
- } else {
- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
- mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
- }
+out:
+ return ret_val;
+}
- /* Set if part includes ASF firmware */
- mac->asf_firmware_present = true;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid =
- (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
- /* enable EEE on i350 parts and later parts */
- if (mac->type >= e1000_i350)
- dev_spec->eee_disable = false;
- else
- dev_spec->eee_disable = true;
- /* physical interface link setup */
- mac->ops.setup_physical_interface =
- (hw->phy.media_type == e1000_media_type_copper)
- ? igb_setup_copper_link_82575
- : igb_setup_serdes_link_82575;
+/**
+ * igb_init_nvm_params_82575 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = rd32(E1000_EECD);
+ u16 size;
- /* NVM initialization */
- eecd = rd32(E1000_EECD);
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
-
- /*
- * Added to a constant, "size" becomes the left-shift value
+ /* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
- /*
- * Check for invalid size
+ /* Just in case size is out of range, cap it to the largest
+ * EEPROM size supported
*/
- if ((hw->mac.type == e1000_82576) && (size > 15)) {
- pr_notice("The NVM size is not valid, defaulting to 32K\n");
+ if (size > 15)
size = 15;
- }
nvm->word_size = 1 << size;
if (hw->mac.type < e1000_i210) {
- nvm->opcode_bits = 8;
- nvm->delay_usec = 1;
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+
switch (nvm->override) {
case e1000_nvm_override_spi_large:
- nvm->page_size = 32;
+ nvm->page_size = 32;
nvm->address_bits = 16;
break;
case e1000_nvm_override_spi_small:
- nvm->page_size = 8;
+ nvm->page_size = 8;
nvm->address_bits = 8;
break;
default:
- nvm->page_size = eecd
- & E1000_EECD_ADDR_BITS ? 32 : 8;
- nvm->address_bits = eecd
- & E1000_EECD_ADDR_BITS ? 16 : 8;
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
break;
}
if (nvm->word_size == (1 << 15))
nvm->page_size = 128;
nvm->type = e1000_nvm_eeprom_spi;
- } else
+ } else {
nvm->type = e1000_nvm_flash_hw;
+ }
/* NVM Function Pointers */
switch (hw->mac.type) {
@@ -344,118 +329,176 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
}
- /* if part supports SR-IOV then initialize mailbox parameters */
+ return 0;
+}
+
+/**
+ * igb_init_mac_params_82575 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
switch (mac->type) {
case e1000_82576:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ break;
+ case e1000_82580:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ break;
case e1000_i350:
- igb_init_mbx_params_pf(hw);
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
break;
default:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
break;
}
+ /* reset */
+ if (mac->type >= e1000_82580)
+ mac->ops.reset_hw = igb_reset_hw_82580;
+ else
+ mac->ops.reset_hw = igb_reset_hw_82575;
- /* setup PHY parameters */
- if (phy->media_type != e1000_media_type_copper) {
- phy->type = e1000_phy_none;
- return 0;
- }
-
- phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- phy->reset_delay_us = 100;
-
- ctrl_ext = rd32(E1000_CTRL_EXT);
+ if (mac->type >= e1000_i210) {
+ mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
+ mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
- /* PHY function pointers */
- if (igb_sgmii_active_82575(hw)) {
- phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
- ctrl_ext |= E1000_CTRL_I2C_ENA;
} else {
- phy->ops.reset = igb_phy_hw_reset;
- ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
+ mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
}
- wr32(E1000_CTRL_EXT, ctrl_ext);
- igb_reset_mdicnfg_82580(hw);
-
- if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
- } else if ((hw->mac.type == e1000_82580)
- || (hw->mac.type == e1000_i350)) {
- phy->ops.read_reg = igb_read_phy_reg_82580;
- phy->ops.write_reg = igb_write_phy_reg_82580;
- } else if (hw->phy.type >= e1000_phy_i210) {
- phy->ops.read_reg = igb_read_phy_reg_gs40g;
- phy->ops.write_reg = igb_write_phy_reg_gs40g;
- } else {
- phy->ops.read_reg = igb_read_phy_reg_igp;
- phy->ops.write_reg = igb_write_phy_reg_igp;
- }
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* Set if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
+ /* enable EEE on i350 parts and later parts */
+ if (mac->type >= e1000_i350)
+ dev_spec->eee_disable = false;
+ else
+ dev_spec->eee_disable = true;
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? igb_setup_copper_link_82575
+ : igb_setup_serdes_link_82575;
- /* set lan id */
- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
- E1000_STATUS_FUNC_SHIFT;
+ return 0;
+}
- /* Set phy->phy_addr and phy->id. */
- ret_val = igb_get_phy_id_82575(hw);
- if (ret_val)
- return ret_val;
+static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ s32 ret_val;
+ u32 ctrl_ext = 0;
- /* Verify phy id and set remaining function pointers */
- switch (phy->id) {
- case I347AT4_E_PHY_ID:
- case M88E1112_E_PHY_ID:
- case M88E1111_I_PHY_ID:
- phy->type = e1000_phy_m88;
- phy->ops.get_phy_info = igb_get_phy_info_m88;
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ mac->type = e1000_82575;
+ break;
+ case E1000_DEV_ID_82576:
+ case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_NS_SERDES:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ case E1000_DEV_ID_82576_SERDES_QUAD:
+ mac->type = e1000_82576;
+ break;
+ case E1000_DEV_ID_82580_COPPER:
+ case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
+ case E1000_DEV_ID_82580_SERDES:
+ case E1000_DEV_ID_82580_SGMII:
+ case E1000_DEV_ID_82580_COPPER_DUAL:
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ mac->type = e1000_82580;
+ break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ mac->type = e1000_i350;
+ break;
+ case E1000_DEV_ID_I210_COPPER:
+ case E1000_DEV_ID_I210_COPPER_OEM1:
+ case E1000_DEV_ID_I210_COPPER_IT:
+ case E1000_DEV_ID_I210_FIBER:
+ case E1000_DEV_ID_I210_SERDES:
+ case E1000_DEV_ID_I210_SGMII:
+ mac->type = e1000_i210;
+ break;
+ case E1000_DEV_ID_I211_COPPER:
+ mac->type = e1000_i211;
+ break;
+ default:
+ return -E1000_ERR_MAC_INIT;
+ break;
+ }
- if (phy->id == I347AT4_E_PHY_ID ||
- phy->id == M88E1112_E_PHY_ID)
- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
- else
- phy->ops.get_cable_length = igb_get_cable_length_m88;
+ /* Set media type */
+ /*
+ * The 82575 uses bits 22:23 for link mode. The mode can be changed
+ * based on the EEPROM. We cannot rely upon device ID. There
+ * is no distinguishable difference between fiber and internal
+ * SerDes mode on the 82575. There can be an external PHY attached
+ * on the SGMII interface. For this, we'll set sgmii_active to true.
+ */
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = false;
- if (phy->id == I210_I_PHY_ID) {
- phy->ops.get_cable_length =
- igb_get_cable_length_m88_gen2;
- phy->ops.set_d0_lplu_state =
- igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state =
- igb_set_d3_lplu_state_82580;
- }
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ dev_spec->sgmii_active = true;
break;
- case IGP03E1000_E_PHY_ID:
- phy->type = e1000_phy_igp_3;
- phy->ops.get_phy_info = igb_get_phy_info_igp;
- phy->ops.get_cable_length = igb_get_cable_length_igp_2;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
break;
- case I82580_I_PHY_ID:
- case I350_I_PHY_ID:
- phy->type = e1000_phy_82580;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
- phy->ops.get_cable_length = igb_get_cable_length_82580;
- phy->ops.get_phy_info = igb_get_phy_info_82580;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+ default:
break;
- case I210_I_PHY_ID:
- phy->type = e1000_phy_i210;
- phy->ops.get_phy_info = igb_get_phy_info_m88;
- phy->ops.check_polarity = igb_check_polarity_m88;
- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ }
+
+ /* mac initialization and operations */
+ ret_val = igb_init_mac_params_82575(hw);
+ if (ret_val)
+ goto out;
+
+ /* NVM initialization */
+ ret_val = igb_init_nvm_params_82575(hw);
+ if (ret_val)
+ goto out;
+
+ /* if part supports SR-IOV then initialize mailbox parameters */
+ switch (mac->type) {
+ case e1000_82576:
+ case e1000_i350:
+ igb_init_mbx_params_pf(hw);
break;
default:
- return -E1000_ERR_PHY;
+ break;
}
- return 0;
+ /* setup PHY parameters */
+ ret_val = igb_init_phy_params_82575(hw);
+
+out:
+ return ret_val;
}
/**
@@ -2302,18 +2345,157 @@ out:
return ret_val;
}
+static const u8 e1000_emc_temp_data[4] = {
+ E1000_EMC_INTERNAL_DATA,
+ E1000_EMC_DIODE1_DATA,
+ E1000_EMC_DIODE2_DATA,
+ E1000_EMC_DIODE3_DATA
+};
+static const u8 e1000_emc_therm_limit[4] = {
+ E1000_EMC_INTERNAL_THERM_LIMIT,
+ E1000_EMC_DIODE1_THERM_LIMIT,
+ E1000_EMC_DIODE2_THERM_LIMIT,
+ E1000_EMC_DIODE3_THERM_LIMIT
+};
+
+/* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ */
+s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 i;
+ struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+ return E1000_NOT_IMPLEMENTED;
+
+ data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
+
+ /* Return the internal sensor only if ETS is unsupported */
+ hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+ return status;
+
+ hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+ if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+
+ num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+ if (num_sensors > E1000_MAX_SENSORS)
+ num_sensors = E1000_MAX_SENSORS;
+
+ for (i = 1; i < num_sensors; i++) {
+ hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+ sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+ NVM_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+ NVM_ETS_DATA_LOC_SHIFT);
+
+ if (sensor_location != 0)
+ hw->phy.ops.read_i2c_byte(hw,
+ e1000_emc_temp_data[sensor_index],
+ E1000_I2C_THERMAL_SENSOR_ADDR,
+ &data->sensor[i].temp);
+ }
+ return status;
+}
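As a rough illustration of the decode above, the helper below is hypothetical (not part of this patch) and only shows how one ETS sensor word is unpacked with the NVM_ETS_DATA_* masks added to e1000_defines.h in this same series.

/* Hypothetical decode of a single ETS sensor word read from the NVM. */
static void igb_decode_ets_word_sketch(u16 ets_sensor)
{
	u8 index = (ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
		   NVM_ETS_DATA_INDEX_SHIFT;	/* selects the EMC data register */
	u8 location = (ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
		      NVM_ETS_DATA_LOC_SHIFT;	/* 0 means no diode fitted */
	u8 high_thresh = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; /* degrees C */

	pr_info("ETS sensor: index %u location %u high threshold %u\n",
		index, location, high_thresh);
}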
+
+/* igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Sets the thermal sensor thresholds according to the NVM map
+ * and saves off the threshold and location values into mac.thermal_sensor_data
+ */
+s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 low_thresh_delta;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 therm_limit;
+ u8 i;
+ struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+ return E1000_NOT_IMPLEMENTED;
+
+ memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
+
+ data->sensor[0].location = 0x1;
+ data->sensor[0].caution_thresh =
+ (rd32(E1000_THHIGHTC) & 0xFF);
+ data->sensor[0].max_op_thresh =
+ (rd32(E1000_THLOWTC) & 0xFF);
+
+ /* Return the internal sensor only if ETS is unsupported */
+ hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+ return status;
+
+ hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+ if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+
+ low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
+ NVM_ETS_LTHRES_DELTA_SHIFT);
+ num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+
+ for (i = 1; i <= num_sensors; i++) {
+ hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+ sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+ NVM_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+ NVM_ETS_DATA_LOC_SHIFT);
+ therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
+
+ hw->phy.ops.write_i2c_byte(hw,
+ e1000_emc_therm_limit[sensor_index],
+ E1000_I2C_THERMAL_SENSOR_ADDR,
+ therm_limit);
+
+ if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
+ data->sensor[i].location = sensor_location;
+ data->sensor[i].caution_thresh = therm_limit;
+ data->sensor[i].max_op_thresh = therm_limit -
+ low_thresh_delta;
+ }
+ }
+ return status;
+}
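A worked example of the threshold bookkeeping above, with invented numbers:

/* If a sensor word carries a high threshold of 100 C and the config word
 * encodes a low-threshold delta of 5, the function stores
 *
 *	caution_thresh = 100		(the limit written to the EMC)
 *	max_op_thresh  = 100 - 5 = 95
 *
 * while sensor[0] always mirrors the on-die sensor via THHIGHTC/THLOWTC.
 */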
+
static struct e1000_mac_operations e1000_mac_ops_82575 = {
.init_hw = igb_init_hw_82575,
.check_for_link = igb_check_for_link_82575,
.rar_set = igb_rar_set,
.read_mac_addr = igb_read_mac_addr_82575,
.get_speed_and_duplex = igb_get_speed_and_duplex_copper,
+#ifdef CONFIG_IGB_HWMON
+ .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
+ .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
+#endif
};
static struct e1000_phy_operations e1000_phy_ops_82575 = {
.acquire = igb_acquire_phy_82575,
.get_cfg_done = igb_get_cfg_done_82575,
.release = igb_release_phy_82575,
+ .write_i2c_byte = igb_write_i2c_byte,
+ .read_i2c_byte = igb_read_i2c_byte,
};
static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 44b76b3b6816..73ab41f0e032 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -32,6 +32,10 @@ extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_DEF1_DEF2 << 8) | \
@@ -260,5 +264,16 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
u16 igb_rxpbs_adjust_82580(u32 data);
s32 igb_set_eee_i350(struct e1000_hw *);
-
+s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
+s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+
+#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define E1000_EMC_INTERNAL_DATA 0x00
+#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20
+#define E1000_EMC_DIODE1_DATA 0x01
+#define E1000_EMC_DIODE1_THERM_LIMIT 0x19
+#define E1000_EMC_DIODE2_DATA 0x23
+#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A
+#define E1000_EMC_DIODE3_DATA 0x2A
+#define E1000_EMC_DIODE3_THERM_LIMIT 0x30
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 45dce06eff26..7e13337d3b9d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -470,6 +470,7 @@
#define E1000_ERR_NO_SPACE 17
#define E1000_ERR_NVM_PBA_SECTION 18
#define E1000_ERR_INVM_VALUE_NOT_FOUND 19
+#define E1000_ERR_I2C 20
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
@@ -674,6 +675,18 @@
#define NVM_COMB_VER_SHFT 8
#define NVM_VER_INVALID 0xFFFF
#define NVM_ETRACK_SHIFT 16
+#define NVM_ETS_CFG 0x003E
+#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
+#define NVM_ETS_LTHRES_DELTA_SHIFT 6
+#define NVM_ETS_TYPE_MASK 0x0038
+#define NVM_ETS_TYPE_SHIFT 3
+#define NVM_ETS_TYPE_EMC 0x000
+#define NVM_ETS_NUM_SENSORS_MASK 0x0007
+#define NVM_ETS_DATA_LOC_MASK 0x3C00
+#define NVM_ETS_DATA_LOC_SHIFT 10
+#define NVM_ETS_DATA_INDEX_MASK 0x0300
+#define NVM_ETS_DATA_INDEX_SHIFT 8
+#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF
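The bit layout implied by these masks and shifts, derived purely from the values above and shown here only for reference:

/* ETS config word (at NVM_ETS_CFG):
 *	bits 10:6  low threshold delta	(NVM_ETS_LTHRES_DELTA_*)
 *	bits  5:3  interface type	(NVM_ETS_TYPE_*, 0 = EMC)
 *	bits  2:0  number of sensors	(NVM_ETS_NUM_SENSORS_MASK)
 *
 * ETS sensor data word (config offset + n):
 *	bits 13:10 sensor location	(NVM_ETS_DATA_LOC_*)
 *	bits  9:8  EMC register index	(NVM_ETS_DATA_INDEX_*)
 *	bits  7:0  high temp threshold	(NVM_ETS_DATA_HTHRESH_MASK)
 */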
#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index c2a51dcda550..0d5cf9c63d0d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -325,6 +325,10 @@ struct e1000_mac_operations {
s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
+#ifdef CONFIG_IGB_HWMON
+ s32 (*get_thermal_sensor_data)(struct e1000_hw *);
+ s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
+#endif
};
@@ -342,6 +346,8 @@ struct e1000_phy_operations {
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
};
struct e1000_nvm_operations {
@@ -354,6 +360,19 @@ struct e1000_nvm_operations {
s32 (*valid_led_default)(struct e1000_hw *, u16 *);
};
+#define E1000_MAX_SENSORS 3
+
+struct e1000_thermal_diode_data {
+ u8 location;
+ u8 temp;
+ u8 caution_thresh;
+ u8 max_op_thresh;
+};
+
+struct e1000_thermal_sensor_data {
+ struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS];
+};
+
struct e1000_info {
s32 (*get_invariants)(struct e1000_hw *);
struct e1000_mac_operations *mac_ops;
@@ -399,6 +418,7 @@ struct e1000_mac_info {
bool report_tx_early;
bool serdes_has_link;
bool tx_pkt_filtering;
+ struct e1000_thermal_sensor_data thermal_sensor_data;
};
struct e1000_phy_info {
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index fbcdbebb0b5f..6a42344f24f1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 1c89358a99ab..e4e1a73b7c75 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 101e6e4da97f..a5c7200b9a71 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e2b2c4b9c951..e6d6ce433261 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 5988b8958baf..38e0df350904 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index dbcfa3d5caec..c13b56d9edb2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index fbb7604db364..5b62adbe134d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 7012d458c6f7..6bfc0c43aace 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2012 Intel Corporation.
+ Copyright(c) 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index fe76004aca4e..2918c979b5bb 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index ed282f877d9a..784fd1c40989 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index e5db48594e8a..15343286082e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -75,6 +75,14 @@
#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */
+#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */
+#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */
+#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */
+#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
+#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
+#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
/* IEEE 1588 TIMESYNCH */
#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
@@ -124,6 +132,14 @@
/* Split and Replication RX Control - RW */
#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+
+/* Thermal sensor configuration and status registers */
+#define E1000_THMJT 0x08100 /* Junction Temperature */
+#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
+#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+
/*
* Convenience macros
*
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 17f1686ee411..d27edbc63923 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -39,6 +39,8 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
struct igb_adapter;
@@ -137,8 +139,6 @@ struct vf_data_storage {
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -167,6 +167,17 @@ enum igb_tx_flags {
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
+/*
+ * The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR 15
+#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
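A quick numeric sketch of the new descriptor accounting (illustrative only):

/* IGB_MAX_DATA_PER_TXD = 1 << 15 = 32768 bytes, so for example:
 *
 *	TXD_USE_COUNT(1514)  = DIV_ROUND_UP(1514, 32768)  = 1 descriptor
 *	TXD_USE_COUNT(65536) = DIV_ROUND_UP(65536, 32768) = 2 descriptors
 *
 * DESC_NEEDED is the worst case used by igb_xmit_frame_ring() when deciding
 * whether to stop the queue and, doubled, as the wake threshold.
 */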
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct igb_tx_buffer {
@@ -219,6 +230,7 @@ struct igb_ring {
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
};
+ unsigned long last_rx_timestamp;
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
@@ -272,10 +284,18 @@ struct igb_q_vector {
enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+ IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
+#define ring_uses_build_skb(ring) \
+ test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+ set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+ clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
#define IGB_RX_DESC(R, i) \
@@ -301,6 +321,32 @@ static inline int igb_desc_unused(struct igb_ring *ring)
return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
+struct igb_i2c_client_list {
+ struct i2c_client *client;
+ struct igb_i2c_client_list *next;
+};
+
+#ifdef CONFIG_IGB_HWMON
+
+#define IGB_HWMON_TYPE_LOC 0
+#define IGB_HWMON_TYPE_TEMP 1
+#define IGB_HWMON_TYPE_CAUTION 2
+#define IGB_HWMON_TYPE_MAX 3
+
+struct hwmon_attr {
+ struct device_attribute dev_attr;
+ struct e1000_hw *hw;
+ struct e1000_thermal_diode_data *sensor;
+ char name[12];
+};
+
+struct hwmon_buff {
+ struct device *device;
+ struct hwmon_attr *hwmon_list;
+ unsigned int n_hwmon;
+};
+#endif
+
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -386,11 +432,22 @@ struct igb_adapter {
struct delayed_work ptp_overflow_work;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ u32 tx_hwtstamp_timeouts;
+ u32 rx_hwtstamp_cleared;
char fw_version[32];
+#ifdef CONFIG_IGB_HWMON
+ struct hwmon_buff igb_hwmon_buff;
+ bool ets;
+#endif
+ struct i2c_algo_bit_data i2c_algo;
+ struct i2c_adapter i2c_adap;
+ struct igb_i2c_client_list *i2c_clients;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -449,6 +506,7 @@ extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_stop(struct igb_adapter *adapter);
extern void igb_ptp_reset(struct igb_adapter *adapter);
extern void igb_ptp_tx_work(struct work_struct *work);
+extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb);
@@ -466,7 +524,10 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd);
-
+#ifdef CONFIG_IGB_HWMON
+extern void igb_sysfs_exit(struct igb_adapter *adapter);
+extern int igb_sysfs_init(struct igb_adapter *adapter);
+#endif
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
if (hw->phy.ops.reset)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index bfe9208c4b18..a3830a8ba4c1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -92,6 +92,8 @@ static const struct igb_stats igb_gstrings_stats[] = {
IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+ IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
#define IGB_NETDEV_STAT(_net_stat) { \
@@ -1889,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
} else {
hw->mac.ops.check_for_link(&adapter->hw);
if (hw->mac.autoneg)
- msleep(4000);
+ msleep(5000);
if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
*data = 1;
@@ -2272,12 +2274,21 @@ static int igb_get_ts_info(struct net_device *dev,
struct igb_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
+ case e1000_82575:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
case e1000_82576:
case e1000_82580:
case e1000_i350:
case e1000_i210:
case e1000_i211:
info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
new file mode 100644
index 000000000000..0a9b073d0b03
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -0,0 +1,242 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/hwmon.h>
+#include <linux/pci.h>
+
+#ifdef CONFIG_IGB_HWMON
+/* hwmon callback functions */
+static ssize_t igb_hwmon_show_location(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ return sprintf(buf, "loc%u\n",
+ igb_attr->sensor->location);
+}
+
+static ssize_t igb_hwmon_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value;
+
+ /* reset the temp field */
+ igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw);
+
+ value = igb_attr->sensor->temp;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value = igb_attr->sensor->caution_thresh;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value = igb_attr->sensor->max_op_thresh;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute.
+ * This is included in our hwmon_attr struct that contains the references to
+ * the data structures we need to get the data to display.
+ */
+static int igb_add_hwmon_attr(struct igb_adapter *adapter,
+			      unsigned int offset, int type)
+{
+ int rc;
+ unsigned int n_attr;
+ struct hwmon_attr *igb_attr;
+
+ n_attr = adapter->igb_hwmon_buff.n_hwmon;
+ igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+
+ switch (type) {
+ case IGB_HWMON_TYPE_LOC:
+ igb_attr->dev_attr.show = igb_hwmon_show_location;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_label", offset);
+ break;
+ case IGB_HWMON_TYPE_TEMP:
+ igb_attr->dev_attr.show = igb_hwmon_show_temp;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_input", offset);
+ break;
+ case IGB_HWMON_TYPE_CAUTION:
+ igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_max", offset);
+ break;
+ case IGB_HWMON_TYPE_MAX:
+ igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_crit", offset);
+ break;
+ default:
+ rc = -EPERM;
+ return rc;
+ }
+
+	/* These are always the same regardless of type */
+ igb_attr->sensor =
+ &adapter->hw.mac.thermal_sensor_data.sensor[offset];
+ igb_attr->hw = &adapter->hw;
+ igb_attr->dev_attr.store = NULL;
+ igb_attr->dev_attr.attr.mode = S_IRUGO;
+ igb_attr->dev_attr.attr.name = igb_attr->name;
+ sysfs_attr_init(&igb_attr->dev_attr.attr);
+ rc = device_create_file(&adapter->pdev->dev,
+ &igb_attr->dev_attr);
+ if (rc == 0)
+ ++adapter->igb_hwmon_buff.n_hwmon;
+
+ return rc;
+}
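For reference, the sysfs names this produces for sensor offset 1; the table itself is purely illustrative and not part of the driver.

static const char * const igb_hwmon_names_sketch[] = {
	"temp1_label",	/* IGB_HWMON_TYPE_LOC - sensor location */
	"temp1_input",	/* IGB_HWMON_TYPE_TEMP - reading in millidegrees C */
	"temp1_max",	/* IGB_HWMON_TYPE_CAUTION - caution threshold */
	"temp1_crit",	/* IGB_HWMON_TYPE_MAX - max operating threshold */
};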
+
+static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
+{
+ int i;
+
+ if (adapter == NULL)
+ return;
+
+ for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
+ device_remove_file(&adapter->pdev->dev,
+ &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
+ }
+
+ kfree(adapter->igb_hwmon_buff.hwmon_list);
+
+ if (adapter->igb_hwmon_buff.device)
+ hwmon_device_unregister(adapter->igb_hwmon_buff.device);
+}
+
+/* called from igb_main.c */
+void igb_sysfs_exit(struct igb_adapter *adapter)
+{
+ igb_sysfs_del_adapter(adapter);
+}
+
+/* called from igb_main.c */
+int igb_sysfs_init(struct igb_adapter *adapter)
+{
+ struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+ unsigned int i;
+ int n_attrs;
+ int rc = 0;
+
+ /* If this method isn't defined we don't support thermals */
+ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
+ goto exit;
+
+ /* Don't create thermal hwmon interface if no sensors present */
+ rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
+ if (rc)
+ goto exit;
+
+	/* Allocate space for max attributes
+ * max num sensors * values (loc, temp, max, caution)
+ */
+ n_attrs = E1000_MAX_SENSORS * 4;
+ igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
+ GFP_KERNEL);
+ if (!igb_hwmon->hwmon_list) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
+ if (IS_ERR(igb_hwmon->device)) {
+ rc = PTR_ERR(igb_hwmon->device);
+ goto err;
+ }
+
+ for (i = 0; i < E1000_MAX_SENSORS; i++) {
+
+ /* Only create hwmon sysfs entries for sensors that have
+ * meaningful data.
+ */
+ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+ continue;
+
+ /* Bail if any hwmon attr struct fails to initialize */
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+ if (rc)
+ goto err;
+ }
+
+ goto exit;
+
+err:
+ igb_sysfs_del_adapter(adapter);
+exit:
+ return rc;
+}
+#endif
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 31cfe2ec75df..ed79a1c53b59 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -57,6 +57,7 @@
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
+#include <linux/i2c.h>
#include "igb.h"
#define MAJ 4
@@ -68,7 +69,8 @@ char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";
+static const char igb_copyright[] =
+ "Copyright (c) 2007-2013 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
@@ -193,6 +195,7 @@ static const struct dev_pm_ops igb_pm_ops = {
};
#endif
static void igb_shutdown(struct pci_dev *);
+static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
@@ -234,6 +237,7 @@ static struct pci_driver igb_driver = {
.driver.pm = &igb_pm_ops,
#endif
.shutdown = igb_shutdown,
+ .sriov_configure = igb_pci_sriov_configure,
.err_handler = &igb_err_handler
};
@@ -565,6 +569,91 @@ exit:
return;
}
+/* igb_get_i2c_data - Reads the I2C SDA data bit
+ * @data: pointer to the igb adapter structure
+ *
+ * Returns the I2C data bit value
+ */
+static int igb_get_i2c_data(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+}
+
+/* igb_set_i2c_data - Sets the I2C data bit
+ * @data: pointer to the igb adapter structure
+ * @state: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ */
+static void igb_set_i2c_data(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ if (state)
+ i2cctl |= E1000_I2C_DATA_OUT;
+ else
+ i2cctl &= ~E1000_I2C_DATA_OUT;
+
+ i2cctl &= ~E1000_I2C_DATA_OE_N;
+ i2cctl |= E1000_I2C_CLK_OE_N;
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+}
+
+/* igb_set_i2c_clk - Sets the I2C SCL clock
+ * @data: pointer to the igb adapter structure
+ * @state: state to set clock
+ *
+ * Sets the I2C clock line to state
+ */
+static void igb_set_i2c_clk(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ if (state) {
+ i2cctl |= E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ } else {
+ i2cctl &= ~E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ }
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+}
+
+/* igb_get_i2c_clk - Gets the I2C SCL clock state
+ * @data: pointer to the igb adapter structure
+ *
+ * Gets the I2C clock state
+ */
+static int igb_get_i2c_clk(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+}
+
+static const struct i2c_algo_bit_data igb_i2c_algo = {
+ .setsda = igb_set_i2c_data,
+ .setscl = igb_set_i2c_clk,
+ .getsda = igb_get_i2c_data,
+ .getscl = igb_get_i2c_clk,
+ .udelay = 5,
+ .timeout = 20,
+};
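A note on the timing fields chosen here, based on the i2c-algo-bit core rather than anything in this patch:

/* In struct i2c_algo_bit_data, .udelay is the SCL half-period in
 * microseconds, so udelay = 5 yields a bit-banged bus of roughly 100 kHz;
 * .timeout is in jiffies and bounds how long the core waits out clock
 * stretching by the slave.
 */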
+
/**
* igb_get_hw_dev - return device
* used by hardware layer to print debugging information
@@ -1708,6 +1797,18 @@ void igb_reset(struct igb_adapter *adapter)
igb_force_mac_fc(hw);
igb_init_dmac(adapter, pba);
+#ifdef CONFIG_IGB_HWMON
+ /* Re-initialize the thermal sensor on i350 devices. */
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (mac->type == e1000_i350 && hw->bus.func == 0) {
+ /* If present, re-initialize the external thermal sensor
+ * interface.
+ */
+ if (adapter->ets)
+ mac->ops.init_thermal_sensor_thresh(hw);
+ }
+ }
+#endif
if (!netif_running(adapter->netdev))
igb_power_down_link(adapter);
@@ -1822,6 +1923,37 @@ void igb_set_fw_version(struct igb_adapter *adapter)
return;
}
+static const struct i2c_board_info i350_sensor_info = {
+	I2C_BOARD_INFO("i350bb", 0xF8),
+};
+
+/* igb_init_i2c - Init I2C interface
+ * @adapter: pointer to adapter structure
+ */
+static s32 igb_init_i2c(struct igb_adapter *adapter)
+{
+ s32 status = E1000_SUCCESS;
+
+ /* I2C interface supported on i350 devices */
+ if (adapter->hw.mac.type != e1000_i350)
+ return E1000_SUCCESS;
+
+ /* Initialize the i2c bus which is controlled by the registers.
+	 * This bus will use the i2c_algo_bit structure that implements
+ * the protocol through toggling of the 4 bits in the register.
+ */
+ adapter->i2c_adap.owner = THIS_MODULE;
+ adapter->i2c_algo = igb_i2c_algo;
+ adapter->i2c_algo.data = adapter;
+ adapter->i2c_adap.algo_data = &adapter->i2c_algo;
+ adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
+ strlcpy(adapter->i2c_adap.name, "igb BB",
+ sizeof(adapter->i2c_adap.name));
+ status = i2c_bit_add_bus(&adapter->i2c_adap);
+ return status;
+}
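Once i2c_bit_add_bus() has registered the adapter, a sensor register could in principle be read with a plain write-then-read transfer. The sketch below is hypothetical: the helper name is invented, and the 0xF8 address used elsewhere in this patch is assumed to be the 8-bit form, which i2c_msg expects shifted down to 7 bits.

static int igb_i2c_read_sketch(struct igb_adapter *adapter, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = 0xF8 >> 1, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0xF8 >> 1, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	/* i2c_transfer() returns the number of messages completed */
	if (i2c_transfer(&adapter->i2c_adap, msgs, ARRAY_SIZE(msgs)) != 2)
		return -EIO;
	return 0;
}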
+
/**
* igb_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -2022,9 +2154,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "NVM Read Error\n");
memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
@@ -2115,6 +2246,13 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* reset the hardware with the new settings */
igb_reset(adapter);
+ /* Init the I2C interface */
+ err = igb_init_i2c(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to init i2c interface\n");
+ goto err_eeprom;
+ }
+
/* let the f/w know that the h/w is now under the control of the
* driver. */
igb_get_hw_control(adapter);
@@ -2135,7 +2273,27 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif
+#ifdef CONFIG_IGB_HWMON
+ /* Initialize the thermal sensor on i350 devices. */
+ if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
+ u16 ets_word;
+ /*
+ * Read the NVM to determine if this i350 device supports an
+ * external thermal sensor.
+ */
+ hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
+ if (ets_word != 0x0000 && ets_word != 0xFFFF)
+ adapter->ets = true;
+ else
+ adapter->ets = false;
+ if (igb_sysfs_init(adapter))
+ dev_err(&pdev->dev,
+ "failed to allocate sysfs resources\n");
+ } else {
+ adapter->ets = false;
+ }
+#endif
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
@@ -2176,6 +2334,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_register:
igb_release_hw_control(adapter);
+ memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
err_eeprom:
if (!igb_check_reset_block(hw))
igb_reset_phy(hw);
@@ -2196,6 +2355,111 @@ err_dma:
return err;
}
+#ifdef CONFIG_PCI_IOV
+static int igb_disable_sriov(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* reclaim resources allocated to VFs */
+ if (adapter->vf_data) {
+ /* disable iov and allow time for transactions to clear */
+ if (igb_vfs_are_assigned(adapter)) {
+ dev_warn(&pdev->dev,
+ "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
+ return -EPERM;
+ } else {
+ pci_disable_sriov(pdev);
+ msleep(500);
+ }
+
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ adapter->vfs_allocated_count = 0;
+ wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ wrfl();
+ msleep(100);
+ dev_info(&pdev->dev, "IOV Disabled\n");
+
+ /* Re-enable DMA Coalescing flag since IOV is turned off */
+ adapter->flags |= IGB_FLAG_DMAC;
+ }
+
+ return 0;
+}
+
+static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int old_vfs = pci_num_vf(pdev);
+ int err = 0;
+ int i;
+
+ if (!num_vfs)
+ goto out;
+ else if (old_vfs && old_vfs == num_vfs)
+ goto out;
+ else if (old_vfs && old_vfs != num_vfs)
+ err = igb_disable_sriov(pdev);
+
+ if (err)
+ goto out;
+
+ if (num_vfs > 7) {
+ err = -EPERM;
+ goto out;
+ }
+
+ adapter->vfs_allocated_count = num_vfs;
+
+ adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+
+ /* if allocation failed then we do not support SR-IOV */
+ if (!adapter->vf_data) {
+ adapter->vfs_allocated_count = 0;
+ dev_err(&pdev->dev,
+ "Unable to allocate memory for VF Data Storage\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+ if (err)
+ goto err_out;
+
+ dev_info(&pdev->dev, "%d VFs allocated\n",
+ adapter->vfs_allocated_count);
+ for (i = 0; i < adapter->vfs_allocated_count; i++)
+ igb_vf_configure(adapter, i);
+
+ /* DMA Coalescing is not supported in IOV mode. */
+ adapter->flags &= ~IGB_FLAG_DMAC;
+ goto out;
+
+err_out:
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ adapter->vfs_allocated_count = 0;
+out:
+ return err;
+}
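A short background note on the limit enforced above, hedged because it comes from the 82576/i350 hardware documentation rather than this patch:

/* The 82576 and i350 provide eight queue pools; one stays with the PF, so
 * at most seven VFs can be enabled, which is also the total advertised via
 * pci_sriov_set_totalvfs() in igb_probe_vfs().
 */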
+
+#endif
+/*
+ * igb_remove_i2c - Cleanup I2C interface
+ * @adapter: pointer to adapter structure
+ */
+static void igb_remove_i2c(struct igb_adapter *adapter)
+{
+ /* free the adapter bus structure */
+ i2c_del_adapter(&adapter->i2c_adap);
+}
+
/**
* igb_remove - Device Removal Routine
* @pdev: PCI device information struct
@@ -2212,8 +2476,11 @@ static void igb_remove(struct pci_dev *pdev)
struct e1000_hw *hw = &adapter->hw;
pm_runtime_get_noresume(&pdev->dev);
+#ifdef CONFIG_IGB_HWMON
+ igb_sysfs_exit(adapter);
+#endif
+ igb_remove_i2c(adapter);
igb_ptp_stop(adapter);
-
/*
* The watchdog timer may be rescheduled, so explicitly
* disable watchdog from being rescheduled.
@@ -2243,23 +2510,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
- /* reclaim resources allocated to VFs */
- if (adapter->vf_data) {
- /* disable iov and allow time for transactions to clear */
- if (igb_vfs_are_assigned(adapter)) {
- dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
- } else {
- pci_disable_sriov(pdev);
- msleep(500);
- }
-
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
- wrfl();
- msleep(100);
- dev_info(&pdev->dev, "IOV Disabled\n");
- }
+ igb_disable_sriov(pdev);
#endif
iounmap(hw->hw_addr);
@@ -2290,103 +2541,22 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
- int old_vfs = pci_num_vf(adapter->pdev);
- int i;
/* Virtualization features not supported on i210 family. */
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
return;
- if (old_vfs) {
- dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
- "max_vfs setting of %d\n", old_vfs, max_vfs);
- adapter->vfs_allocated_count = old_vfs;
- }
-
- if (!adapter->vfs_allocated_count)
- return;
-
- adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
- sizeof(struct vf_data_storage), GFP_KERNEL);
-
- /* if allocation failed then we do not support SR-IOV */
- if (!adapter->vf_data) {
- adapter->vfs_allocated_count = 0;
- dev_err(&pdev->dev, "Unable to allocate memory for VF "
- "Data Storage\n");
- goto out;
- }
-
- if (!old_vfs) {
- if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
- goto err_out;
- }
- dev_info(&pdev->dev, "%d VFs allocated\n",
- adapter->vfs_allocated_count);
- for (i = 0; i < adapter->vfs_allocated_count; i++)
- igb_vf_configure(adapter, i);
+ igb_enable_sriov(pdev, max_vfs);
+ pci_sriov_set_totalvfs(pdev, 7);
- /* DMA Coalescing is not supported in IOV mode. */
- adapter->flags &= ~IGB_FLAG_DMAC;
- goto out;
-err_out:
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
- adapter->vfs_allocated_count = 0;
-out:
- return;
#endif /* CONFIG_PCI_IOV */
}
-/**
- * igb_sw_init - Initialize general software structures (struct igb_adapter)
- * @adapter: board private structure to initialize
- *
- * igb_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- **/
-static int igb_sw_init(struct igb_adapter *adapter)
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
u32 max_rss_queues;
- pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
-
- /* set default ring sizes */
- adapter->tx_ring_count = IGB_DEFAULT_TXD;
- adapter->rx_ring_count = IGB_DEFAULT_RXD;
-
- /* set default ITR values */
- adapter->rx_itr_setting = IGB_DEFAULT_ITR;
- adapter->tx_itr_setting = IGB_DEFAULT_ITR;
-
- /* set default work limits */
- adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
-
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
-
- spin_lock_init(&adapter->stats64_lock);
-#ifdef CONFIG_PCI_IOV
- switch (hw->mac.type) {
- case e1000_82576:
- case e1000_i350:
- if (max_vfs > 7) {
- dev_warn(&pdev->dev,
- "Maximum of 7 VFs per PF, using max\n");
- adapter->vfs_allocated_count = 7;
- } else
- adapter->vfs_allocated_count = max_vfs;
- break;
- default:
- break;
- }
-#endif /* CONFIG_PCI_IOV */
-
/* Determine the maximum number of RSS queues supported. */
switch (hw->mac.type) {
case e1000_i211:
@@ -2445,11 +2615,64 @@ static int igb_sw_init(struct igb_adapter *adapter)
adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
break;
}
+}
+
+/**
+ * igb_sw_init - Initialize general software structures (struct igb_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int igb_sw_init(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IGB_DEFAULT_TXD;
+ adapter->rx_ring_count = IGB_DEFAULT_RXD;
+
+ /* set default ITR values */
+ adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+ adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
+
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ spin_lock_init(&adapter->stats64_lock);
+#ifdef CONFIG_PCI_IOV
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ if (max_vfs > 7) {
+ dev_warn(&pdev->dev,
+ "Maximum of 7 VFs per PF, using max\n");
+ adapter->vfs_allocated_count = 7;
+ } else
+ adapter->vfs_allocated_count = max_vfs;
+ if (adapter->vfs_allocated_count)
+ dev_warn(&pdev->dev,
+ "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
+ break;
+ default:
+ break;
+ }
+#endif /* CONFIG_PCI_IOV */
+
+ igb_init_queue_configuration(adapter);
/* Setup and initialize a copy of the hw vlan table array */
- adapter->shadow_vfta = kzalloc(sizeof(u32) *
- E1000_VLAN_FILTER_TBL_SIZE,
- GFP_ATOMIC);
+ adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
+ GFP_ATOMIC);
/* This call may decrease the number of queues */
if (igb_init_interrupt_scheme(adapter, true)) {
@@ -3131,6 +3354,20 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring)
+{
+#define IGB_MAX_BUILD_SKB_SIZE \
+ (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
+ (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
+
+ /* set build_skb flag */
+ if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
+ set_ring_build_skb_enabled(rx_ring);
+ else
+ clear_ring_build_skb_enabled(rx_ring);
+}
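Rough numbers for the cut-off above; exact figures depend on architecture padding:

/* With the default 1500 byte MTU, max_frame_size is
 *	1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + VLAN_HLEN(4) = 1522 bytes,
 * which fits under IGB_MAX_BUILD_SKB_SIZE, so the ring runs in build_skb
 * mode; a jumbo MTU such as 9000 does not fit in the 2048 byte half-page
 * buffer and the ring falls back to paged fragment receives.
 */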
+
/**
* igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure
@@ -3150,8 +3387,11 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *rx_ring = adapter->rx_ring[i];
+ igb_set_rx_buffer_len(adapter, rx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+ }
}
/**
@@ -3768,6 +4008,7 @@ static void igb_watchdog_task(struct work_struct *work)
}
igb_spoof_check(adapter);
+ igb_ptp_rx_hang(adapter);
/* Reset the timer */
if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -4193,13 +4434,6 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-/*
- * The largest size we can write to the descriptor is 65535. In order to
- * maintain a power of two alignment we have to limit ourselves to 32K.
- */
-#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
-
static void igb_tx_map(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
const u8 hdr_len)
@@ -4368,15 +4602,25 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0;
- /* need: 1 descriptor per page,
+ /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
- * + 1 desc for skb->data,
* + 1 desc for context descriptor,
- * otherwise try next time */
- if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+ * otherwise try next time
+ */
+ if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
+ unsigned short f;
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ } else {
+ count += skb_shinfo(skb)->nr_frags;
+ }
+
+ if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
return NETDEV_TX_BUSY;
}
@@ -4387,12 +4631,15 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
+ skb_tx_timestamp(skb);
+
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
!(adapter->ptp_tx_skb))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGB_TX_FLAGS_TSTAMP;
adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
if (adapter->hw.mac.type == e1000_82576)
schedule_work(&adapter->ptp_tx_work);
}
@@ -4415,7 +4662,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
igb_tx_map(tx_ring, first, hdr_len);
/* Make sure there is space in the ring for the next send. */
- igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
@@ -4969,7 +5216,7 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
unsigned char mac_addr[ETH_ALEN];
- eth_random_addr(mac_addr);
+ eth_zero_addr(mac_addr);
igb_set_vf_mac(adapter, vf, mac_addr);
return 0;
@@ -5322,9 +5569,9 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
- /* generate a new mac address as we were hotplug removed/added */
+ /* clear mac address as we were hotplug removed/added */
if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
- eth_random_addr(vf_mac);
+ eth_zero_addr(vf_mac);
/* process remaining reset events */
igb_vf_reset(adapter, vf);
@@ -5703,7 +5950,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
break;
/* prevent any other reads prior to eop_desc */
- rmb();
+ read_barrier_depends();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
@@ -5819,9 +6066,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
}
}
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets &&
netif_carrier_ok(tx_ring->netdev) &&
- igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -5870,6 +6118,41 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
DMA_FROM_DEVICE);
}
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+ struct page *page,
+ unsigned int truesize)
+{
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+ /* since we are the only owner of the page and we need to
+ * increment it, just set the value to 2 in order to avoid
+ * an unnecessary locked operation
+ */
+ atomic_set(&page->_count, 2);
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ return false;
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+#endif
+
+ return true;
+}
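How the reuse path behaves on 4K-page systems, summarized for clarity:

/* With PAGE_SIZE < 8192 each page holds two IGB_RX_BUFSZ (2048 byte)
 * halves.  After one half is attached to an skb the offset is XORed with
 * IGB_RX_BUFSZ so it alternates 0 <-> 2048, and the page count is pinned
 * at 2 (one reference for the stack, one kept by the ring) to avoid an
 * atomic increment on every receive.
 */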
+
/**
* igb_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -5892,6 +6175,11 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
{
struct page *page = rx_buffer->page;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = IGB_RX_BUFSZ;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
unsigned char *va = page_address(page) + rx_buffer->page_offset;
@@ -5914,38 +6202,88 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, IGB_RX_BUFSZ);
+ rx_buffer->page_offset, size, truesize);
- /* avoid re-using remote pages */
- if (unlikely(page_to_nid(page) != numa_node_id()))
- return false;
+ return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+}
+static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
+{
+ struct igb_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+ void *page_addr;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
+ unsigned int truesize = IGB_RX_BUFSZ;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(NET_SKB_PAD +
+ NET_IP_ALIGN +
+ size);
+#endif
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+ /* If we spanned a buffer we have a huge mess so test for it */
+ BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
- /*
- * since we are the only owner of the page and we need to
- * increment it, just set the value to 2 in order to avoid
- * an unnecessary locked operation
- */
- atomic_set(&page->_count, 2);
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+ /* Guarantee this function can be used by verifying buffer sizes */
+ BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
+ NET_IP_ALIGN +
+ IGB_TS_HDR_LEN +
+ ETH_FRAME_LEN +
+ ETH_FCS_LEN));
- if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
- return false;
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
- /* bump ref count on page before it is given to the stack */
- get_page(page);
+ page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
#endif
- return true;
+ /* build an skb around the page buffer */
+ skb = build_skb(page_addr, truesize);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* pull timestamp out of packet data */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+ __skb_pull(skb, IGB_TS_HDR_LEN);
+ }
+
+ if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->dma = 0;
+ rx_buffer->page = NULL;
+
+ return skb;
}
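As a rough check on the layout used by igb_build_rx_buffer() above (a sketch, not driver code): headroom, packet data and the skb_shared_info block must all fit inside IGB_RX_BUFSZ, which is what the BUILD_BUG_ON verifies at compile time.

#include <linux/skbuff.h>

/* Sketch only: true when a frame of frame_len bytes, plus NET_SKB_PAD +
 * NET_IP_ALIGN headroom and the shared-info overhead accounted for by
 * SKB_WITH_OVERHEAD(), fits in a buffer of bufsz bytes.
 */
static bool sketch_buffer_fits(unsigned int bufsz, unsigned int frame_len)
{
	return SKB_WITH_OVERHEAD(bufsz) >= NET_SKB_PAD + NET_IP_ALIGN + frame_len;
}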
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
@@ -5957,13 +6295,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- /*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
- rmb();
-
page = rx_buffer->page;
prefetchw(page);
@@ -6363,8 +6694,17 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
break;
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
/* retrieve a buffer from the ring */
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ if (ring_uses_build_skb(rx_ring))
+ skb = igb_build_rx_buffer(rx_ring, rx_desc);
+ else
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -6451,6 +6791,14 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
}
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+ if (ring_uses_build_skb(rx_ring))
+ return NET_SKB_PAD + NET_IP_ALIGN;
+ else
+ return 0;
+}
+
/**
* igb_alloc_rx_buffers - Replace used receive buffers; packet split
* @adapter: address of board private structure
@@ -6477,7 +6825,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
+ bi->page_offset +
+ igb_rx_offset(rx_ring));
rx_desc++;
bi++;
@@ -6903,6 +7253,72 @@ static void igb_shutdown(struct pci_dev *pdev)
}
}
+#ifdef CONFIG_PCI_IOV
+static int igb_sriov_reinit(struct pci_dev *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(dev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+
+ igb_clear_interrupt_scheme(adapter);
+
+ igb_init_queue_configuration(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ rtnl_unlock();
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ igb_open(netdev);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int igb_pci_disable_sriov(struct pci_dev *dev)
+{
+ int err = igb_disable_sriov(dev);
+
+ if (!err)
+ err = igb_sriov_reinit(dev);
+
+ return err;
+}
+
+static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
+{
+ int err = igb_enable_sriov(dev, num_vfs);
+
+ if (err)
+ goto out;
+
+ err = igb_sriov_reinit(dev);
+ if (!err)
+ return num_vfs;
+
+out:
+ return err;
+}
+
+#endif
+static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ if (num_vfs == 0)
+ return igb_pci_disable_sriov(dev);
+ else
+ return igb_pci_enable_sriov(dev, num_vfs);
+#endif
+ return 0;
+}
+
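igb_pci_sriov_configure() is presumably wired into the driver's struct pci_driver so that writes to the sriov_numvfs sysfs attribute reach it; a minimal sketch under that assumption (the rest of the driver structure is elided):

#include <linux/pci.h>

/* Sketch only: registering the callback above as the PCI core's SR-IOV
 * configuration hook. The real igb_driver definition lives elsewhere in
 * igb_main.c.
 */
static struct pci_driver igb_driver_sketch = {
	.name            = "igb",
	.sriov_configure = igb_pci_sriov_configure,
};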
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
@@ -7308,4 +7724,133 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
}
}
+static DEFINE_SPINLOCK(i2c_clients_lock);
+
+/* igb_get_i2c_client - returns matching client
+ * in the adapter's client list.
+ * @adapter: adapter struct
+ * @dev_addr: I2C device address needed.
+ */
+static struct i2c_client *
+igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
+{
+ ulong flags;
+ struct igb_i2c_client_list *client_list;
+ struct i2c_client *client = NULL;
+ struct i2c_board_info client_info = {
+ I2C_BOARD_INFO("igb", 0x00),
+ };
+
+ spin_lock_irqsave(&i2c_clients_lock, flags);
+ client_list = adapter->i2c_clients;
+
+ /* See if we already have an i2c_client */
+ while (client_list) {
+ if (client_list->client->addr == (dev_addr >> 1)) {
+ client = client_list->client;
+ goto exit;
+ } else {
+ client_list = client_list->next;
+ }
+ }
+
+ /* no client_list found, create a new one */
+ client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
+ if (client_list == NULL)
+ goto exit;
+
+ /* dev_addr passed to us is left-shifted by 1 bit;
+ * i2c_new_device() expects the unshifted (right-aligned) address.
+ */
+ client_info.addr = dev_addr >> 1;
+ client_info.platform_data = adapter;
+ client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
+ if (client_list->client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device..\n");
+ goto err_no_client;
+ }
+
+ /* insert new client at head of list */
+ client_list->next = adapter->i2c_clients;
+ adapter->i2c_clients = client_list;
+
+ client = client_list->client;
+ goto exit;
+
+err_no_client:
+ kfree(client_list);
+exit:
+ spin_unlock_irqrestore(&i2c_clients_lock, flags);
+ return client;
+}
+
+/* igb_read_i2c_byte - Reads an 8-bit value over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: value read
+ *
+ * Performs a byte read operation over the I2C interface at
+ * a specified device address.
+ */
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+ s32 status;
+ u16 swfw_mask = 0;
+
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+ != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+
+ status = i2c_smbus_read_byte_data(this_client, byte_offset);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ if (status < 0)
+ return E1000_ERR_I2C;
+ else {
+ *data = status;
+ return E1000_SUCCESS;
+ }
+}
+
+/* igb_write_i2c_byte - Writes an 8-bit value over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: value to write
+ *
+ * Performs a byte write operation over the I2C interface at
+ * a specified device address.
+ */
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+ s32 status;
+ u16 swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+ status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ if (status)
+ return E1000_ERR_I2C;
+ else
+ return E1000_SUCCESS;
+
+}
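For illustration, a hedged sketch of how a caller might use the helper above to fetch one byte from an external I2C device; the device address and offset are hypothetical, not values from this patch.

/* Sketch only: read a single byte via igb_read_i2c_byte(). dev_addr is the
 * left-shifted 8-bit address expected by the helper; 0xF8 and offset 0x01
 * are made-up example values.
 */
static s32 sketch_read_i2c_example(struct e1000_hw *hw, u8 *val)
{
	return igb_read_i2c_byte(hw, 0x01, 0xF8, val);
}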
/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ab3429729bde..0987822359f0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
+#include <linux/ptp_classify.h>
#include "igb.h"
@@ -70,6 +71,7 @@
*/
#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGB_PTP_TX_TIMEOUT (HZ * 15)
#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
@@ -396,6 +398,15 @@ void igb_ptp_tx_work(struct work_struct *work)
if (!adapter->ptp_tx_skb)
return;
+ if (time_is_before_jiffies(adapter->ptp_tx_start +
+ IGB_PTP_TX_TIMEOUT)) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ adapter->tx_hwtstamp_timeouts++;
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+ return;
+ }
+
tsynctxctl = rd32(E1000_TSYNCTXCTL);
if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
igb_ptp_tx_hwtstamp(adapter);
@@ -419,6 +430,51 @@ static void igb_ptp_overflow_check(struct work_struct *work)
}
/**
+ * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @adapter: private network adapter structure
+ *
+ * This watchdog task is scheduled to detect the error case where the hardware
+ * has dropped an Rx packet that was timestamped when the ring was full. The
+ * error is rare but leaves the device in a state where it is unable to
+ * timestamp any future packets.
+ */
+void igb_ptp_rx_hang(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *rx_ring;
+ u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
+ unsigned long rx_event;
+ int n;
+
+ if (hw->mac.type != e1000_82576)
+ return;
+
+ /* If we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) {
+ adapter->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* Determine the most recent watchdog or rx_timestamp event */
+ rx_event = adapter->last_rx_ptp_check;
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* Only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(E1000_RXSTMPH);
+ adapter->last_rx_ptp_check = jiffies;
+ adapter->rx_hwtstamp_cleared++;
+ dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+ }
+}
+
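igb_ptp_rx_hang() is presumably called periodically from the driver's watchdog/service path (the call site is not part of this hunk); a sketch under that assumption:

/* Sketch only: hypothetical periodic call site. The check for ptp_clock is
 * an assumption to avoid running the test before PTP is initialized.
 */
static void sketch_service_task(struct igb_adapter *adapter)
{
	if (adapter->ptp_clock)
		igb_ptp_rx_hang(adapter);
}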
+/**
* igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @adapter: Board private structure.
*
@@ -643,7 +699,6 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
else
wr32(E1000_ETQF(3), 0);
-#define PTP_PORT 319
/* L4 Queue Filter[3]: filter by destination port and protocol */
if (is_l4) {
u32 ftqf = (IPPROTO_UDP /* UDP */
@@ -652,12 +707,12 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
| E1000_FTQF_MASK); /* mask all inputs */
ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
- wr32(E1000_IMIR(3), htons(PTP_PORT));
+ wr32(E1000_IMIR(3), htons(PTP_EV_PORT));
wr32(E1000_IMIREXT(3),
(E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
if (hw->mac.type == e1000_82576) {
/* enable source port check */
- wr32(E1000_SPQF(3), htons(PTP_PORT));
+ wr32(E1000_SPQF(3), htons(PTP_EV_PORT));
ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
}
wr32(E1000_FTQF(3), ftqf);
@@ -801,6 +856,10 @@ void igb_ptp_stop(struct igb_adapter *adapter)
}
cancel_work_sync(&adapter->ptp_tx_work);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ }
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index fdca7b672776..a1463e3d14c0 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -127,8 +127,8 @@ struct igbvf_buffer {
/* Tx */
struct {
unsigned long time_stamp;
+ union e1000_adv_tx_desc *next_to_watch;
u16 length;
- u16 next_to_watch;
u16 mapped_as_page;
};
/* Rx */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 277f5dfe3d90..d60cd4393415 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
struct sk_buff *skb;
union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int i, eop, count = 0;
+ unsigned int i, count = 0;
bool cleaned = false;
i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+ buffer_info = &tx_ring->buffer_info[i];
+ eop_desc = buffer_info->next_to_watch;
+
+ do {
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ buffer_info->next_to_watch = NULL;
- while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
- rmb(); /* read buffer_info after eop_desc status */
for (cleaned = false; !cleaned; count++) {
tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
- cleaned = (i == eop);
+ cleaned = (tx_desc == eop_desc);
skb = buffer_info->skb;
if (skb) {
@@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
i++;
if (i == tx_ring->count)
i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
}
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
- }
+
+ eop_desc = buffer_info->next_to_watch;
+ } while (count < tx_ring->count);
tx_ring->next_to_clean = i;
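Taken together with the transmit-side hunk further down, the new next_to_watch pointer carries an ordering contract; a condensed sketch of that contract (drawn from the hunks in this patch, not new code):

/* Sketch of the ordering contract (illustrative only):
 *
 *   producer (xmit):   wmb();
 *                      buffer_info[first].next_to_watch = tx_desc;
 *
 *   consumer (clean):  eop_desc = buffer_info->next_to_watch;
 *                      if (!eop_desc) break;       NULL means nothing pending
 *                      read_barrier_depends();     order reads after the load
 *                      test eop_desc->wb.status for E1000_TXD_STAT_DD
 *
 * The descriptors are made visible before the pointer is published, so a
 * non-NULL pointer with DD set guarantees the buffers behind it are done.
 */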
@@ -1399,12 +1412,10 @@ static void igbvf_set_multi(struct net_device *netdev)
int i;
if (!netdev_mc_empty(netdev)) {
- mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
- if (!mta_list) {
- dev_err(&adapter->pdev->dev,
- "failed to allocate multicast filter list\n");
+ mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
+ GFP_ATOMIC);
+ if (!mta_list)
return;
- }
}
/* prepare a packed array of only addresses. */
@@ -1738,7 +1749,6 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
@@ -1964,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
context_desc->seqnum_seed = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@@ -2024,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
context_desc->mss_l4len_idx = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@@ -2064,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- struct sk_buff *skb,
- unsigned int first)
+ struct sk_buff *skb)
{
struct igbvf_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
@@ -2080,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->length = len;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
DMA_TO_DEVICE);
@@ -2103,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
@@ -2112,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
@@ -2123,7 +2127,6 @@ dma_error:
buffer_info->dma = 0;
buffer_info->time_stamp = 0;
buffer_info->length = 0;
- buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
if (count)
count--;
@@ -2142,7 +2145,8 @@ dma_error:
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- int tx_flags, int count, u32 paylen,
+ int tx_flags, int count,
+ unsigned int first, u32 paylen,
u8 hdr_len)
{
union e1000_adv_tx_desc *tx_desc = NULL;
@@ -2192,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
* such as IA-64). */
wmb();
+ tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail);
/* we need this if more than one processor can write to our tail
@@ -2258,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
* count reflects descriptors mapped, if 0 then mapping error
* has occurred and we need to rewind the descriptor queue
*/
- count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
+ count = igbvf_tx_map_adv(adapter, tx_ring, skb);
if (count) {
igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
- skb->len, hdr_len);
+ first, skb->len, hdr_len);
/* Make sure there is space in the ring for the next send. */
igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
} else {
@@ -2736,30 +2741,24 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
- "PF still in reset state, assigning new address."
- " Is the PF interface up?\n");
- eth_hw_addr_random(netdev);
- memcpy(adapter->hw.mac.addr, netdev->dev_addr,
- netdev->addr_len);
+ "PF still in reset state. Is the PF interface up?\n");
} else {
err = hw->mac.ops.read_mac_addr(hw);
- if (err) {
- dev_err(&pdev->dev, "Error reading MAC address\n");
- goto err_hw_init;
- }
+ if (err)
+ dev_info(&pdev->dev, "Error reading MAC address.\n");
+ else if (is_zero_ether_addr(adapter->hw.mac.addr))
+ dev_info(&pdev->dev, "MAC address not assigned by administrator.\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ netdev->addr_len);
}
if (!is_valid_ether_addr(netdev->dev_addr)) {
- dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
- netdev->dev_addr);
- err = -EIO;
- goto err_hw_init;
+ dev_info(&pdev->dev, "Assigning random MAC address.\n");
+ eth_hw_addr_random(netdev);
+ memcpy(adapter->hw.mac.addr, netdev->dev_addr,
+ netdev->addr_len);
}
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
-
setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
(unsigned long) adapter);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index ae96c10251be..ea4808373435 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -500,9 +500,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
@@ -709,11 +708,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * txdr->count;
txdr->buffer_info = vzalloc(size);
- if (!txdr->buffer_info) {
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate transmit descriptor ring memory\n");
+ if (!txdr->buffer_info)
return -ENOMEM;
- }
/* round up to nearest 4K */
@@ -798,11 +794,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * rxdr->count;
rxdr->buffer_info = vzalloc(size);
- if (!rxdr->buffer_info) {
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate receive descriptor ring\n");
+ if (!rxdr->buffer_info)
return -ENOMEM;
- }
/* Round up to nearest 4K */
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index f3a632bf8d96..be2989e60009 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Copyright(c) 1999 - 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -32,7 +32,7 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
+ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
@@ -40,4 +40,5 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
+ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8e786764c60e..a8e10cff7a89 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -35,6 +35,7 @@
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
@@ -91,21 +92,26 @@
*/
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
-#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define IXGBE_TX_FLAGS_CSUM (u32)(1)
-#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
-#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
-#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
-#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
-#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9)
+enum ixgbe_tx_flags {
+ /* cmd_type flags */
+ IXGBE_TX_FLAGS_HW_VLAN = 0x01,
+ IXGBE_TX_FLAGS_TSO = 0x02,
+ IXGBE_TX_FLAGS_TSTAMP = 0x04,
+
+ /* olinfo flags */
+ IXGBE_TX_FLAGS_CC = 0x08,
+ IXGBE_TX_FLAGS_IPV4 = 0x10,
+ IXGBE_TX_FLAGS_CSUM = 0x20,
+
+ /* software defined flags */
+ IXGBE_TX_FLAGS_SW_VLAN = 0x40,
+ IXGBE_TX_FLAGS_FCOE = 0x80,
+};
+
+/* VLAN info */
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -150,7 +156,7 @@ struct vf_macvlans {
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
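A hedged reading of the new bound: once every fragment is assumed to fit in a single descriptor (its size stays below IXGBE_MAX_DATA_PER_TXD), the worst case is one descriptor per fragment plus a few extra for the linear head, an optional context descriptor and slack.

/* Sketch only (assumption stated above): worst-case descriptor budget for
 * one skb when each fragment consumes at most one descriptor.
 */
static unsigned int sketch_desc_needed(unsigned int nr_frags)
{
	return nr_frags + 4;	/* mirrors DESC_NEEDED with nr_frags = MAX_SKB_FRAGS */
}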
@@ -195,6 +201,7 @@ struct ixgbe_rx_queue_stats {
enum ixgbe_ring_state_t {
__IXGBE_TX_FDIR_INIT_DONE,
+ __IXGBE_TX_XPS_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_RX_RSC_ENABLED,
@@ -224,6 +231,7 @@ struct ixgbe_ring {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
+ unsigned long last_rx_timestamp;
unsigned long state;
u8 __iomem *tail;
dma_addr_t dma; /* phys. address of descriptor ring */
@@ -271,15 +279,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 64
-#ifdef IXGBE_FCOE
+#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES 8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#else
-#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
-#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
-#endif /* IXGBE_FCOE */
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -573,11 +576,14 @@ struct ixgbe_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
unsigned long last_overflow_check;
+ unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
- int rx_hwtstamp_filter;
u32 base_incval;
/* SR-IOV */
@@ -614,6 +620,7 @@ enum ixgbe_state_t {
__IXGBE_DOWN,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
+ __IXGBE_READ_I2C,
};
struct ixgbe_cb {
@@ -694,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#endif
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
@@ -742,15 +749,32 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb);
-extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb);
+extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb);
+static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
+ return;
+
+ __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
+
+ /*
+ * Update the last_rx_timestamp timer so the watchdog check can catch the
+ * error case of a timestamp latched for a dropped packet.
+ */
+ rx_ring->last_rx_timestamp = jiffies;
+}
+
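The inline wrapper above is presumably invoked once per completed descriptor in the Rx cleanup loop; a sketch of such a call site, under that assumption:

/* Sketch only: hypothetical call site; the real one is updated elsewhere in
 * this series. The wrapper itself bails out early unless the descriptor
 * carries IXGBE_RXDADV_STAT_TS.
 */
static void sketch_rx_timestamp(struct ixgbe_ring *rx_ring,
				union ixgbe_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}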
extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd);
extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
+#endif
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 42537336110c..d0113fc97b6f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -41,7 +41,6 @@
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
@@ -633,15 +632,15 @@ out:
* ixgbe_setup_mac_link_82598 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if auto-negotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
+ bool autoneg = false;
s32 status = 0;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -685,20 +684,18 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true if waiting is needed to complete
*
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
- status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ status = hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -1006,15 +1003,16 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
}
/**
- * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
- * @byte_offset: EEPROM byte offset to read
+ * @dev_addr: address to read from
+ * @byte_offset: byte offset to read from dev_addr
* @eeprom_data: value read
*
- * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ * Performs an 8-bit read operation from the SFP module's data over the I2C interface.
**/
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+ u8 byte_offset, u8 *eeprom_data)
{
s32 status = 0;
u16 sfp_addr = 0;
@@ -1028,7 +1026,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
* 0xC30D. These registers are used to talk to the SFP+
* module's EEPROM through the SDA/SCL (I2C) interface.
*/
- sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+ sfp_addr = (dev_addr << 8) + byte_offset;
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
hw->phy.ops.write_reg(hw,
IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
@@ -1060,7 +1058,6 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
*eeprom_data = (u8)(sfp_data >> 8);
} else {
status = IXGBE_ERR_PHY;
- goto out;
}
out:
@@ -1068,6 +1065,36 @@ out:
}
/**
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs an 8-bit read operation from the SFP module's EEPROM over the I2C interface.
+ **/
+static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs an 8-bit read operation from the SFP module's SFF-8472 data over I2C
+ **/
+static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, sff8472_data);
+}
+
+/**
* ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
* @hw: pointer to hardware structure
*
@@ -1300,6 +1327,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.write_reg = &ixgbe_write_phy_reg_generic,
.setup_link = &ixgbe_setup_phy_link_generic,
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
.check_overtemp = &ixgbe_tn_check_overtemp,
};
@@ -1311,4 +1339,3 @@ struct ixgbe_info ixgbe_82598_info = {
.eeprom_ops = &eeprom_ops_82598,
.phy_ops = &phy_ops_82598,
};
-
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 1073aea5da40..203a00c24330 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -45,21 +45,17 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
@@ -234,13 +230,13 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
* ixgbe_get_link_capabilities_82599 - Determines link capabilities
* @hw: pointer to hardware structure
* @speed: pointer to link speed
- * @negotiation: true when autoneg or autotry is enabled
+ * @autoneg: true when autoneg or autotry is enabled
*
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
- bool *negotiation)
+ bool *autoneg)
{
s32 status = 0;
u32 autoc = 0;
@@ -251,7 +247,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
goto out;
}
@@ -268,22 +264,22 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
switch (autoc & IXGBE_AUTOC_LMS_MASK) {
case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
case IXGBE_AUTOC_LMS_1G_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
break;
case IXGBE_AUTOC_LMS_10G_SERIAL:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
case IXGBE_AUTOC_LMS_KX4_KX_KR:
@@ -295,7 +291,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
break;
case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
@@ -306,12 +302,12 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
break;
case IXGBE_AUTOC_LMS_SGMII_1G_100M:
*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
default:
@@ -323,7 +319,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
if (hw->phy.multispeed_fiber) {
*speed |= IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
}
out:
@@ -510,14 +506,12 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status = 0;
@@ -527,11 +521,11 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
u32 i = 0;
bool link_up = false;
- bool negotiation;
+ bool autoneg = false;
/* Mask off requested but non-supported speeds */
status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
- &negotiation);
+ &autoneg);
if (status != 0)
return status;
@@ -564,7 +558,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
status = ixgbe_setup_mac_link_82599(hw,
IXGBE_LINK_SPEED_10GB_FULL,
- autoneg,
autoneg_wait_to_complete);
if (status != 0)
return status;
@@ -617,7 +610,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
status = ixgbe_setup_mac_link_82599(hw,
IXGBE_LINK_SPEED_1GB_FULL,
- autoneg,
autoneg_wait_to_complete);
if (status != 0)
return status;
@@ -646,7 +638,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
if (speedcnt > 1)
status = ixgbe_setup_mac_link_multispeed_fiber(hw,
highest_link_speed,
- autoneg,
autoneg_wait_to_complete);
out:
@@ -666,13 +657,12 @@ out:
* ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Implements the Intel SmartSpeed algorithm.
**/
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
+ ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
s32 status = 0;
@@ -703,7 +693,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* First, try to get link with full advertisement */
hw->phy.smart_speed_active = false;
for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
if (status != 0)
goto out;
@@ -738,7 +728,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* Turn SmartSpeed on to disable KR support */
hw->phy.smart_speed_active = true;
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
if (status != 0)
goto out;
@@ -764,7 +754,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* We didn't get link. Turn SmartSpeed back off. */
hw->phy.smart_speed_active = false;
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
out:
@@ -778,14 +768,13 @@ out:
* ixgbe_setup_mac_link_82599 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status = 0;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -799,6 +788,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
bool got_lock = false;
+ bool autoneg = false;
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -911,20 +901,18 @@ out:
* ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true if waiting is needed to complete
*
* Restarts link on PHY and MAC based on settings passed in.
**/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
- status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ status = hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@@ -2253,6 +2241,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_byte = &ixgbe_read_i2c_byte_generic,
.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 5e68afdd502a..99e472ebaa75 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f7a0970a251c..bc3948ead6e0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 9bc17c0cb972..1f2c805684dd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 1f4108ee154b..1634de8b627f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 87592b458c9c..ac780770863d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index ba835708fcac..3164f5453b8f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 4eac80d01857..05e23b80b5e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index 4dec47faeb00..a4ef07631d1e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index f1e002d5fa8f..f3d68f9696ba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,7 @@
#include <linux/dcbnl.h>
#include "ixgbe_dcb_82598.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
/* Callbacks for DCB netlink in the kernel */
#define BIT_DCB_MODE 0x01
@@ -301,7 +302,6 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
-#ifdef IXGBE_FCOE
static void ixgbe_dcbnl_devreset(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -320,7 +320,6 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
clear_bit(__IXGBE_RESETTING, &adapter->state);
}
-#endif
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
@@ -450,7 +449,6 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u8 rval = 0;
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (tcid) {
@@ -461,14 +459,14 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
*num = adapter->dcb_cfg.num_tcs.pfc_tcs;
break;
default:
- rval = -EINVAL;
+ return -EINVAL;
break;
}
} else {
- rval = -EINVAL;
+ return -EINVAL;
}
- return rval;
+ return 0;
}
static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
@@ -541,6 +539,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, err = 0;
__u8 max_tc = 0;
+ __u8 map_chg = 0;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
@@ -550,15 +549,22 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
GFP_KERNEL);
if (!adapter->ixgbe_ieee_ets)
return -ENOMEM;
- }
- memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+ /* initialize UP2TC mappings to invalid value */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ adapter->ixgbe_ieee_ets->prio_tc[i] =
+ IEEE_8021QAZ_MAX_TCS;
+ }
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->prio_tc[i] > max_tc)
max_tc = ets->prio_tc[i];
+ if (ets->prio_tc[i] != adapter->ixgbe_ieee_ets->prio_tc[i])
+ map_chg = 1;
}
+ memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+
if (max_tc)
max_tc++;
@@ -567,6 +573,8 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (max_tc != netdev_get_num_tc(dev))
err = ixgbe_setup_tc(dev, max_tc);
+ else if (map_chg)
+ ixgbe_dcbnl_devreset(dev);
if (err)
goto err_out;
@@ -643,9 +651,11 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
return err;
err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
#ifdef IXGBE_FCOE
- if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
@@ -656,6 +666,23 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
ixgbe_dcbnl_devreset(dev);
}
#endif
+
+ /* VF devices should use default UP when available */
+ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == 0) {
+ int vf;
+
+ adapter->default_up = app->priority;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+
+ if (!vfinfo->pf_qos)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ app->priority, vf);
+ }
+ }
+
return 0;
}
@@ -683,6 +710,24 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
ixgbe_dcbnl_devreset(dev);
}
#endif
+ /* IF default priority is being removed clear VF default UP */
+ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == 0 && adapter->default_up == app->priority) {
+ int vf;
+ long unsigned int app_mask = dcb_ieee_getapp_mask(dev, app);
+ int qos = app_mask ? find_first_bit(&app_mask, 8) : 0;
+
+ adapter->default_up = qos;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+
+ if (!vfinfo->pf_qos)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ qos, vf);
+ }
+ }
+
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 50aa546b8c7a..c5933f6dceee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -24,9 +24,6 @@
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
-
-#ifdef CONFIG_DEBUG_FS
-
#include <linux/debugfs.h>
#include <linux/module.h>
@@ -277,5 +274,3 @@ void ixgbe_dbg_exit(void)
{
debugfs_remove_recursive(ixgbe_dbg_root);
}
-
-#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 326858424345..f4d2e9e3c6d5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -39,6 +39,7 @@
#include <linux/uaccess.h>
#include "ixgbe.h"
+#include "ixgbe_phy.h"
#define IXGBE_ALL_RAR_ENTRIES 16
@@ -156,7 +157,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
u32 link_speed = 0;
- bool autoneg;
+ bool autoneg = false;
bool link_up;
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
@@ -333,10 +334,10 @@ static int ixgbe_set_settings(struct net_device *netdev,
return err;
/* this sets the link speed and restarts auto-neg */
hw->mac.autotry_restart = true;
- err = hw->mac.ops.setup_link(hw, advertised, true, true);
+ err = hw->mac.ops.setup_link(hw, advertised, true);
if (err) {
e_info(probe, "setup link failed with code %d\n", err);
- hw->mac.ops.setup_link(hw, old, true, true);
+ hw->mac.ops.setup_link(hw, old, true);
}
} else {
/* in this case we currently only support 10Gb/FULL */
@@ -1040,6 +1041,9 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
p = (char *) adapter +
ixgbe_gstrings_stats[i].stat_offset;
break;
+ default:
+ data[i] = 0;
+ continue;
}
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
@@ -1096,8 +1100,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *ixgbe_gstrings_test,
- IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < IXGBE_TEST_LEN; i++) {
+ memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
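The ETH_SS_TEST change above replaces one flat memcpy with a per-entry copy, which keeps working even if the test string table stops being a single contiguous 2-D array. A small sketch of the fixed-stride layout ethtool expects (ETH_GSTRING_LEN-sized slots; the names here are illustrative stand-ins for the driver's self-test strings):

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32	/* stand-in for ETH_GSTRING_LEN */

static const char *test_names[] = {
	"Register test  (offline)",
	"Eeprom test    (offline)",
	"Interrupt test (offline)",
	"Loopback test  (offline)",
	"Link test   (on/offline)",
};

int main(void)
{
	char data[sizeof(test_names) / sizeof(test_names[0])][GSTRING_LEN];
	size_t i;

	/* one fixed-width slot per test string, as ethtool -t expects */
	for (i = 0; i < sizeof(test_names) / sizeof(test_names[0]); i++) {
		memset(data[i], 0, GSTRING_LEN);
		strncpy(data[i], test_names[i], GSTRING_LEN - 1);
	}
	printf("%s\n", data[4]);
	return 0;
}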
@@ -1837,19 +1843,11 @@ static void ixgbe_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
bool if_running = netif_running(netdev);
set_bit(__IXGBE_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline tests */
-
- e_info(hw, "offline testing starting\n");
-
- /* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
- if (ixgbe_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
int i;
for (i = 0; i < adapter->num_vfs; i++) {
@@ -1870,12 +1868,24 @@ static void ixgbe_diag_test(struct net_device *netdev,
}
}
+ /* Offline tests */
+ e_info(hw, "offline testing starting\n");
+
if (if_running)
/* indicate we're in test mode */
dev_close(netdev);
- else
- ixgbe_reset(adapter);
+ /* bringing adapter down disables SFP+ optics */
+ if (hw->mac.ops.enable_tx_laser)
+ hw->mac.ops.enable_tx_laser(hw);
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result
+ */
+ if (ixgbe_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
e_info(hw, "register testing starting\n");
if (ixgbe_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1908,16 +1918,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
skip_loopback:
ixgbe_reset(adapter);
+ /* clear testing bit and return adapter to previous state */
clear_bit(__IXGBE_TESTING, &adapter->state);
if (if_running)
dev_open(netdev);
} else {
e_info(hw, "online testing starting\n");
+
+ /* if adapter is down, SFP+ optics will be disabled */
+ if (!if_running && hw->mac.ops.enable_tx_laser)
+ hw->mac.ops.enable_tx_laser(hw);
+
/* Online tests */
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- /* Online tests aren't run; pass by default */
+ /* Offline tests aren't run; pass by default */
data[0] = 0;
data[1] = 0;
data[2] = 0;
@@ -1925,6 +1941,10 @@ skip_loopback:
clear_bit(__IXGBE_TESTING, &adapter->state);
}
+
+ /* if adapter was down, ensure SFP+ optics are disabled again */
+ if (!if_running && hw->mac.ops.disable_tx_laser)
+ hw->mac.ops.disable_tx_laser(hw);
skip_ol_tests:
msleep_interruptible(4 * 1000);
}
@@ -2093,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
- u16 tx_itr_param, rx_itr_param;
+ u16 tx_itr_param, rx_itr_param, tx_itr_prev;
bool need_reset = false;
- /* don't accept tx specific changes if we've got mixed RxTx vectors */
- if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
- && ec->tx_coalesce_usecs)
- return -EINVAL;
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
+ /* reject Tx specific changes in case of mixed RxTx vectors */
+ if (ec->tx_coalesce_usecs)
+ return -EINVAL;
+ tx_itr_prev = adapter->rx_itr_setting;
+ } else {
+ tx_itr_prev = adapter->tx_itr_setting;
+ }
if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
@@ -2125,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
else
tx_itr_param = adapter->tx_itr_setting;
+ /* mixed Rx/Tx */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+
+#if IS_ENABLED(CONFIG_BQL)
+ /* detect ITR changes that require update of TXDCTL.WTHRESH */
+ if ((adapter->tx_itr_setting > 1) &&
+ (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
+ if ((tx_itr_prev == 1) ||
+ (tx_itr_prev > IXGBE_100K_ITR))
+ need_reset = true;
+ } else {
+ if ((tx_itr_prev > 1) &&
+ (tx_itr_prev < IXGBE_100K_ITR))
+ need_reset = true;
+ }
+#endif
/* check the old value and enable RSC if necessary */
- need_reset = ixgbe_update_rsc(adapter);
+ need_reset |= ixgbe_update_rsc(adapter);
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
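The need_reset detection added above exists because WTHRESH is chosen at ring-configuration time from the ITR value (see the TXDCTL hunk later in ixgbe_main.c); only ITR changes that cross the 1 or IXGBE_100K_ITR boundaries alter that choice, so only those force a reset. A hedged, standalone sketch of the predicate, ignoring the driver's exact boundary handling and using a made-up threshold:

#include <stdbool.h>
#include <stdio.h>

#define FAKE_100K_ITR 40	/* illustrative stand-in for IXGBE_100K_ITR */

/* WTHRESH > 1 is only used when 1 < itr < the 100k-interrupts/s mark;
 * a reset is needed exactly when the old and new ITR values fall on
 * different sides of that window.
 */
static bool itr_change_needs_reset(unsigned int old_itr, unsigned int new_itr)
{
	bool old_bursty = old_itr > 1 && old_itr < FAKE_100K_ITR;
	bool new_bursty = new_itr > 1 && new_itr < FAKE_100K_ITR;

	return old_bursty != new_bursty;
}

int main(void)
{
	printf("%d\n", itr_change_needs_reset(1, 10));	/* 1: reset needed */
	printf("%d\n", itr_change_needs_reset(10, 20));	/* 0: same regime */
	return 0;
}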
@@ -2695,6 +2736,14 @@ static int ixgbe_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
default:
@@ -2704,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev,
return 0;
}
+static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
+{
+ unsigned int max_combined;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ /* We only support one q_vector without MSI-X */
+ max_combined = 1;
+ } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ /* SR-IOV currently only allows one queue on the PF */
+ max_combined = 1;
+ } else if (tcs > 1) {
+ /* For DCB report channels per traffic class */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ /* 8 TC w/ 4 queues per TC */
+ max_combined = 4;
+ } else if (tcs > 4) {
+ /* 8 TC w/ 8 queues per TC */
+ max_combined = 8;
+ } else {
+ /* 4 TC w/ 16 queues per TC */
+ max_combined = 16;
+ }
+ } else if (adapter->atr_sample_rate) {
+ /* support up to 64 queues with ATR */
+ max_combined = IXGBE_MAX_FDIR_INDICES;
+ } else {
+ /* support up to 16 queues with RSS */
+ max_combined = IXGBE_MAX_RSS_INDICES;
+ }
+
+ return max_combined;
+}
+
+static void ixgbe_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ /* report maximum channels */
+ ch->max_combined = ixgbe_max_channels(adapter);
+
+ /* report info for other vector */
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ /* record RSS queues */
+ ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
+
+ /* nothing else to report if RSS is disabled */
+ if (ch->combined_count == 1)
+ return;
+
+ /* we do not support ATR queueing if SR-IOV is enabled */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ return;
+
+ /* same thing goes for being DCB enabled */
+ if (netdev_get_num_tc(dev) > 1)
+ return;
+
+ /* if ATR is disabled we can exit */
+ if (!adapter->atr_sample_rate)
+ return;
+
+ /* report flow director queues as maximum channels */
+ ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
+}
+
+static int ixgbe_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+
+ /* verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != NON_Q_VECTORS)
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > ixgbe_max_channels(adapter))
+ return -EINVAL;
+
+ /* update feature limits from largest to smallest supported values */
+ adapter->ring_feature[RING_F_FDIR].limit = count;
+
+ /* cap RSS limit at 16 */
+ if (count > IXGBE_MAX_RSS_INDICES)
+ count = IXGBE_MAX_RSS_INDICES;
+ adapter->ring_feature[RING_F_RSS].limit = count;
+
+#ifdef IXGBE_FCOE
+ /* cap FCoE limit at 8 */
+ if (count > IXGBE_FCRETA_SIZE)
+ count = IXGBE_FCRETA_SIZE;
+ adapter->ring_feature[RING_F_FCOE].limit = count;
+
+#endif
+ /* use setup TC to update any traffic class queue mapping */
+ return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
+
+static int ixgbe_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status;
+ u8 sff8472_rev, addr_mode;
+ int ret_val = 0;
+ bool page_swap = false;
+
+ /* avoid concurrent i2c reads */
+ while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ msleep(100);
+
+ /* used by the service task */
+ set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ /* Check whether we support SFF-8472 or not */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_COMP,
+ &sff8472_rev);
+ if (status != 0) {
+ ret_val = -EIO;
+ goto err_out;
+ }
+
+ /* addressing-mode change (page swap) is not supported */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_SWAP,
+ &addr_mode);
+ if (status != 0) {
+ ret_val = -EIO;
+ goto err_out;
+ }
+
+ if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
+ e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
+ page_swap = true;
+ }
+
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ /* We have a SFP, but it does not support SFF-8472 */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have a SFP which supports a revision of SFF-8472. */
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+
+err_out:
+ clear_bit(__IXGBE_READ_I2C, &adapter->state);
+ return ret_val;
+}
+
+static int ixgbe_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u8 databyte = 0xFF;
+ int i = 0;
+ int ret_val = 0;
+
+ /* ixgbe_get_module_info is called before this function in all
+ * cases, so the SFF-8472 support checks done there do not need to
+ * be repeated here, and ee->len can be trusted to be a known value.
+ */
+
+ while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ msleep(100);
+ set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ /* Read the first block, SFF-8079 */
+ for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ if (status != 0) {
+ /* Error occurred while reading module */
+ ret_val = -EIO;
+ goto err_out;
+ }
+ data[i] = databyte;
+ }
+
+ /* If the second block is requested, check if SFF-8472 is supported. */
+ if (ee->len == ETH_MODULE_SFF_8472_LEN) {
+ if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
+ return -EOPNOTSUPP;
+
+ /* Read the second block, SFF-8472 */
+ for (i = ETH_MODULE_SFF_8079_LEN;
+ i < ETH_MODULE_SFF_8472_LEN; i++) {
+ status = hw->phy.ops.read_i2c_sff8472(hw,
+ i - ETH_MODULE_SFF_8079_LEN, &databyte);
+ if (status != 0) {
+ /* Error occurred while reading module */
+ ret_val = -EIO;
+ goto err_out;
+ }
+ data[i] = databyte;
+ }
+ }
+
+err_out:
+ clear_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ return ret_val;
+}
+
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings,
@@ -2732,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
+ .get_channels = ixgbe_get_channels,
+ .set_channels = ixgbe_set_channels,
.get_ts_info = ixgbe_get_ts_info,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
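The get_module_info/get_module_eeprom hooks added above follow the usual SFP layout: the base SFF-8079 page is read from I2C device address 0xA0, and when the compliance byte advertises SFF-8472 a second diagnostics page is read from 0xA2. A rough standalone sketch of that two-block read, with read_byte() as a hypothetical stand-in for the driver's I2C EEPROM accessors:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SFF_8079_LEN 256	/* base page, I2C device address 0xA0 */
#define SFF_8472_LEN 512	/* base page plus diagnostics at 0xA2 */

/* hypothetical accessor standing in for the driver's I2C EEPROM reads */
static int read_byte(uint8_t dev_addr, uint8_t offset, uint8_t *out)
{
	*out = (uint8_t)(dev_addr ^ offset);	/* fake data for the sketch */
	return 0;
}

static int read_module_eeprom(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len && i < SFF_8079_LEN; i++)
		if (read_byte(0xA0, (uint8_t)i, &buf[i]))
			return -1;

	/* the second block only exists if the module implements SFF-8472 */
	for (; i < len && i < SFF_8472_LEN; i++)
		if (read_byte(0xA2, (uint8_t)(i - SFF_8079_LEN), &buf[i]))
			return -1;

	return 0;
}

int main(void)
{
	uint8_t buf[SFF_8472_LEN];

	if (read_module_eeprom(buf, sizeof(buf)))
		return 1;
	printf("first byte: 0x%02x\n", buf[0]);
	return 0;
}

In practice this is the path that `ethtool -m <iface>` ends up exercising once these module hooks are registered in ixgbe_ethtool_ops.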
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 252850d9a3e0..f58db453a97e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -544,15 +544,14 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
skb_shinfo(skb)->gso_size);
first->bytecount += (first->gso_segs - 1) * *hdr_len;
- first->tx_flags |= IXGBE_TX_FLAGS_FSO;
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO;
}
/* set flag indicating FCOE to ixgbe_tx_map call */
- first->tx_flags |= IXGBE_TX_FLAGS_FCOE;
+ first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;
- /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
+ /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
- mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_transport_offset(skb) +
@@ -717,10 +716,8 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
/* Extra buffer to be shared by all DDPs for HW work around */
buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
- if (!buffer) {
- e_err(drv, "failed to allocate extra DDP buffer\n");
+ if (!buffer)
return -ENOMEM;
- }
dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index bf724da99375..3a02759b5e95 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 8c74f739011d..ef5f7a678ce1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
fcoe = &adapter->ring_feature[RING_F_FCOE];
/* limit ourselves based on feature limits */
- fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (fcoe_i) {
@@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (vmdq_i > 1 && fcoe_i) {
- /* reserve no more than number of CPUs */
- fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
-
/* alloc queues for FCoE separately */
fcoe->indices = fcoe_i;
fcoe->offset = vmdq_i * rss_i;
@@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
if (rss_i > 1 && adapter->atr_sample_rate) {
f = &adapter->ring_feature[RING_F_FDIR];
- f->indices = min_t(u16, num_online_cpus(), f->limit);
- rss_i = max_t(u16, rss_i, f->indices);
+ rss_i = f->indices = f->limit;
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
{
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
- int node = -1;
+ int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count, size;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
ring_count = txr_count + rxr_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- if (cpu_online(v_idx)) {
- cpu = v_idx;
- node = cpu_to_node(cpu);
+ if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ if (rss_i > 1 && adapter->atr_sample_rate) {
+ if (cpu_online(v_idx)) {
+ cpu = v_idx;
+ node = cpu_to_node(cpu);
+ }
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 20a5af6d87d0..68478d6dfa2d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -66,7 +66,7 @@ static char ixgbe_default_device_descr[] =
#define DRV_VERSION "3.11.33-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2012 Intel Corporation.";
+ "Copyright (c) 1999-2013 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -803,6 +803,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
/* Do the reset outside of interrupt context */
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ e_warn(drv, "initiating reset due to tx timeout\n");
ixgbe_service_event_schedule(adapter);
}
}
@@ -837,7 +838,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- rmb();
+ read_barrier_depends();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
@@ -850,9 +851,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
- if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
- ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
-
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -1401,6 +1399,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
/* set gso_size to avoid messing up TCP MSS */
skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
IXGBE_CB(skb)->append_cnt);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}
static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
@@ -1441,7 +1440,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
- ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
+ ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
if ((dev->features & NETIF_F_HW_VLAN_RX) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -2180,10 +2179,10 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
return;
if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
- u32 autoneg;
+ u32 speed;
bool link_up = false;
- hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (link_up)
return;
@@ -2787,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
/*
* set WTHRESH to encourage burst writeback, it should not be set
- * higher than 1 when ITR is 0 as it could cause false TX hangs
+ * higher than 1 when:
+ * - ITR is 0 as it could cause false TX hangs
+ * - ITR is set to > 100k int/sec and BQL is enabled
*
* In order to avoid issues WTHRESH + PTHRESH should always be equal
* to or less than the number of on chip descriptors, which is
* currently 40.
*/
+#if IS_ENABLED(CONFIG_BQL)
+ if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
+#else
if (!ring->q_vector || (ring->q_vector->itr < 8))
+#endif
txdctl |= (1 << 16); /* WTHRESH = 1 */
else
txdctl |= (8 << 16); /* WTHRESH = 8 */
@@ -2814,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->atr_sample_rate = 0;
}
+ /* initialize XPS */
+ if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
+ struct ixgbe_q_vector *q_vector = ring->q_vector;
+
+ if (q_vector)
+ netif_set_xps_queue(adapter->netdev,
+ &q_vector->affinity_mask,
+ ring->queue_index);
+ }
+
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
/* enable queue */
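The XPS hunk above seeds each Tx ring's transmit-packet-steering map from its q_vector affinity mask via netif_set_xps_queue(), the in-kernel counterpart of writing the queue's xps_cpus attribute in sysfs. A purely illustrative program (device name and one-CPU-per-queue layout are assumptions) that prints the equivalent sysfs writes:

#include <stdio.h>

/* build the CPU mask XPS would pin tx-queue <q> to, assuming the
 * hypothetical "one CPU per q_vector" layout used when ATR is active
 */
static unsigned long xps_mask_for_queue(unsigned int queue_index)
{
	return 1UL << queue_index;	/* CPU n serves queue n */
}

int main(void)
{
	unsigned int q;

	for (q = 0; q < 4; q++)
		printf("echo %lx > /sys/class/net/eth0/queues/tx-%u/xps_cpus\n",
		       xps_mask_for_queue(q), q);
	return 0;
}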
@@ -3996,25 +4011,25 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
**/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
- u32 autoneg;
- bool negotiation, link_up = false;
+ u32 speed;
+ bool autoneg, link_up = false;
u32 ret = IXGBE_ERR_LINK_SETUP;
if (hw->mac.ops.check_link)
- ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+ ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (ret)
goto link_cfg_out;
- autoneg = hw->phy.autoneg_advertised;
- if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
- ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
- &negotiation);
+ speed = hw->phy.autoneg_advertised;
+ if ((!speed) && (hw->mac.ops.get_link_capabilities))
+ ret = hw->mac.ops.get_link_capabilities(hw, &speed,
+ &autoneg);
if (ret)
goto link_cfg_out;
if (hw->mac.ops.setup_link)
- ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
+ ret = hw->mac.ops.setup_link(hw, speed, link_up);
link_cfg_out:
return ret;
}
@@ -4466,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- unsigned int rss;
+ unsigned int rss, fdir;
u32 fwsm;
#ifdef CONFIG_IXGBE_DCB
int j;
@@ -4481,38 +4496,57 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
- /* Set capability flags */
+ /* Set common capability flags and settings */
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82599;
+ adapter->atr_sample_rate = 20;
+ fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
+ adapter->ring_feature[RING_F_FDIR].limit = fdir;
+ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+#ifdef CONFIG_IXGBE_DCA
+ adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+ adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+#ifdef CONFIG_IXGBE_DCB
+ /* Default traffic class to use for FCoE */
+ adapter->fcoe.up = IXGBE_FCOE_DEFTC;
+#endif /* CONFIG_IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+
+ /* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
+
adapter->max_q_vectors = MAX_Q_VECTORS_82598;
+ adapter->ring_feature[RING_F_FDIR].limit = 0;
+ adapter->atr_sample_rate = 0;
+ adapter->fdir_pballoc = 0;
+#ifdef IXGBE_FCOE
+ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+#ifdef CONFIG_IXGBE_DCB
+ adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+ break;
+ case ixgbe_mac_82599EB:
+ if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X540:
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- case ixgbe_mac_82599EB:
- adapter->max_q_vectors = MAX_Q_VECTORS_82599;
- adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
- adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
- adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- /* Flow Director hash filters enabled */
- adapter->atr_sample_rate = 20;
- adapter->ring_feature[RING_F_FDIR].limit =
- IXGBE_MAX_FDIR_INDICES;
- adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
-#ifdef IXGBE_FCOE
- adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-#ifdef CONFIG_IXGBE_DCB
- /* Default traffic class to use for FCoE */
- adapter->fcoe.up = IXGBE_FCOE_DEFTC;
-#endif
-#endif /* IXGBE_FCOE */
break;
default:
break;
@@ -4871,7 +4905,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
*/
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
- (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -5534,6 +5568,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
break;
}
+ adapter->last_rx_ptp_check = jiffies;
+
if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
ixgbe_ptp_start_cyclecounter(adapter);
@@ -5614,6 +5650,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
* to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context).
*/
+ e_warn(drv, "initiating reset to clear Tx work after link loss\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
}
}
@@ -5678,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return;
+ /* concurrent i2c reads are not supported */
+ if (test_bit(__IXGBE_READ_I2C, &adapter->state))
+ return;
+
/* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return;
@@ -5738,8 +5779,8 @@ sfp_out:
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 autoneg;
- bool negotiation;
+ u32 speed;
+ bool autoneg = false;
if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
return;
@@ -5750,11 +5791,11 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
- autoneg = hw->phy.autoneg_advertised;
- if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
- hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+ speed = hw->phy.autoneg_advertised;
+ if ((!speed) && (hw->mac.ops.get_link_capabilities))
+ hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
if (hw->mac.ops.setup_link)
- hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
+ hw->mac.ops.setup_link(hw, speed, true);
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
@@ -5878,7 +5919,6 @@ static void ixgbe_service_task(struct work_struct *work)
struct ixgbe_adapter *adapter = container_of(work,
struct ixgbe_adapter,
service_task);
-
ixgbe_reset_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
ixgbe_sfp_link_config_subtask(adapter);
@@ -5886,7 +5926,11 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_watchdog_subtask(adapter);
ixgbe_fdir_reinit_subtask(adapter);
ixgbe_check_hang_subtask(adapter);
- ixgbe_ptp_overflow_check(adapter);
+
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
+ ixgbe_ptp_overflow_check(adapter);
+ ixgbe_ptp_rx_hang(adapter);
+ }
ixgbe_service_event_complete(adapter);
}
@@ -5899,6 +5943,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -5941,10 +5988,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
- /* mss_l4len_id: use 1 as index for TSO */
+ /* mss_l4len_id: use 0 as index for TSO */
mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
- mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_network_header_len(skb);
@@ -5966,12 +6012,9 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
- if (unlikely(skb->no_fcs))
- first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
- if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
- return;
- }
+ if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+ !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+ return;
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
@@ -6029,30 +6072,32 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
type_tucmd, mss_l4len_idx);
}
-static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
+#define IXGBE_SET_FLAG(_input, _flag, _result) \
+ ((_flag <= _result) ? \
+ ((u32)(_input & _flag) * (_result / _flag)) : \
+ ((u32)(_input & _flag) / (_flag / _result)))
+
+static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
- __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_DEXT);
+ u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS;
/* set HW vlan bit if vlan is present */
- if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
-
- if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
+ IXGBE_ADVTXD_DCMD_VLE);
/* set segmentation enable bits for TSO/FSO */
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
-#endif
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
+ IXGBE_ADVTXD_DCMD_TSE);
+
+ /* set timestamp bit if present */
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
+ IXGBE_ADVTXD_MAC_TSTAMP);
/* insert frame checksum */
- if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+ cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
return cmd_type;
}
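The IXGBE_SET_FLAG() macro introduced above translates a single-bit software flag into the corresponding descriptor bit without branching on the input: the masked bit is scaled by the constant ratio of the two (power-of-two) masks, multiplying when the destination bit is higher and dividing when it is lower. A standalone check of that behaviour with made-up bit positions:

#include <assert.h>
#include <stdint.h>

#define SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ? \
	 ((uint32_t)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((uint32_t)((_input) & (_flag)) / ((_flag) / (_result))))

int main(void)
{
	uint32_t tx_flags = 1u << 3;	/* hypothetical HW_VLAN software flag */
	uint32_t vle_bit  = 1u << 30;	/* hypothetical descriptor VLE bit */

	/* flag set: the descriptor bit comes out; flag clear: zero */
	assert(SET_FLAG(tx_flags, 1u << 3, vle_bit) == vle_bit);
	assert(SET_FLAG(0, 1u << 3, vle_bit) == 0);

	/* also works when the source bit sits above the destination bit */
	assert(SET_FLAG(1u << 30, 1u << 30, 1u << 3) == (1u << 3));
	return 0;
}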
@@ -6060,36 +6105,27 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
u32 tx_flags, unsigned int paylen)
{
- __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
/* enable L4 checksum for TSO and TX checksum offload */
- if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_CSUM,
+ IXGBE_ADVTXD_POPTS_TXSM);
/* enable IPv4 checksum for TSO */
- if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
-
- /* use index 1 context for TSO/FSO/FCOE */
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
-#endif
- olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_IPV4,
+ IXGBE_ADVTXD_POPTS_IXSM);
/*
* Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running
*/
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TXSW)
-#endif
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_CC,
+ IXGBE_ADVTXD_CC);
- tx_desc->read.olinfo_status = olinfo_status;
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
@@ -6099,22 +6135,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
const u8 hdr_len)
{
- dma_addr_t dma;
struct sk_buff *skb = first->skb;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
- __le32 cmd_type;
+ u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IXGBE_TX_DESC(tx_ring, i);
- ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
- cmd_type = ixgbe_tx_cmd_type(tx_flags);
+ ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
#ifdef IXGBE_FCOE
if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
@@ -6128,19 +6164,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
#endif
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
+ tx_buffer = first;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- for (;;) {
while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
- cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+ cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
i++;
tx_desc++;
@@ -6148,18 +6187,18 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IXGBE_MAX_DATA_PER_TXD;
size -= IXGBE_MAX_DATA_PER_TXD;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
}
if (likely(!data_len))
break;
- tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
i++;
tx_desc++;
@@ -6167,6 +6206,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
#ifdef IXGBE_FCOE
size = min_t(unsigned int, data_len, skb_frag_size(frag));
@@ -6177,22 +6217,13 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
-
- frag++;
}
/* write last descriptor with RS and EOP bits */
- cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
- tx_desc->read.cmd_type_len = cmd_type;
+ cmd_type |= size | IXGBE_TXD_CMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
@@ -6353,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
+#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
- smp_processor_id();
-#ifdef IXGBE_FCOE
- __be16 protocol = vlan_get_protocol(skb);
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_ring_feature *f;
+ int txq;
- if (((protocol == htons(ETH_P_FCOE)) ||
- (protocol == htons(ETH_P_FIP))) &&
- (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
- struct ixgbe_ring_feature *f;
+ /*
+ * only execute the code below if protocol is FCoE
+ * or FIP and we have FCoE enabled on the adapter
+ */
+ switch (vlan_get_protocol(skb)) {
+ case __constant_htons(ETH_P_FCOE):
+ case __constant_htons(ETH_P_FIP):
+ adapter = netdev_priv(dev);
- f = &adapter->ring_feature[RING_F_FCOE];
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ break;
+ default:
+ return __netdev_pick_tx(dev, skb);
+ }
- while (txq >= f->indices)
- txq -= f->indices;
- txq += adapter->ring_feature[RING_F_FCOE].offset;
+ f = &adapter->ring_feature[RING_F_FCOE];
- return txq;
- }
-#endif
+ txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+ smp_processor_id();
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- while (unlikely(txq >= dev->real_num_tx_queues))
- txq -= dev->real_num_tx_queues;
- return txq;
- }
+ while (txq >= f->indices)
+ txq -= f->indices;
- return skb_tx_hash(dev, skb);
+ return txq + f->offset;
}
+#endif
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
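With the rewrite above, ixgbe_select_queue() only special-cases FCoE/FIP frames and folds whatever queue hint the stack recorded into the FCoE ring range [offset, offset + indices); everything else falls through to __netdev_pick_tx(). A small sketch of that folding with hypothetical numbers:

#include <stdio.h>

/* fold an arbitrary queue hint into [offset, offset + indices),
 * mirroring the while-loop in the driver
 */
static unsigned int fcoe_queue(unsigned int hint, unsigned int indices,
			       unsigned int offset)
{
	while (hint >= indices)
		hint -= indices;
	return hint + offset;
}

int main(void)
{
	/* hypothetical layout: 8 FCoE rings starting at absolute queue 16 */
	printf("%u\n", fcoe_queue(3, 8, 16));	/* -> 19 */
	printf("%u\n", fcoe_queue(21, 8, 16));	/* -> 21 (21 mod 8 = 5, + 16) */
	return 0;
}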
@@ -6445,6 +6478,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
+
+ /* schedule check for Tx timestamp */
+ adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
+ schedule_work(&adapter->ptp_tx_work);
}
#ifdef CONFIG_PCI_IOV
@@ -6453,7 +6491,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
* Tx switch had been disabled.
*/
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- tx_flags |= IXGBE_TX_FLAGS_TXSW;
+ tx_flags |= IXGBE_TX_FLAGS_CC;
#endif
/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
@@ -6784,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
}
}
+#endif /* CONFIG_IXGBE_DCB */
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
@@ -6809,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
ixgbe_close(dev);
ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_IXGBE_DCB
if (tc) {
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
@@ -6831,15 +6871,28 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
adapter->dcb_cfg.pfc_mode_enable = false;
}
- ixgbe_init_interrupt_scheme(adapter);
ixgbe_validate_rtr(adapter, tc);
+
+#endif /* CONFIG_IXGBE_DCB */
+ ixgbe_init_interrupt_scheme(adapter);
+
if (netif_running(dev))
- ixgbe_open(dev);
+ return ixgbe_open(dev);
return 0;
}
-#endif /* CONFIG_IXGBE_DCB */
+#ifdef CONFIG_PCI_IOV
+void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+ rtnl_unlock();
+}
+
+#endif
void ixgbe_do_reset(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -6985,7 +7038,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
+static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr)
{
@@ -7062,7 +7115,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
}
static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev)
+ struct net_device *dev,
+ u32 filter_mask)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
u16 mode;
@@ -7082,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
+#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
+#endif
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -7194,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
+ unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
- unsigned int indices = num_possible_cpus();
- unsigned int dcb_max = 0;
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -7245,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_save_state(pdev);
+ if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
- if (ii->mac == ixgbe_mac_82598EB)
- dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
- IXGBE_MAX_RSS_INDICES);
- else
- dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
- IXGBE_MAX_FDIR_INDICES);
+ /* 8 TC w/ 4 queues per TC */
+ indices = 4 * MAX_TRAFFIC_CLASS;
+#else
+ indices = IXGBE_MAX_RSS_INDICES;
#endif
+ }
- if (ii->mac == ixgbe_mac_82598EB)
- indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
- else
- indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
-
-#ifdef IXGBE_FCOE
- indices += min_t(unsigned int, num_possible_cpus(),
- IXGBE_MAX_FCOE_INDICES);
-#endif
- indices = max_t(unsigned int, dcb_max, indices);
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
@@ -7366,7 +7411,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#ifdef CONFIG_PCI_IOV
- ixgbe_enable_sriov(adapter, ii);
+ /* SR-IOV not supported on the 82598 */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ goto skip_sriov;
+ /* Mailbox */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+ ixgbe_enable_sriov(adapter);
+ pci_sriov_set_totalvfs(pdev, 63);
+skip_sriov:
#endif
netdev->features = NETIF_F_SG |
@@ -7410,13 +7463,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+ unsigned int fcoe_l;
+
if (hw->mac.ops.get_device_caps) {
hw->mac.ops.get_device_caps(hw, &device_caps);
if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
}
- adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+ fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
+ adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
netdev->features |= NETIF_F_FSO |
NETIF_F_FCOE_CRC;
@@ -7444,9 +7501,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
e_dev_err("invalid MAC address\n");
err = -EIO;
goto err_sw_init;
@@ -7623,8 +7679,14 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- ixgbe_disable_sriov(adapter);
-
+#ifdef CONFIG_PCI_IOV
+ /*
+ * Only disable SR-IOV on unload if the user specified the now
+ * deprecated max_vfs module parameter.
+ */
+ if (max_vfs)
+ ixgbe_disable_sriov(adapter);
+#endif
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
@@ -7729,6 +7791,8 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
if (vfdev) {
e_dev_err("Issuing VFLR to VF %d\n", vf);
pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+ /* Free device reference count */
+ pci_dev_put(vfdev);
}
pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -7838,6 +7902,7 @@ static struct pci_driver ixgbe_driver = {
.resume = ixgbe_resume,
#endif
.shutdown = ixgbe_shutdown,
+ .sriov_configure = ixgbe_pci_sriov_configure,
.err_handler = &ixgbe_err_handler
};
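The probe changes above wire the driver into the sysfs-based VF provisioning path: pci_sriov_set_totalvfs(pdev, 63) advertises the limit and the new .sriov_configure hook is invoked when userspace writes the device's sriov_numvfs attribute, replacing the deprecated max_vfs module parameter. A hypothetical userspace sketch of that request (the PCI address is a placeholder):

#include <stdio.h>

/* request <num_vfs> VFs through the sriov_numvfs attribute served by
 * the .sriov_configure hook registered above
 */
static int request_vfs(const char *pci_addr, unsigned int num_vfs)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/pci/devices/%s/sriov_numvfs", pci_addr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%u\n", num_vfs);
	return fclose(f);
}

int main(void)
{
	/* placeholder BDF; writing 0 disables the VFs again */
	return request_vfs("0000:01:00.0", 8);
}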
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 1f3e32b576a5..d4a64e665398 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 42dd65e6ac97..e44ff47659b5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 71659edf81aa..060d2ad2ac96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -494,11 +494,9 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
@@ -854,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_IDENTIFIER,
- &identifier);
+ &identifier);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
/* LAN ID is needed for sfp_type determination */
@@ -872,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_1GBE_COMP_CODES,
&comp_codes_1g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES,
&comp_codes_10g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
/* ID Module
@@ -986,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (hw->phy.type != ixgbe_phy_nl) {
hw->phy.id = identifier;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE0,
- &oui_bytes[0]);
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
vendor_oui =
@@ -1206,6 +1190,22 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
}
/**
+ * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ sff8472_data);
+}
+
+/**
* ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: EEPROM byte offset to write
@@ -1293,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
+ ixgbe_i2c_bus_clear(hw);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
msleep(100);
- ixgbe_i2c_bus_clear(hw);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte read error - Retrying.\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index cc18165b4c05..886a3431cf5b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,7 @@
#include "ixgbe_type.h"
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
/* EEPROM byte offsets */
#define IXGBE_SFF_IDENTIFIER 0x0
@@ -41,6 +42,8 @@
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@@ -51,6 +54,7 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_ADDRESSING_MODE 0x4
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -88,6 +92,9 @@
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+/* SFP+ SFF-8472 Compliance code */
+#define IXGBE_SFF_SFF_8472_UNSUP 0x00
+
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
@@ -98,7 +105,6 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@@ -126,6 +132,8 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
+s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 1a751c9d09c4..331987d6815c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -96,15 +96,12 @@
#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL
#define IXGBE_OVERFLOW_PERIOD (HZ * 30)
+#define IXGBE_PTP_TX_TIMEOUT (HZ * 15)
#ifndef NSECS_PER_SEC
#define NSECS_PER_SEC 1000000000ULL
#endif
-static struct sock_filter ptp_filter[] = {
- PTP_FILTER
-};
-
/**
* ixgbe_ptp_setup_sdp
* @hw: the hardware private structure
@@ -405,149 +402,145 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
}
}
-
/**
- * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow
- * @work: structure containing information about this work task
+ * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow
+ * @adapter: private adapter struct
*
- * this work function is scheduled to continue reading the timecounter
+ * this watchdog task periodically reads the timecounter
* in order to prevent missing when the system time registers wrap
- * around. This needs to be run approximately twice a minute when no
- * PTP activity is occurring.
+ * around. This needs to be run approximately twice a minute.
*/
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
{
- unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
+ bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
+ IXGBE_OVERFLOW_PERIOD);
struct timespec ts;
- if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
- (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
+ if (timeout) {
ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
adapter->last_overflow_check = jiffies;
}
}
/**
- * ixgbe_ptp_match - determine if this skb matches a ptp packet
- * @skb: pointer to the skb
- * @hwtstamp: pointer to the hwtstamp_config to check
- *
- * Determine whether the skb should have been timestamped, assuming the
- * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
- * should have a timestamp waiting in the registers, and 0 otherwise.
+ * ixgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @adapter: private network adapter structure
*
- * V1 packets have to check the version type to determine whether they are
- * correct. However, we can't directly access the data because it might be
- * fragmented in the SKB, in paged memory. In order to work around this, we
- * use skb_copy_bits which will properly copy the data whether it is in the
- * paged memory fragments or not. We have to copy the IP header as well as the
- * message type.
+ * this watchdog task is scheduled to detect error case where hardware has
+ * dropped an Rx packet that was timestamped when the ring is full. The
+ * particular error is rare but leaves the device in a state unable to timestamp
+ * any future packets.
*/
-static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
+void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
{
- struct iphdr iph;
- u8 msgtype;
- unsigned int type, offset;
-
- if (rx_filter == HWTSTAMP_FILTER_NONE)
- return 0;
-
- type = sk_run_filter(skb, ptp_filter);
-
- if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
- return type & PTP_CLASS_V2;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_ring *rx_ring;
+ u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ unsigned long rx_event;
+ int n;
- /* For the remaining cases actually check message type */
- switch (type) {
- case PTP_CLASS_V1_IPV4:
- skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
- offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
- break;
- case PTP_CLASS_V1_IPV6:
- offset = OFF_PTP6 + OFF_PTP_CONTROL;
- break;
- default:
- /* other cases invalid or handled above */
- return 0;
+ /* if we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) {
+ adapter->last_rx_ptp_check = jiffies;
+ return;
}
- /* Make sure our buffer is long enough */
- if (skb->len < offset)
- return 0;
+ /* determine the most recent watchdog or rx_timestamp event */
+ rx_event = adapter->last_rx_ptp_check;
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
- skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));
+ /* only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5*HZ)) {
+ IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+ adapter->last_rx_ptp_check = jiffies;
- switch (rx_filter) {
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
- break;
- default:
- return 0;
+ e_warn(drv, "clearing RX Timestamp hang");
}
}
/**
* ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
- * @q_vector: structure containing interrupt and ring information
- * @skb: particular skb to send timestamp with
+ * @adapter: the private adapter struct
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the shhwtstamps structure which
* is passed up the network stack
*/
-void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb)
+static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
{
- struct ixgbe_adapter *adapter;
- struct ixgbe_hw *hw;
+ struct ixgbe_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval = 0, ns;
- u32 tsynctxctl;
unsigned long flags;
- /* we cannot process timestamps on a ring without a q_vector */
- if (!q_vector || !q_vector->adapter)
- return;
-
- adapter = q_vector->adapter;
- hw = &adapter->hw;
-
- tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;
- /*
- * if TX timestamp is not valid, exit after clearing the
- * timestamp registers
- */
- if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID))
- return;
-
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_cyc2time(&adapter->tc, regval);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
+ skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
+
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+}
+
+/**
+ * ixgbe_ptp_tx_hwtstamp_work
+ * @work: pointer to the work struct
+ *
+ * This work item polls the TSYNCTXCTL valid bit to determine when a Tx hardware
+ * timestamp has been taken for the current skb. It is necessary because the
+ * descriptor's "done" bit does not correlate with the timestamp event.
+ */
+static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter,
+ ptp_tx_work);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+ IXGBE_PTP_TX_TIMEOUT);
+ u32 tsynctxctl;
+
+ /* we have to have a valid skb */
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (timeout) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ e_warn(drv, "clearing Tx Timestamp hang");
+ return;
+ }
+
+ tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID)
+ ixgbe_ptp_tx_hwtstamp(adapter);
+ else
+ /* reschedule to keep checking if it's not available yet */
+ schedule_work(&adapter->ptp_tx_work);
}
/**
- * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
* @q_vector: structure containing interrupt and ring information
- * @rx_desc: the rx descriptor
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the shhwtstamps structure which
* is passed up the network stack
*/
-void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb)
{
struct ixgbe_adapter *adapter;
struct ixgbe_hw *hw;
@@ -563,37 +556,17 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
adapter = q_vector->adapter;
hw = &adapter->hw;
- if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
- return;
-
+ /*
+ * Read the tsyncrxctl register afterwards in order to prevent taking an
+ * I/O hit on every packet.
+ */
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
-
- /* Check if we have a valid timestamp and make sure the skb should
- * have been timestamped */
if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
return;
- /*
- * Always read the registers, in order to clear a possible fault
- * because of stagnant RX timestamp values for a packet that never
- * reached the queue.
- */
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
- /*
- * If the timestamp bit is set in the packet's descriptor, we know the
- * timestamp belongs to this packet. No other packet can be
- * timestamped until the registers for timestamping have been read.
- * Therefor only one packet with this bit can be in the queue at a
- * time, and the rx timestamp values that were in the registers belong
- * to this packet.
- *
- * If nothing went wrong, then it should have a skb_shared_tx that we
- * can turn into a skb_shared_hwtstamps.
- */
- if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
- return;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_cyc2time(&adapter->tc, regval);
@@ -660,11 +633,11 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
+ tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
+ tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -698,9 +671,6 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
return 0;
}
- /* Store filter value for later use */
- adapter->rx_hwtstamp_filter = config.rx_filter;
-
/* define ethertype filter for timestamping L2 packets */
if (is_l2)
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
@@ -902,11 +872,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
return;
}
- /* initialize the ptp filter */
- if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
- e_dev_warn("ptp_filter_init failed\n");
-
spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
@@ -938,6 +905,12 @@ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
ixgbe_ptp_setup_sdp(adapter);
+ cancel_work_sync(&adapter->ptp_tx_work);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ }
+
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
adapter->ptp_clock = NULL;
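For context, the hwtstamp_config handling that ixgbe_ptp_hwtstamp_ioctl() keeps
in this patch is driven from userspace through the SIOCSHWTSTAMP ioctl. A
minimal sketch of such a request follows; it is not part of the patch, and the
interface name "eth0" is an assumed placeholder.

/* Hypothetical userspace sketch: enable Tx timestamping and the PTP V2
 * event Rx filter that the driver validates in ixgbe_ptp_hwtstamp_ioctl().
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return 1;
	}
	printf("hwtstamp set: tx_type=%d rx_filter=%d\n",
	       cfg.tx_type, cfg.rx_filter);
	return 0;
}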
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 85cddac673ef..d44b4d21268c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -44,50 +44,11 @@
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
- const struct ixgbe_info *ii)
+static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
- int pre_existing_vfs = 0;
-
- pre_existing_vfs = pci_num_vf(adapter->pdev);
- if (!pre_existing_vfs && !adapter->num_vfs)
- return;
-
- /* If there are pre-existing VFs then we have to force
- * use of that many because they were not deleted the last
- * time someone removed the PF driver. That would have
- * been because they were allocated to guest VMs and can't
- * be removed. Go ahead and just re-enable the old amount.
- * If the user wants to change the number of VFs they can
- * use ethtool while making sure no VFs are allocated to
- * guest VMs... i.e. the right way.
- */
- if (pre_existing_vfs) {
- adapter->num_vfs = pre_existing_vfs;
- dev_warn(&adapter->pdev->dev, "Virtual Functions already "
- "enabled for this device - Please reload all "
- "VF drivers to avoid spoofed packet errors\n");
- } else {
- int err;
- /*
- * The 82599 supports up to 64 VFs per physical function
- * but this implementation limits allocation to 63 so that
- * basic networking resources are still available to the
- * physical function. If the user requests greater thn
- * 63 VFs then it is an error - reset to default of zero.
- */
- adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
-
- err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
- if (err) {
- e_err(probe, "Failed to enable PCI sriov: %d\n", err);
- adapter->num_vfs = 0;
- return;
- }
- }
adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
@@ -128,12 +89,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
kcalloc(adapter->num_vfs,
sizeof(struct vf_data_storage), GFP_KERNEL);
if (adapter->vfinfo) {
- /* Now that we're sure SR-IOV is enabled
- * and memory allocated set up the mailbox parameters
- */
- ixgbe_init_mbx_params_pf(hw);
- memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
-
/* limit traffic classes based on VFs enabled */
if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
(adapter->num_vfs < 16)) {
@@ -157,10 +112,62 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
/* enable spoof checking for all VFs */
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].spoofchk_enabled = true;
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+/* Note this function is called when the user wants to enable SR-IOV
+ * VFs using the now deprecated module parameter
+ */
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
+{
+ int pre_existing_vfs = 0;
+
+ pre_existing_vfs = pci_num_vf(adapter->pdev);
+ if (!pre_existing_vfs && !adapter->num_vfs)
return;
+
+ if (!pre_existing_vfs)
+ dev_warn(&adapter->pdev->dev,
+ "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
+
+ /* If there are pre-existing VFs then we have to force
+ * use of that many - override any module parameter value.
+ * This may result from the user unloading the PF driver
+ * while VFs were assigned to guest VMs or because the VFs
+ * have been created via the new PCI SR-IOV sysfs interface.
+ */
+ if (pre_existing_vfs) {
+ adapter->num_vfs = pre_existing_vfs;
+ dev_warn(&adapter->pdev->dev,
+ "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
+ } else {
+ int err;
+ /*
+ * The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+	 * physical function. If the user requests greater than
+ * 63 VFs then it is an error - reset to default of zero.
+ */
+ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+ adapter->num_vfs = 0;
+ return;
+ }
}
- /* Oh oh */
+ if (!__ixgbe_enable_sriov(adapter))
+ return;
+
+ /* If we have gotten to this point then there is no memory available
+ * to manage the VF devices - print message and bail.
+ */
e_err(probe, "Unable to allocate memory for VF Data Storage - "
"SRIOV disabled\n");
ixgbe_disable_sriov(adapter);
@@ -200,11 +207,12 @@ static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
}
#endif /* #ifdef CONFIG_PCI_IOV */
-void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 gpie;
u32 vmdctl;
+ int rss;
/* set num VFs to 0 to prevent access to vfinfo */
adapter->num_vfs = 0;
@@ -219,7 +227,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
/* if SR-IOV is already disabled then there is nothing to do */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
- return;
+ return 0;
#ifdef CONFIG_PCI_IOV
/*
@@ -229,7 +237,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
*/
if (ixgbe_vfs_are_assigned(adapter)) {
e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
- return;
+ return -EPERM;
}
/* disable iov and allow time for transactions to clear */
pci_disable_sriov(adapter->pdev);
@@ -252,10 +260,94 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->ring_feature[RING_F_VMDQ].offset = 0;
+ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ adapter->ring_feature[RING_F_RSS].limit = rss;
+
/* take a breather then clean up driver data */
msleep(100);
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ return 0;
+}
+
+static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
+ int err = 0;
+ int i;
+ int pre_existing_vfs = pci_num_vf(dev);
+
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ err = ixgbe_disable_sriov(adapter);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ goto out;
+
+ if (err)
+ goto err_out;
+
+ /* While the SR-IOV capability structure reports total VFs to be
+ * 64 we limit the actual number that can be allocated to 63 so
+ * that some transmit/receive resources can be reserved to the
+ * PF. The PCI bus driver already checks for other values out of
+ * range.
+ */
+ if (num_vfs > 63) {
+ err = -EPERM;
+ goto err_out;
+ }
+
+ adapter->num_vfs = num_vfs;
+
+ err = __ixgbe_enable_sriov(adapter);
+ if (err)
+ goto err_out;
+
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_vf_configuration(dev, (i | 0x10000000));
+
+ err = pci_enable_sriov(dev, num_vfs);
+ if (err) {
+ e_dev_warn("Failed to enable PCI sriov: %d\n", err);
+ goto err_out;
+ }
+ ixgbe_sriov_reinit(adapter);
+
+out:
+ return num_vfs;
+
+err_out:
+ return err;
+#endif
+ return 0;
+}
+
+static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
+ int err;
+ u32 current_flags = adapter->flags;
+
+ err = ixgbe_disable_sriov(adapter);
+
+ /* Only reinit if no error and state changed */
+ if (!err && current_flags != adapter->flags) {
+ /* ixgbe_disable_sriov() doesn't clear VMDQ flag */
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+#ifdef CONFIG_PCI_IOV
+ ixgbe_sriov_reinit(adapter);
+#endif
+ }
+
+ return err;
+}
+
+int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return ixgbe_pci_sriov_disable(dev);
+ else
+ return ixgbe_pci_sriov_enable(dev, num_vfs);
}
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
@@ -447,15 +539,6 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
- u16 vid, u16 qos, u32 vf)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
-
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
-}
-
static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 1be1d30e4e78..4713f9fc7f46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -41,12 +41,20 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
-void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
+int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
- const struct ixgbe_info *ii);
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter);
#endif
+int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+static inline void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
+ u16 vid, u16 qos, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
+}
#endif /* _IXGBE_SRIOV_H_ */
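The new ixgbe_pci_sriov_configure() callback above is what the PCI core invokes
when the sriov_numvfs sysfs attribute is written, which is the interface the
deprecation warning points users to. A rough illustration of that usage follows;
it is not part of the patch and the PCI address is only a placeholder.

/* Hypothetical sketch: request 4 VFs through the PCI sysfs interface.
 * The device address 0000:04:00.0 is a placeholder, not an assumption
 * about any particular system.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:04:00.0/sriov_numvfs";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Writing 0 disables SR-IOV; a positive value asks the driver to
	 * enable that many VFs via its sriov_configure callback.
	 */
	fprintf(f, "4\n");
	if (fclose(f) != 0) {
		perror("fclose");
		return 1;
	}
	return 0;
}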
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 16ddf14e8ba4..d118def16f35 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 9cd8a13711d3..6652e96c352d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -2822,7 +2822,7 @@ struct ixgbe_mac_operations {
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
- s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
@@ -2869,12 +2869,12 @@ struct ixgbe_phy_operations {
s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
- s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
- bool);
+ s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
s32 (*check_overtemp)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index c73b92993391..66c5e946284e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -72,14 +72,13 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
 * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ return hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
}
@@ -879,6 +878,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_byte = &ixgbe_read_i2c_byte_generic,
.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 8f2070439b59..c9d0c12d6f04 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -99,6 +99,7 @@ static int ixgbevf_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_DUMMY1;
ecmd->port = -1;
+ hw->mac.get_link_status = 1;
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 257357ae66c3..c3db6cd69b68 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -750,12 +750,37 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
struct ixgbevf_adapter *adapter = data;
+ struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
+ u32 msg;
+ bool got_ack = false;
hw->mac.get_link_status = 1;
+ if (!hw->mbx.ops.check_for_ack(hw))
+ got_ack = true;
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies);
+ if (!hw->mbx.ops.check_for_msg(hw)) {
+ hw->mbx.ops.read(hw, &msg, 1);
+
+ if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 1));
+ adapter->link_up = false;
+ }
+
+ if (msg & IXGBE_VT_MSGTYPE_NACK)
+ dev_info(&pdev->dev,
+ "Last Request of type %2.2x to PF Nacked\n",
+ msg & 0xFF);
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
+ }
+
+ /* checking for the ack clears the PFACK bit. Place
+ * it back in the v2p_mailbox cache so that anyone
+ * polling for an ack will not miss it
+ */
+ if (got_ack)
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -2095,6 +2120,9 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int i;
+ if (!adapter->link_up)
+ return;
+
UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
adapter->stats.vfgprc);
UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
@@ -2217,9 +2245,23 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
if (link_up) {
if (!netif_carrier_ok(netdev)) {
- hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
- (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- 10 : 1);
+ char *link_speed_string;
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ link_speed_string = "10 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ link_speed_string = "1 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
+ link_speed_string = "100 Mbps";
+ break;
+ default:
+ link_speed_string = "unknown speed";
+ break;
+ }
+ dev_info(&adapter->pdev->dev,
+ "NIC Link is Up, %s\n", link_speed_string);
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
}
@@ -2227,7 +2269,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
adapter->link_up = false;
adapter->link_speed = 0;
if (netif_carrier_ok(netdev)) {
- hw_dbg(&adapter->hw, "NIC Link is Down\n");
+ dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
}
@@ -3328,8 +3370,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_sw_init;
/* The HW MAC address was set and/or determined in sw_init */
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
-
if (!is_valid_ether_addr(netdev->dev_addr)) {
pr_err("invalid MAC address\n");
err = -EIO;
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index bc58f1dc22f5..5409fe876a44 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -695,9 +695,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct korina_private *lp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, lp->dev->name);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index c124e67a1a1c..6a2127489af7 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -302,9 +302,9 @@ ltq_etop_hw_init(struct net_device *dev)
static void
ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "Lantiq ETOP");
- strcpy(info->bus_info, "internal");
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
+ strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int
@@ -393,8 +393,8 @@ ltq_etop_mdio_probe(struct net_device *dev)
return -ENODEV;
}
- phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link,
- 0, priv->pldata->mii_mode);
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &ltq_etop_mdio_link, priv->pldata->mii_mode);
if (IS_ERR(phydev)) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -655,7 +655,7 @@ ltq_etop_init(struct net_device *dev)
/* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. */
if (random_mac)
- dev->addr_assign_type |= NET_ADDR_RANDOM;
+ dev->addr_assign_type = NET_ADDR_RANDOM;
ltq_etop_set_multicast_list(dev);
err = ltq_etop_mdio_init(dev);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 84c13263c514..29140502b71a 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1879,12 +1879,10 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
memset(rxq->rx_desc_area, 0, size);
rxq->rx_desc_area_size = size;
- rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
- GFP_KERNEL);
- if (rxq->rx_skb == NULL) {
- netdev_err(mp->dev, "can't allocate rx skb ring\n");
+ rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
+ GFP_KERNEL);
+ if (rxq->rx_skb == NULL)
goto out_free;
- }
rx_desc = rxq->rx_desc_area;
for (i = 0; i < rxq->rx_ring_size; i++) {
@@ -2789,7 +2787,7 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
phy_reset(mp);
- phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);
+ phy_attach(mp->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_GMII);
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
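The rxq_init() change above replaces an open-coded kmalloc(n * size) with
kmalloc_array(), which fails instead of wrapping around if the multiplication
would overflow (the separate out-of-memory message is dropped because the
allocator already warns on failure). A rough userspace sketch of the same
overflow check, using made-up names, is shown below.

/* Illustrative only: refuse an array allocation whose size calculation
 * would overflow, mirroring what kmalloc_array() does in the kernel.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_array_checked(size_t n, size_t elem_size)
{
	if (elem_size && n > SIZE_MAX / elem_size)
		return NULL;	/* n * elem_size would overflow */
	return malloc(n * elem_size);
}

int main(void)
{
	void **ring;
	size_t ring_size = 512;	/* stand-in for rxq->rx_ring_size */

	ring = alloc_array_checked(ring_size, sizeof(*ring));
	if (!ring) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	printf("allocated %zu pointers\n", ring_size);
	free(ring);
	return 0;
}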
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 74f1c157a480..77b7c80262f4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -164,7 +164,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!bus->irq) {
- dev_err(&pdev->dev, "Cannot allocate PHY IRQ array\n");
mdiobus_free(bus);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b6025c305e10..cd345b8969bc 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -12,7 +12,6 @@
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 10d678d3dd01..037ed866c22f 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -627,7 +627,6 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
memcpy(oldMac, dev->dev_addr, ETH_ALEN);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
netif_addr_lock_bh(dev);
update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
@@ -1391,7 +1390,7 @@ static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
struct phy_device *phy = pep->phy;
ethernet_phy_reset(pep);
- phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
+ phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
@@ -1444,10 +1443,10 @@ static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void pxa168_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, DRIVER_NAME, 32);
- strncpy(info->version, DRIVER_VERSION, 32);
- strncpy(info->fw_version, "N/A", 32);
- strncpy(info->bus_info, "N/A", 32);
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static const struct ethtool_ops pxa168_ethtool_ops = {
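The strcpy()/strncpy() to strlcpy() conversions in the korina, lantiq_etop and
pxa168_eth get_drvinfo handlers above all follow the same pattern: bound the
copy by sizeof() of the destination and guarantee NUL termination. The sketch
below illustrates those semantics with a local stand-in for strlcpy(), since
the helper is a kernel/BSD function rather than standard C.

/* Illustrative only: mimic the kernel's strlcpy() semantics to show why
 * bounding by the destination size is safer than strcpy()/strncpy().
 */
#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';	/* always NUL-terminated, unlike strncpy() */
	}
	return len;		/* length the caller wanted to copy */
}

int main(void)
{
	char driver[8];

	my_strlcpy(driver, "a-very-long-driver-name", sizeof(driver));
	printf("'%s' (truncated but terminated)\n", driver);
	return 0;
}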
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 5544a1fe2f94..171f4b3dda07 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3855,7 +3855,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return dev;
}
@@ -3917,10 +3916,9 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* space for skge@pci:0000:04:00.0 */
hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ strlen(pci_name(pdev)) + 1, GFP_KERNEL);
- if (!hw) {
- dev_err(&pdev->dev, "cannot allocate hardware struct\n");
+ if (!hw)
goto err_out_free_regions;
- }
+
sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
hw->pdev = pdev;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 3269eb38cc57..fc07ca35721b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4801,7 +4801,6 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return dev;
}
@@ -4970,10 +4969,8 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ strlen(pci_name(pdev)) + 1, GFP_KERNEL);
- if (!hw) {
- dev_err(&pdev->dev, "cannot allocate hardware struct\n");
+ if (!hw)
goto err_out_free_regions;
- }
hw->pdev = pdev;
sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 03447dad07e9..00f25b5f297f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -35,6 +35,8 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
+#include <linux/in.h>
+#include <net/ip.h>
#include "mlx4_en.h"
#include "en_port.h"
@@ -494,7 +496,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
}
mlx4_en_free_resources(priv);
@@ -589,7 +591,7 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
}
priv->prof->rss_rings = rss_rings;
@@ -664,27 +666,90 @@ static int mlx4_en_validate_flow(struct net_device *dev,
if ((cmd->fs.flow_type & FLOW_EXT)) {
if (cmd->fs.m_ext.vlan_etype ||
- !(cmd->fs.m_ext.vlan_tci == 0 ||
- cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
+ !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
+ 0 ||
+ (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
+ cpu_to_be16(VLAN_VID_MASK)))
return -EINVAL;
+
+ if (cmd->fs.m_ext.vlan_tci) {
+ if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+
+ }
}
return 0;
}
+static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
+ struct list_head *rule_list_h,
+ struct mlx4_spec_list *spec_l2,
+ unsigned char *mac)
+{
+ int err = 0;
+ __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
+ memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
+
+ if ((cmd->fs.flow_type & FLOW_EXT) &&
+ (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
+ spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
+ spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
+ }
+
+ list_add_tail(&spec_l2->list, rule_list_h);
+
+ return err;
+}
+
+static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *rule_list_h,
+ struct mlx4_spec_list *spec_l2,
+ __be32 ipv4_dst)
+{
+#ifdef CONFIG_INET
+ unsigned char mac[ETH_ALEN];
+
+ if (!ipv4_is_multicast(ipv4_dst)) {
+ if (cmd->fs.flow_type & FLOW_MAC_EXT)
+ memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
+ else
+ memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
+ } else {
+ ip_eth_mc_map(ipv4_dst, mac);
+ }
+
+ return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
+#else
+ return -EINVAL;
+#endif
+}
+
static int add_ip_rule(struct mlx4_en_priv *priv,
- struct ethtool_rxnfc *cmd,
- struct list_head *list_h)
+ struct ethtool_rxnfc *cmd,
+ struct list_head *list_h)
{
- struct mlx4_spec_list *spec_l3;
+ int err;
+ struct mlx4_spec_list *spec_l2 = NULL;
+ struct mlx4_spec_list *spec_l3 = NULL;
struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
- spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
- if (!spec_l3) {
- en_err(priv, "Fail to alloc ethtool rule.\n");
- return -ENOMEM;
+ spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
+ spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+ if (!spec_l2 || !spec_l3) {
+ err = -ENOMEM;
+ goto free_spec;
}
+ err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
+ cmd->fs.h_u.
+ usr_ip4_spec.ip4dst);
+ if (err)
+ goto free_spec;
spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
if (l3_mask->ip4src)
@@ -695,34 +760,52 @@ static int add_ip_rule(struct mlx4_en_priv *priv,
list_add_tail(&spec_l3->list, list_h);
return 0;
+
+free_spec:
+ kfree(spec_l2);
+ kfree(spec_l3);
+ return err;
}
static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
struct ethtool_rxnfc *cmd,
struct list_head *list_h, int proto)
{
- struct mlx4_spec_list *spec_l3;
- struct mlx4_spec_list *spec_l4;
+ int err;
+ struct mlx4_spec_list *spec_l2 = NULL;
+ struct mlx4_spec_list *spec_l3 = NULL;
+ struct mlx4_spec_list *spec_l4 = NULL;
struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
- spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
- spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
- if (!spec_l4 || !spec_l3) {
- en_err(priv, "Fail to alloc ethtool rule.\n");
- kfree(spec_l3);
- kfree(spec_l4);
- return -ENOMEM;
+ spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+ spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
+ spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
+ if (!spec_l2 || !spec_l3 || !spec_l4) {
+ err = -ENOMEM;
+ goto free_spec;
}
spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
if (proto == TCP_V4_FLOW) {
+ err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
+ spec_l2,
+ cmd->fs.h_u.
+ tcp_ip4_spec.ip4dst);
+ if (err)
+ goto free_spec;
spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
} else {
+ err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
+ spec_l2,
+ cmd->fs.h_u.
+ udp_ip4_spec.ip4dst);
+ if (err)
+ goto free_spec;
spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
@@ -744,6 +827,12 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
list_add_tail(&spec_l4->list, list_h);
return 0;
+
+free_spec:
+ kfree(spec_l2);
+ kfree(spec_l3);
+ kfree(spec_l4);
+ return err;
}
static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
@@ -751,43 +840,23 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
struct list_head *rule_list_h)
{
int err;
- __be64 be_mac;
struct ethhdr *eth_spec;
- struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_spec_list *spec_l2;
- __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+ struct mlx4_en_priv *priv = netdev_priv(dev);
err = mlx4_en_validate_flow(dev, cmd);
if (err)
return err;
- spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
- if (!spec_l2)
- return -ENOMEM;
-
- if (cmd->fs.flow_type & FLOW_MAC_EXT) {
- memcpy(&be_mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
- } else {
- u64 mac = priv->mac & MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
- }
-
- spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
- memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
- if ((cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) != ETHER_FLOW)
- memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
-
- if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
- spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
- spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
- }
-
- list_add_tail(&spec_l2->list, rule_list_h);
-
switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
+ spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+ if (!spec_l2)
+ return -ENOMEM;
+
eth_spec = &cmd->fs.h_u.ether_spec;
- memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
+ mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
+ &eth_spec->h_dest[0]);
spec_l2->eth.ether_type = eth_spec->h_proto;
if (eth_spec->h_proto)
spec_l2->eth.ether_type_enable = 1;
@@ -861,6 +930,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
loc_rule->id = 0;
memset(&loc_rule->flow_spec, 0,
sizeof(struct ethtool_rx_flow_spec));
+ list_del(&loc_rule->list);
}
err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
if (err) {
@@ -871,6 +941,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
loc_rule->id = reg_id;
memcpy(&loc_rule->flow_spec, &cmd->fs,
sizeof(struct ethtool_rx_flow_spec));
+ list_add_tail(&loc_rule->list, &priv->ethtool_list);
out_free_list:
list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
@@ -904,6 +975,7 @@ static int mlx4_en_flow_detach(struct net_device *dev,
}
rule->id = 0;
memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
+ list_del(&rule->list);
out:
return err;
@@ -952,7 +1024,8 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
cmd->cmd == ETHTOOL_GRXCLSRULE ||
cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
- mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ (mdev->dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
return -EINVAL;
switch (cmd->cmd) {
@@ -988,7 +1061,8 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ if (mdev->dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
return -EINVAL;
switch (cmd->cmd) {
@@ -1037,7 +1111,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
}
mlx4_en_free_resources(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 3a2b8c65642d..b2cca58de910 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -64,7 +64,7 @@ static const char mlx4_en_version[] =
/* Enable RSS UDP traffic */
MLX4_EN_PARM_INT(udp_rss, 1,
- "Enable RSS for incomming UDP traffic or disabled (0)");
+ "Enable RSS for incoming UDP traffic or disabled (0)");
/* Priority pausing */
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
@@ -95,6 +95,28 @@ int en_print(const char *level, const struct mlx4_en_priv *priv,
return i;
}
+void mlx4_en_update_loopback_state(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
+ MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
+
+ /* Drop the packet if SRIOV is not enabled
+ * and not performing the selftest or flb disabled
+ */
+ if (mlx4_is_mfunc(priv->mdev->dev) &&
+ !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback)
+ priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;
+
+ /* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest
+ * is requested
+ */
+ if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
+ priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
+}
+
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
struct mlx4_en_profile *params = &mdev->profile;
@@ -191,10 +213,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
printk_once(KERN_INFO "%s", mlx4_en_version);
- mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) {
- dev_err(&dev->pdev->dev, "Device struct alloc failed, "
- "aborting.\n");
err = -ENOMEM;
goto err_free_res;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 75a3f467bb5b..5088dc5c3d1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -132,17 +132,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
.priority = MLX4_DOMAIN_RFS,
};
int rc;
- __be64 mac;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
list_add_tail(&spec_eth.list, &rule.list);
list_add_tail(&spec_ip.list, &rule.list);
list_add_tail(&spec_tcp.list, &rule.list);
- mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
-
rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
- memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
filter->activated = 0;
@@ -413,6 +410,235 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
return 0;
}
+static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
+{
+ unsigned int i;
+ for (i = ETH_ALEN - 1; i; --i) {
+ dst_mac[i] = src_mac & 0xff;
+ src_mac >>= 8;
+ }
+ memset(&dst_mac[ETH_ALEN], 0, 2);
+}
+
+static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
+ unsigned char *mac, int *qpn, u64 *reg_id)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int err;
+
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = *qpn;
+ memcpy(&gid[10], mac, ETH_ALEN);
+ gid[5] = priv->port;
+
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec_eth = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ rule.port = priv->port;
+ rule.qpn = *qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ list_add_tail(&spec_eth.list, &rule.list);
+
+ err = mlx4_flow_attach(dev, &rule, reg_id);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (err)
+ en_warn(priv, "Failed Attaching Unicast\n");
+
+ return err;
+}
+
+static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
+ unsigned char *mac, int qpn, u64 reg_id)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = qpn;
+ memcpy(&gid[10], mac, ETH_ALEN);
+ gid[5] = priv->port;
+
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ mlx4_flow_detach(dev, reg_id);
+ break;
+ }
+ default:
+ en_err(priv, "Invalid steering mode.\n");
+ }
+}
+
+static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ struct mlx4_mac_entry *entry;
+ int index = 0;
+ int err = 0;
+ u64 reg_id;
+ int *qpn = &priv->base_qpn;
+ u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+
+ en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
+ priv->dev->dev_addr);
+ index = mlx4_register_mac(dev, priv->port, mac);
+ if (index < 0) {
+ err = index;
+ en_err(priv, "Failed adding MAC: %pM\n",
+ priv->dev->dev_addr);
+ return err;
+ }
+
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
+ int base_qpn = mlx4_get_base_qpn(dev, priv->port);
+ *qpn = base_qpn + index;
+ return 0;
+ }
+
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
+ if (err) {
+ en_err(priv, "Failed to reserve qp for mac registration\n");
+ goto qp_err;
+ }
+
+ err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+ if (err)
+ goto steer_err;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+ entry->reg_id = reg_id;
+
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+ return 0;
+
+alloc_err:
+ mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+
+steer_err:
+ mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+ mlx4_unregister_mac(dev, priv->port, mac);
+ return err;
+}
+
+static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int qpn = priv->base_qpn;
+ u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+
+ en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
+ priv->dev->dev_addr);
+ mlx4_unregister_mac(dev, priv->port, mac);
+
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+
+ mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac,
+ priv->dev->dev_addr)) {
+ en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
+ priv->port, priv->dev->dev_addr, qpn);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+ mlx4_qp_release_range(dev, qpn, 1);
+
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ break;
+ }
+ }
+ }
+}
+
+static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
+ unsigned char *new_mac, unsigned char *prev_mac)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int err = 0;
+ u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);
+
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
+
+ bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+ mlx4_unregister_mac(dev, priv->port,
+ prev_mac_u64);
+ hlist_del_rcu(&entry->hlist);
+ synchronize_rcu();
+ memcpy(entry->mac, new_mac, ETH_ALEN);
+ entry->reg_id = 0;
+ mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[mac_hash]);
+ mlx4_register_mac(dev, priv->port, new_mac_u64);
+ err = mlx4_en_uc_steer_add(priv, new_mac,
+ &qpn,
+ &entry->reg_id);
+ return err;
+ }
+ }
+ return -EINVAL;
+ }
+
+ return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
+}
+
u64 mlx4_en_mac_to_u64(u8 *addr)
{
u64 mac = 0;
@@ -435,7 +661,6 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
- priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
queue_work(mdev->workqueue, &priv->mac_task);
return 0;
}
@@ -450,13 +675,14 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
/* Remove old MAC and insert the new one */
- err = mlx4_replace_mac(mdev->dev, priv->port,
- priv->base_qpn, priv->mac);
+ err = mlx4_en_replace_mac(priv, priv->base_qpn,
+ priv->dev->dev_addr, priv->prev_mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
+ memcpy(priv->prev_mac, priv->dev->dev_addr,
+ sizeof(priv->prev_mac));
} else
- en_dbg(HW, priv, "Port is down while "
- "registering mac, exiting...\n");
+ en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
mutex_unlock(&mdev->state_lock);
}
@@ -482,7 +708,6 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
if (!tmp) {
- en_err(priv, "failed to allocate multicast list\n");
mlx4_en_clear_list(dev);
return;
}
@@ -526,181 +751,153 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
}
}
if (!found) {
- new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
+ new_mc = kmemdup(src_tmp,
+ sizeof(struct mlx4_en_mc_list),
GFP_KERNEL);
- if (!new_mc) {
- en_err(priv, "Failed to allocate current multicast list\n");
+ if (!new_mc)
return;
- }
- memcpy(new_mc, src_tmp,
- sizeof(struct mlx4_en_mc_list));
+
new_mc->action = MCLIST_ADD;
list_add_tail(&new_mc->list, dst);
}
}
}
-static void mlx4_en_set_multicast(struct net_device *dev)
+static void mlx4_en_set_rx_mode(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (!priv->port_up)
return;
- queue_work(priv->mdev->workqueue, &priv->mcast_task);
+ queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
-static void mlx4_en_do_set_multicast(struct work_struct *work)
+static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
+ struct mlx4_en_dev *mdev)
{
- struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
- mcast_task);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct net_device *dev = priv->dev;
- struct mlx4_en_mc_list *mclist, *tmp;
- u64 mcast_addr = 0;
- u8 mc_list[16] = {0};
int err = 0;
- mutex_lock(&mdev->state_lock);
- if (!mdev->device_up) {
- en_dbg(HW, priv, "Card is not up, "
- "ignoring multicast change.\n");
- goto out;
- }
- if (!priv->port_up) {
- en_dbg(HW, priv, "Port is down, "
- "ignoring multicast change.\n");
- goto out;
- }
-
- if (!netif_carrier_ok(dev)) {
- if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
- if (priv->port_state.link_state) {
- priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
- netif_carrier_on(dev);
- en_dbg(LINK, priv, "Link Up\n");
- }
- }
- }
-
- /*
- * Promsicuous mode: disable all filters
- */
-
- if (dev->flags & IFF_PROMISC) {
- if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
- if (netif_msg_rx_status(priv))
- en_warn(priv, "Entering promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_PROMISC;
-
- /* Enable promiscouos mode */
- switch (mdev->dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_DEVICE_MANAGED:
- err = mlx4_flow_steer_promisc_add(mdev->dev,
- priv->port,
- priv->base_qpn,
- MLX4_FS_PROMISC_UPLINK);
- if (err)
- en_err(priv, "Failed enabling promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- break;
-
- case MLX4_STEERING_MODE_B0:
- err = mlx4_unicast_promisc_add(mdev->dev,
- priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed enabling unicast promiscuous mode\n");
-
- /* Add the default qp number as multicast
- * promisc
- */
- if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev,
- priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed enabling multicast promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- }
- break;
-
- case MLX4_STEERING_MODE_A0:
- err = mlx4_SET_PORT_qpn_calc(mdev->dev,
- priv->port,
- priv->base_qpn,
- 1);
- if (err)
- en_err(priv, "Failed enabling promiscuous mode\n");
- break;
- }
-
- /* Disable port multicast filter (unconditionally) */
- err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
- 0, MLX4_MCAST_DISABLE);
- if (err)
- en_err(priv, "Failed disabling "
- "multicast filter\n");
-
- /* Disable port VLAN filter */
- err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
- if (err)
- en_err(priv, "Failed disabling VLAN filter\n");
- }
- goto out;
- }
-
- /*
- * Not in promiscuous mode
- */
-
- if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+ if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
if (netif_msg_rx_status(priv))
- en_warn(priv, "Leaving promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+ en_warn(priv, "Entering promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_PROMISC;
- /* Disable promiscouos mode */
+ /* Enable promiscuous mode */
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
- err = mlx4_flow_steer_promisc_remove(mdev->dev,
- priv->port,
- MLX4_FS_PROMISC_UPLINK);
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_UPLINK);
if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
break;
case MLX4_STEERING_MODE_B0:
- err = mlx4_unicast_promisc_remove(mdev->dev,
- priv->base_qpn,
- priv->port);
+ err = mlx4_unicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
if (err)
- en_err(priv, "Failed disabling unicast promiscuous mode\n");
- /* Disable Multicast promisc */
- if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev,
- priv->base_qpn,
- priv->port);
+ en_err(priv, "Failed enabling unicast promiscuous mode\n");
+
+ /* Add the default qp number as multicast
+ * promisc
+ */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
if (err)
- en_err(priv, "Failed disabling multicast promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed enabling multicast promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
break;
case MLX4_STEERING_MODE_A0:
err = mlx4_SET_PORT_qpn_calc(mdev->dev,
priv->port,
- priv->base_qpn, 0);
+ priv->base_qpn,
+ 1);
if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
+ en_err(priv, "Failed enabling promiscuous mode\n");
break;
}
- /* Enable port VLAN filter */
+ /* Disable port multicast filter (unconditionally) */
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ en_err(priv, "Failed disabling multicast filter\n");
+
+ /* Disable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
- en_err(priv, "Failed enabling VLAN filter\n");
+ en_err(priv, "Failed disabling VLAN filter\n");
}
+}
+
+static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
+ struct mlx4_en_dev *mdev)
+{
+ int err = 0;
+
+ if (netif_msg_rx_status(priv))
+ en_warn(priv, "Leaving promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+ /* Disable promiscuous mode */
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling unicast promiscuous mode\n");
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn, 0);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ break;
+ }
+
+ /* Enable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+ if (err)
+ en_err(priv, "Failed enabling VLAN filter\n");
+}
+
+static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
+ struct net_device *dev,
+ struct mlx4_en_dev *mdev)
+{
+ struct mlx4_en_mc_list *mclist, *tmp;
+ u64 mcast_addr = 0;
+ u8 mc_list[16] = {0};
+ int err = 0;
/* Enable/disable the multicast filter according to IFF_ALLMULTI */
if (dev->flags & IFF_ALLMULTI) {
@@ -767,9 +964,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
/* Update multicast list - we cache all addresses so they won't
 	 * change while HW is updated holding the command semaphore */
- netif_tx_lock_bh(dev);
+ netif_addr_lock_bh(dev);
mlx4_en_cache_mclist(dev);
- netif_tx_unlock_bh(dev);
+ netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
@@ -814,6 +1011,170 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
}
}
}
+}
+
+static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
+ struct net_device *dev,
+ struct mlx4_en_dev *mdev)
+{
+ struct netdev_hw_addr *ha;
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ bool found;
+ u64 mac;
+ int err = 0;
+ struct hlist_head *bucket;
+ unsigned int i;
+ int removed = 0;
+ u32 prev_flags;
+
+ /* Note that we do not need to protect our mac_hash traversal with rcu,
+ * since all modification code is protected by mdev->state_lock
+ */
+
+ /* find what to remove */
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+ bucket = &priv->mac_hash[i];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ found = false;
+ netdev_for_each_uc_addr(ha, dev) {
+ if (ether_addr_equal_64bits(entry->mac,
+ ha->addr)) {
+ found = true;
+ break;
+ }
+ }
+
+ /* MAC address of the port is not in uc list */
+ if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
+ found = true;
+
+ if (!found) {
+ mac = mlx4_en_mac_to_u64(entry->mac);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ priv->base_qpn,
+ entry->reg_id);
+ mlx4_unregister_mac(mdev->dev, priv->port, mac);
+
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
+ entry->mac, priv->port);
+ ++removed;
+ }
+ }
+ }
+
+ /* if we didn't remove anything, there is no use in trying to add
+ * again once we are in a forced promisc mode state
+ */
+ if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
+ return;
+
+ prev_flags = priv->flags;
+ priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
+
+ /* find what to add */
+ netdev_for_each_uc_addr(ha, dev) {
+ found = false;
+ bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
+ hlist_for_each_entry(entry, n, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
+ ha->addr, priv->port);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ }
+ mac = mlx4_en_mac_to_u64(ha->addr);
+ memcpy(entry->mac, ha->addr, ETH_ALEN);
+ err = mlx4_register_mac(mdev->dev, priv->port, mac);
+ if (err < 0) {
+ en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
+ ha->addr, priv->port, err);
+ kfree(entry);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ }
+ err = mlx4_en_uc_steer_add(priv, ha->addr,
+ &priv->base_qpn,
+ &entry->reg_id);
+ if (err) {
+ en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
+ ha->addr, priv->port, err);
+ mlx4_unregister_mac(mdev->dev, priv->port, mac);
+ kfree(entry);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ } else {
+ unsigned int mac_hash;
+ en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
+ ha->addr, priv->port);
+ mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ hlist_add_head_rcu(&entry->hlist, bucket);
+ }
+ }
+ }
+
+ if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
+ en_warn(priv, "Forcing promiscuous mode on port:%d\n",
+ priv->port);
+ } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
+ en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
+ priv->port);
+ }
+}
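
For reference, a minimal userspace sketch of the hashing scheme behind the mac_hash table that mlx4_en_do_uc_filter() walks above: the bucket index is just one octet of the MAC address (MLX4_EN_MAC_HASH_IDX is 5 in this patch), giving a 256-bucket table. The names below are illustrative only; the driver chains entries with hlist under RCU rather than the plain pointers shown here.

#include <stdio.h>
#include <stdint.h>

#define MAC_HASH_IDX   5            /* same octet the driver hashes on */
#define MAC_HASH_SIZE  (1 << 8)     /* one bucket per possible byte value */

/* simplified stand-in for struct mlx4_mac_entry */
struct mac_entry {
	uint8_t mac[6];
	struct mac_entry *next;
};

static unsigned int mac_hash(const uint8_t *mac)
{
	return mac[MAC_HASH_IDX];   /* bucket = last octet of the address */
}

int main(void)
{
	static struct mac_entry *buckets[MAC_HASH_SIZE];
	static struct mac_entry e = { .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
	unsigned int b = mac_hash(e.mac);

	e.next = buckets[b];
	buckets[b] = &e;            /* insert at head, like hlist_add_head_rcu() */
	printf("00:11:22:33:44:55 lands in bucket %u\n", b);
	return 0;
}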
+
+static void mlx4_en_do_set_rx_mode(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ rx_mode_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up) {
+ en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
+ goto out;
+ }
+ if (!priv->port_up) {
+ en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
+ goto out;
+ }
+
+ if (!netif_carrier_ok(dev)) {
+ if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+ if (priv->port_state.link_state) {
+ priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+ netif_carrier_on(dev);
+ en_dbg(LINK, priv, "Link Up\n");
+ }
+ }
+ }
+
+ if (dev->priv_flags & IFF_UNICAST_FLT)
+ mlx4_en_do_uc_filter(priv, dev, mdev);
+
+	/* Promiscuous mode: disable all filters */
+ if ((dev->flags & IFF_PROMISC) ||
+ (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
+ mlx4_en_set_promisc_mode(priv, mdev);
+ goto out;
+ }
+
+ /* Not in promiscuous mode */
+ if (priv->flags & MLX4_EN_FLAG_PROMISC)
+ mlx4_en_clear_promisc_mode(priv, mdev);
+
+ mlx4_en_do_multicast(priv, dev, mdev);
out:
mutex_unlock(&mdev->state_lock);
}
@@ -876,9 +1237,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
- en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
- "rx_frames:%d rx_usecs:%d\n",
- priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
+	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
+ priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
/* Setup cq moderation params */
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -959,8 +1319,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
cq->moder_time = moder_time;
err = mlx4_en_set_cq_moder(priv, cq);
if (err)
- en_err(priv, "Failed modifying moderation "
- "for cq:%d\n", ring);
+ en_err(priv, "Failed modifying moderation for cq:%d\n",
+ ring);
}
priv->last_moder_packets[ring] = rx_packets;
priv->last_moder_bytes[ring] = rx_bytes;
@@ -977,12 +1337,12 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
struct mlx4_en_dev *mdev = priv->mdev;
int err;
- err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
- if (err)
- en_dbg(HW, priv, "Could not update stats\n");
-
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
+ err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
+ if (err)
+ en_dbg(HW, priv, "Could not update stats\n");
+
if (priv->port_up)
mlx4_en_auto_moderation(priv);
@@ -1039,6 +1399,9 @@ int mlx4_en_start_port(struct net_device *dev)
INIT_LIST_HEAD(&priv->mc_list);
INIT_LIST_HEAD(&priv->curr_list);
+ INIT_LIST_HEAD(&priv->ethtool_list);
+ memset(&priv->ethtool_rules[0], 0,
+ sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
/* Calculate Rx buf size */
dev->mtu = min(dev->mtu, priv->max_mtu);
@@ -1074,8 +1437,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Set qp number */
en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
- err = mlx4_get_eth_qp(mdev->dev, priv->port,
- priv->mac, &priv->base_qpn);
+ err = mlx4_en_get_qp(priv);
if (err) {
en_err(priv, "Failed getting eth qp\n");
goto cq_err;
@@ -1138,8 +1500,8 @@ int mlx4_en_start_port(struct net_device *dev)
priv->prof->rx_pause,
priv->prof->rx_ppp);
if (err) {
- en_err(priv, "Failed setting port general configurations "
- "for port %d, with error %d\n", priv->port, err);
+ en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+ priv->port, err);
goto tx_err;
}
/* Set default qp number */
@@ -1167,23 +1529,16 @@ int mlx4_en_start_port(struct net_device *dev)
/* Must redo promiscuous mode setup. */
priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
- if (mdev->dev->caps.steering_mode ==
- MLX4_STEERING_MODE_DEVICE_MANAGED) {
- mlx4_flow_steer_promisc_remove(mdev->dev,
- priv->port,
- MLX4_FS_PROMISC_UPLINK);
- mlx4_flow_steer_promisc_remove(mdev->dev,
- priv->port,
- MLX4_FS_PROMISC_ALL_MULTI);
- }
/* Schedule multicast task to populate multicast list */
- queue_work(mdev->workqueue, &priv->mcast_task);
+ queue_work(mdev->workqueue, &priv->rx_mode_task);
mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
priv->port_up = true;
netif_tx_start_all_queues(dev);
+ netif_device_attach(dev);
+
return 0;
tx_err:
@@ -1195,7 +1550,7 @@ tx_err:
rss_err:
mlx4_en_release_rss_steer(priv);
mac_err:
- mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+ mlx4_en_put_qp(priv);
cq_err:
while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -1206,11 +1561,12 @@ cq_err:
}
-void mlx4_en_stop_port(struct net_device *dev)
+void mlx4_en_stop_port(struct net_device *dev, int detach)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_mc_list *mclist, *tmp;
+ struct ethtool_flow_id *flow, *tmp_flow;
int i;
u8 mc_list[16] = {0};
@@ -1221,12 +1577,42 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Synchronize with tx routine */
netif_tx_lock_bh(dev);
+ if (detach)
+ netif_device_detach(dev);
netif_tx_stop_all_queues(dev);
netif_tx_unlock_bh(dev);
+ netif_tx_disable(dev);
+
/* Set port as not active */
priv->port_up = false;
+	/* Promiscuous mode */
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
+ MLX4_EN_FLAG_MC_PROMISC);
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+ priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+		/* Disable promiscuous mode */
+ mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
+
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+ }
+
/* Detach All multicasts */
memset(&mc_list[10], 0xff, ETH_ALEN);
mc_list[5] = priv->port; /* needed for B0 steering support */
@@ -1263,8 +1649,20 @@ void mlx4_en_stop_port(struct net_device *dev)
mlx4_en_release_rss_steer(priv);
/* Unregister Mac address for the port */
- mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
- mdev->mac_removed[priv->port] = 1;
+ mlx4_en_put_qp(priv);
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+ mdev->mac_removed[priv->port] = 1;
+
+ /* Remove flow steering rules for the port*/
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ ASSERT_RTNL();
+ list_for_each_entry_safe(flow, tmp_flow,
+ &priv->ethtool_list, list) {
+ mlx4_flow_detach(mdev->dev, flow->id);
+ list_del(&flow->list);
+ }
+ }
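
The ethtool flow-rule cleanup just above walks priv->ethtool_list with list_for_each_entry_safe() because every entry is detached and freed during the walk. The plain-C sketch below shows the idea that macro encapsulates, remembering the successor before the current node becomes invalid; all names here are invented for the illustration.

#include <stdio.h>
#include <stdlib.h>

struct rule {
	int id;
	struct rule *next;
};

/* Delete every node while iterating: save the next pointer first,
 * which is what the _safe list iterators do with their extra cursor. */
static void detach_all(struct rule **head)
{
	struct rule *cur = *head, *tmp;

	while (cur) {
		tmp = cur->next;    /* grab the successor before freeing */
		printf("detaching rule %d\n", cur->id);
		free(cur);          /* cur must not be dereferenced past this point */
		cur = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct rule *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct rule *r = malloc(sizeof(*r));

		if (!r)
			break;
		r->id = i;
		r->next = head;
		head = r;
	}
	detach_all(&head);
	return 0;
}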
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -1284,15 +1682,12 @@ static void mlx4_en_restart(struct work_struct *work)
watchdog_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
- int i;
en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
- mlx4_en_stop_port(dev);
- for (i = 0; i < priv->tx_ring_num; i++)
- netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
+ mlx4_en_stop_port(dev, 1);
if (mlx4_en_start_port(dev))
en_err(priv, "Failed restarting port %d\n", priv->port);
}
@@ -1362,7 +1757,7 @@ static int mlx4_en_close(struct net_device *dev)
mutex_lock(&mdev->state_lock);
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 0);
netif_carrier_off(dev);
mutex_unlock(&mdev->state_lock);
@@ -1437,9 +1832,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
if (!priv->dev->rx_cpu_rmap)
goto err;
-
- INIT_LIST_HEAD(&priv->filters);
- spin_lock_init(&priv->filters_lock);
#endif
return 0;
@@ -1503,7 +1895,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
* the port */
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
err = mlx4_en_start_port(dev);
if (err) {
en_err(priv, "Failed restarting port:%d\n",
@@ -1527,17 +1919,92 @@ static int mlx4_en_set_features(struct net_device *netdev,
priv->ctrl_flags &=
cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ mlx4_en_update_loopback_state(netdev, features);
+
return 0;
}
+static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 flags)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int err;
+
+ if (!mlx4_is_mfunc(mdev))
+ return -EOPNOTSUPP;
+
+	/* Hardware does not support aging addresses; allow only
+	 * permanent addresses if ndm_state is given
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ en_info(priv, "Add FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+static int mlx4_en_fdb_del(struct ndmsg *ndm,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int err;
+
+ if (!mlx4_is_mfunc(mdev))
+ return -EOPNOTSUPP;
+
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ en_info(priv, "Del FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+
+ return err;
+}
+
+static int mlx4_en_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev, int idx)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+
+ if (mlx4_is_mfunc(mdev))
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
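
The three FDB handlers above route an address to the device's unicast or multicast list depending on its type. Stripped of the netlink plumbing, that distinction is the I/G bit, the least significant bit of the first octet, which is what is_multicast_ether_addr() tests; the helper and example addresses below are made up for the illustration.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* I/G bit: 0 = individual (unicast), 1 = group (multicast). */
static bool mac_is_multicast(const uint8_t *addr)
{
	return addr[0] & 0x01;
}

int main(void)
{
	const uint8_t uc[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("first octet 0x%02x -> %s\n", uc[0],
	       mac_is_multicast(uc) ? "multicast list" : "unicast list");
	printf("first octet 0x%02x -> %s\n", mc[0],
	       mac_is_multicast(mc) ? "multicast list" : "unicast list");
	return 0;
}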
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
.ndo_get_stats = mlx4_en_get_stats,
- .ndo_set_rx_mode = mlx4_en_set_multicast,
+ .ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
@@ -1552,6 +2019,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
+ .ndo_fdb_add = mlx4_en_fdb_add,
+ .ndo_fdb_del = mlx4_en_fdb_del,
+ .ndo_fdb_dump = mlx4_en_fdb_dump,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1608,7 +2078,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
- INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
+ INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
@@ -1618,22 +2088,35 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
+ INIT_HLIST_HEAD(&priv->mac_hash[i]);
+
/* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
- priv->mac = mdev->dev->caps.def_mac[priv->port];
- if (ILLEGAL_MAC(priv->mac)) {
- en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
- priv->port, priv->mac);
+
+ /* Set default MAC */
+ dev->addr_len = ETH_ALEN;
+ mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
+ priv->port, dev->dev_addr);
err = -EINVAL;
goto out;
}
+ memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
+
priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
err = mlx4_en_alloc_resources(priv);
if (err)
goto out;
+#ifdef CONFIG_RFS_ACCEL
+ INIT_LIST_HEAD(&priv->filters);
+ spin_lock_init(&priv->filters_lock);
+#endif
+
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
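
With this hunk the station address is kept as a byte array (dev->dev_addr and the new prev_mac field) and converted to the firmware's u64 form only where needed, via mlx4_en_u64_to_mac()/mlx4_en_mac_to_u64(). Those helper bodies are not part of this diff, but the removed loop further below shows the layout: the six octets occupy the low 48 bits, first octet most significant. A standalone sketch of that packing, under that assumption:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static uint64_t mac_to_u64(const uint8_t *addr)
{
	uint64_t mac = 0;

	for (int i = 0; i < ETH_ALEN; i++)
		mac = (mac << 8) | addr[i];   /* first octet ends up most significant */
	return mac;
}

static void u64_to_mac(uint8_t *addr, uint64_t mac)
{
	for (int i = ETH_ALEN - 1; i >= 0; i--) {
		addr[i] = mac & 0xff;
		mac >>= 8;
	}
}

int main(void)
{
	const uint8_t in[ETH_ALEN] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
	uint8_t out[ETH_ALEN];
	uint64_t v = mac_to_u64(in);

	u64_to_mac(out, v);
	printf("u64 form 0x%012llx, round-trip ok: %d\n",
	       (unsigned long long)v, out[0] == in[0] && out[5] == in[5]);
	return 0;
}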
@@ -1653,13 +2136,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
- /* Set defualt MAC */
- dev->addr_len = ETH_ALEN;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
- dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
- }
-
/*
* Set driver features
*/
@@ -1679,6 +2155,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
MLX4_STEERING_MODE_DEVICE_MANAGED)
dev->hw_features |= NETIF_F_NTUPLE;
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
@@ -1692,6 +2171,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+
/* Configure port */
mlx4_en_calc_rx_buf(dev);
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index fed26d867f4e..ce38654bbdd0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -563,9 +563,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
unsigned int length;
int polled = 0;
int ip_summed;
- struct ethhdr *ethh;
- dma_addr_t dma;
- u64 s_mac;
int factor = priv->cqe_factor;
if (!priv->port_up)
@@ -603,21 +600,41 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
- /* Get pointer to first fragment since we haven't skb yet and
- * cast it to ethhdr struct */
- dma = be64_to_cpu(rx_desc->data[0].addr);
- dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
- DMA_FROM_DEVICE);
- ethh = (struct ethhdr *)(page_address(frags[0].page) +
- frags[0].offset);
- s_mac = mlx4_en_mac_to_u64(ethh->h_source);
-
- /* If source MAC is equal to our own MAC and not performing
- * the selftest or flb disabled - drop the packet */
- if (s_mac == priv->mac &&
- !((dev->features & NETIF_F_LOOPBACK) ||
- priv->validate_loopback))
- goto next;
+ /* Check if we need to drop the packet if SRIOV is not enabled
+ * and not performing the selftest or flb disabled
+ */
+ if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
+ struct ethhdr *ethh;
+ dma_addr_t dma;
+ /* Get pointer to first fragment since we haven't
+ * skb yet and cast it to ethhdr struct
+ */
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
+ DMA_FROM_DEVICE);
+ ethh = (struct ethhdr *)(page_address(frags[0].page) +
+ frags[0].offset);
+
+ if (is_multicast_ether_addr(ethh->h_dest)) {
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n;
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+
+ /* Drop the packet, since HW loopback-ed it */
+ mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac,
+ ethh->h_source)) {
+ rcu_read_unlock();
+ goto next;
+ }
+ }
+ rcu_read_unlock();
+ }
+ }
/*
* Packet is OK - process it.
@@ -835,11 +852,9 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
struct mlx4_qp_context *context;
int err = 0;
- context = kmalloc(sizeof *context , GFP_KERNEL);
- if (!context) {
- en_err(priv, "Failed to allocate qp context\n");
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
return -ENOMEM;
- }
err = mlx4_qp_alloc(mdev->dev, qpn, qp);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index bf2e5d3f177c..3488c6d9e6b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -87,6 +87,8 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
priv->loopback_ok = 0;
priv->validate_loopback = 1;
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
en_err(priv, "Transmitting loopback packet failed\n");
@@ -107,6 +109,7 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 2b799f4f1c37..49308cc65ee7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -294,6 +294,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
cnt++;
}
+ netdev_tx_reset_queue(ring->tx_queue);
+
if (cnt)
en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
@@ -515,10 +517,6 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
wmb();
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
- tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- (!!vlan_tx_tag_present(skb));
- tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
@@ -592,7 +590,21 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(ring->tx_queue);
priv->port_stats.queue_stopped++;
- return NETDEV_TX_BUSY;
+		/* If the queue was emptied after the check above but before
+		 * the stop_queue call, we need to wake the queue here or it
+		 * will remain stopped forever.
+		 * Need a memory barrier to make sure ring->cons was not
+		 * updated before the queue was stopped.
+ */
+ wmb();
+
+ if (unlikely(((int)(ring->prod - ring->cons)) <=
+ ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ netif_tx_wake_queue(ring->tx_queue);
+ priv->port_stats.wake_queue++;
+ } else {
+ return NETDEV_TX_BUSY;
+ }
}
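
The wake-after-stop check above works because ring->prod and ring->cons are free-running unsigned counters: their difference gives the number of in-flight descriptors even after either counter wraps. A small demonstration of that modular arithmetic, with counter values and thresholds invented for the example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* free-running 32-bit producer/consumer indices, as on the tx ring */
	uint32_t prod = 0x00000005;   /* producer has wrapped past 2^32 */
	uint32_t cons = 0xfffffff0;   /* consumer has not wrapped yet */

	/* unsigned subtraction is modulo 2^32, so the occupancy is still
	 * correct even though prod < cons numerically */
	uint32_t inflight = prod - cons;
	printf("descriptors in flight: %u\n", inflight);   /* 21 */

	/* before waking the queue the driver checks that enough room is
	 * left for a maximally sized descriptor, as in the hunk above */
	unsigned int size = 1024, headroom = 4, max_desc = 16;
	if ((int)inflight <= (int)(size - headroom - max_desc))
		printf("queue may be woken\n");
	return 0;
}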
/* Track current inflight packets for performance analysis */
@@ -630,10 +642,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->tx_csum++;
}
- /* Copy dst mac address to wqe */
- ethh = (struct ethhdr *)skb->data;
- tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
- tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
+ /* Copy dst mac address to wqe. This allows loopback in eSwitch,
+ * so that VFs and PF can communicate with each other
+ */
+ ethh = (struct ethhdr *)skb->data;
+ tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
+ tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ }
+
/* Handle LSO (TSO) packets */
if (lso_header_size) {
/* Mark opcode as LSO */
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 8b3d0512a46b..38b62c78d5da 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -127,7 +127,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
[2] = "RSS XOR Hash Function support",
- [3] = "Device manage flow steering support"
+ [3] = "Device manage flow steering support",
+ [4] = "Automatic mac reassignment support"
};
int i;
@@ -478,6 +479,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
+#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -637,6 +639,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
+ if (field & 1<<6)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -1287,14 +1292,14 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
/* Enable Ethernet flow steering
* with udp unicast and tcp unicast
*/
- MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
INIT_HCA_FS_ETH_BITS_OFFSET);
MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
/* Enable IPoIB flow steering
* with udp unicast and tcp unicast
*/
- MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
INIT_HCA_FS_IB_BITS_OFFSET);
MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
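
The new QUERY_DEV_CAP_FW_REASSIGN_MAC read above turns one bit of a mailbox byte (offset 0x9d, bit 6) into the MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN capability. Without the mailbox plumbing, the derivation is a plain bit test; everything in this sketch other than the offset and bit number is illustrative.

#include <stdio.h>
#include <stdint.h>

#define CAP_FLAG2_REASSIGN_MAC_EN (1u << 4)   /* mirrors slot [4] in the flags2 name table above */

int main(void)
{
	uint8_t field = 0x40;     /* pretend byte read from mailbox offset 0x9d */
	uint32_t flags2 = 0;

	if (field & (1 << 6))     /* bit 6 advertises MAC reassignment support */
		flags2 |= CAP_FLAG2_REASSIGN_MAC_EN;

	printf("automatic mac reassignment: %s\n",
	       (flags2 & CAP_FLAG2_REASSIGN_MAC_EN) ? "supported" : "not supported");
	return 0;
}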
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index dbf2f69cc59f..3af33ff669cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -171,7 +171,6 @@ struct mlx4_init_hca_param {
u8 log_mpt_sz;
u8 log_uar_sz;
u8 uar_page_sz; /* log pg sz in 4k chunks */
- u8 fs_hash_enable_bits;
u8 steering_mode; /* for QUERY_HCA */
u64 dev_cap_enabled;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index e1bafffbc3b1..b9dde139dac5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- if ((dev_cap->flags &
+ if ((dev->caps.flags &
(MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
mlx4_is_master(dev))
dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
@@ -1415,22 +1415,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
- priv->fs_hash_mode = MLX4_FS_L2_HASH;
-
- switch (priv->fs_hash_mode) {
- case MLX4_FS_L2_HASH:
- init_hca.fs_hash_enable_bits = 0;
- break;
-
- case MLX4_FS_L2_L3_L4_HASH:
- /* Enable flow steering with
- * udp unicast and tcp unicast
- */
- init_hca.fs_hash_enable_bits =
- MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
- break;
- }
-
profile = default_profile;
if (dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -1790,15 +1774,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
int i;
if (msi_x) {
- /* In multifunction mode each function gets 2 msi-X vectors
- * one for data path completions anf the other for asynch events
- * or command completions */
- if (mlx4_is_mfunc(dev)) {
- nreq = 2;
- } else {
- nreq = min_t(int, dev->caps.num_eqs -
- dev->caps.reserved_eqs, nreq);
- }
+ nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+ nreq);
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
@@ -1856,12 +1833,9 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->dev = dev;
info->port = port;
if (!mlx4_is_slave(dev)) {
- INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
mlx4_init_mac_table(dev, &info->mac_table);
mlx4_init_vlan_table(dev, &info->vlan_table);
- info->base_qpn =
- dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
- (port - 1) * (1 << log_num_mac);
+ info->base_qpn = mlx4_get_base_qpn(dev, port);
}
sprintf(info->dev_name, "mlx4_port%d", port);
@@ -2077,10 +2051,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
/* Allow large DMA segments, up to the firmware limit of 1 GB */
dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
- priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- dev_err(&pdev->dev, "Device struct alloc failed, "
- "aborting.\n");
err = -ENOMEM;
goto err_release_regions;
}
@@ -2169,7 +2141,8 @@ slave_start:
dev->num_slaves = MLX4_MAX_NUM_SLAVES;
else {
dev->num_slaves = 0;
- if (mlx4_multi_func_init(dev)) {
+ err = mlx4_multi_func_init(dev);
+ if (err) {
mlx4_err(dev, "Failed to init slave mfunc"
" interface, aborting.\n");
goto err_cmd;
@@ -2193,7 +2166,8 @@ slave_start:
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
- if (mlx4_multi_func_init(dev)) {
+ err = mlx4_multi_func_init(dev);
+ if (err) {
mlx4_err(dev, "Failed to init master mfunc"
"interface, aborting.\n");
goto err_close;
@@ -2210,6 +2184,7 @@ slave_start:
mlx4_enable_msi_x(dev);
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
+ err = -ENOSYS;
mlx4_err(dev, "INTx is not supported in multi-function mode."
" aborting.\n");
goto err_free_eq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1ee4db3c6400..52685524708d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -664,7 +664,7 @@ static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
dw |= ctrl->priority << 16;
hw->ctrl = cpu_to_be32(dw);
- hw->vf_vep_port = cpu_to_be32(ctrl->port);
+ hw->port = ctrl->port;
hw->qpn = cpu_to_be32(ctrl->qpn);
}
@@ -1157,7 +1157,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
.priority = MLX4_DOMAIN_NIC,
};
- rule.allow_loopback = ~block_mcast_loopback;
+ rule.allow_loopback = !block_mcast_loopback;
rule.port = port;
rule.qpn = qp->qpn;
INIT_LIST_HEAD(&rule.list);
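
The one-character mcg.c fix above replaces bitwise NOT with logical NOT. allow_loopback is a boolean flag, and ~block_mcast_loopback is non-zero for both 0 and 1 inputs, so the flag was effectively always set; !block_mcast_loopback inverts it properly. A two-line demonstration:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int block = 1;          /* caller asked to block multicast loopback */
	bool wrong = ~block;    /* ~1 == -2, non-zero, converts to true */
	bool right = !block;    /* logical negation gives false, as intended */

	printf("~%d as bool -> %d, !%d -> %d\n", block, wrong, block, right);
	return 0;
}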
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 116c5c29d2d1..ed4a6959e828 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -60,11 +60,6 @@
#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
#define MLX4_FS_NUM_MCG (1 << 17)
-enum {
- MLX4_FS_L2_HASH = 0,
- MLX4_FS_L2_L3_L4_HASH,
-};
-
#define MLX4_NUM_UP 8
#define MLX4_NUM_TC 8
#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
@@ -658,11 +653,6 @@ struct mlx4_set_port_rqp_calc_context {
__be32 mcast;
};
-struct mlx4_mac_entry {
- u64 mac;
- u64 reg_id;
-};
-
struct mlx4_port_info {
struct mlx4_dev *dev;
int port;
@@ -672,7 +662,6 @@ struct mlx4_port_info {
char dev_mtu_name[16];
struct device_attribute port_mtu_attr;
struct mlx4_mac_table mac_table;
- struct radix_tree_root mac_tree;
struct mlx4_vlan_table vlan_table;
int base_qpn;
};
@@ -696,9 +685,12 @@ struct mlx4_steer {
struct mlx4_net_trans_rule_hw_ctrl {
__be32 ctrl;
- __be32 vf_vep_port;
+ u8 rsvd1;
+ u8 funcid;
+ u8 vep;
+ u8 port;
__be32 qpn;
- __be32 reserved;
+ __be32 rsvd2;
};
struct mlx4_net_trans_rule_hw_ib {
@@ -918,7 +910,6 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
-int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8d54412ada63..c313d7e943a9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -198,7 +198,6 @@ enum cq_type {
*/
#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
#define XNOR(x, y) (!(x) == !(y))
-#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
struct mlx4_en_tx_info {
@@ -427,10 +426,26 @@ struct mlx4_en_frag_info {
#endif
struct ethtool_flow_id {
+ struct list_head list;
struct ethtool_rx_flow_spec flow_spec;
u64 id;
};
+enum {
+ MLX4_EN_FLAG_PROMISC = (1 << 0),
+ MLX4_EN_FLAG_MC_PROMISC = (1 << 1),
+ /* whether we need to enable hardware loopback by putting dmac
+ * in Tx WQE
+ */
+ MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
+ /* whether we need to drop packets that hardware loopback-ed */
+ MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
+ MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4)
+};
+
+#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
+#define MLX4_EN_MAC_HASH_IDX 5
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -441,6 +456,8 @@ struct mlx4_en_priv {
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
+ /* To allow rules removal while port is going down */
+ struct list_head ethtool_list;
unsigned long last_moder_packets[MAX_RX_RINGS];
unsigned long last_moder_tx_packets;
@@ -469,7 +486,7 @@ struct mlx4_en_priv {
int registered;
int allocated;
int stride;
- u64 mac;
+ unsigned char prev_mac[ETH_ALEN + 2];
int mac_index;
unsigned max_mtu;
int base_qpn;
@@ -478,8 +495,6 @@ struct mlx4_en_priv {
struct mlx4_en_rss_map rss_map;
__be32 ctrl_flags;
u32 flags;
-#define MLX4_EN_FLAG_PROMISC 0x1
-#define MLX4_EN_FLAG_MC_PROMISC 0x2
u8 num_tx_rings_p_up;
u32 tx_ring_num;
u32 rx_ring_num;
@@ -493,7 +508,7 @@ struct mlx4_en_priv {
struct mlx4_en_cq *tx_cq;
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
- struct work_struct mcast_task;
+ struct work_struct rx_mode_task;
struct work_struct mac_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
@@ -510,6 +525,7 @@ struct mlx4_en_priv {
bool wol;
struct device *ddev;
int base_tx_qpn;
+ struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
@@ -529,14 +545,24 @@ enum mlx4_en_wol {
MLX4_EN_WOL_ENABLED = (1ULL << 62),
};
+struct mlx4_mac_entry {
+ struct hlist_node hlist;
+ unsigned char mac[ETH_ALEN + 2];
+ u64 reg_id;
+ struct rcu_head rcu;
+};
+
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
+void mlx4_en_update_loopback_state(struct net_device *dev,
+ netdev_features_t features);
+
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof);
int mlx4_en_start_port(struct net_device *dev);
-void mlx4_en_stop_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev, int detach);
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 4c51b05efa28..719ead15e491 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -74,87 +74,6 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
- u64 mac, int *qpn, u64 *reg_id)
-{
- __be64 be_mac;
- int err;
-
- mac &= MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
-
- switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_B0: {
- struct mlx4_qp qp;
- u8 gid[16] = {0};
-
- qp.qpn = *qpn;
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
-
- err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
- break;
- }
- case MLX4_STEERING_MODE_DEVICE_MANAGED: {
- struct mlx4_spec_list spec_eth = { {NULL} };
- __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
-
- struct mlx4_net_trans_rule rule = {
- .queue_mode = MLX4_NET_TRANS_Q_FIFO,
- .exclusive = 0,
- .allow_loopback = 1,
- .promisc_mode = MLX4_FS_PROMISC_NONE,
- .priority = MLX4_DOMAIN_NIC,
- };
-
- rule.port = port;
- rule.qpn = *qpn;
- INIT_LIST_HEAD(&rule.list);
-
- spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
- memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
- memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
- list_add_tail(&spec_eth.list, &rule.list);
-
- err = mlx4_flow_attach(dev, &rule, reg_id);
- break;
- }
- default:
- return -EINVAL;
- }
- if (err)
- mlx4_warn(dev, "Failed Attaching Unicast\n");
-
- return err;
-}
-
-static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn, u64 reg_id)
-{
- switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_B0: {
- struct mlx4_qp qp;
- u8 gid[16] = {0};
- __be64 be_mac;
-
- qp.qpn = qpn;
- mac &= MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
-
- mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
- break;
- }
- case MLX4_STEERING_MODE_DEVICE_MANAGED: {
- mlx4_flow_detach(dev, reg_id);
- break;
- }
- default:
- mlx4_err(dev, "Invalid steering mode.\n");
- }
-}
-
static int validate_index(struct mlx4_dev *dev,
struct mlx4_mac_table *table, int index)
{
@@ -181,92 +100,6 @@ static int find_index(struct mlx4_dev *dev,
return -EINVAL;
}
-int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_entry *entry;
- int index = 0;
- int err = 0;
- u64 reg_id;
-
- mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
- (unsigned long long) mac);
- index = mlx4_register_mac(dev, port, mac);
- if (index < 0) {
- err = index;
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
- (unsigned long long) mac);
- return err;
- }
-
- if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- *qpn = info->base_qpn + index;
- return 0;
- }
-
- err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
- mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
- if (err) {
- mlx4_err(dev, "Failed to reserve qp for mac registration\n");
- goto qp_err;
- }
-
- err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
- if (err)
- goto steer_err;
-
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry) {
- err = -ENOMEM;
- goto alloc_err;
- }
- entry->mac = mac;
- entry->reg_id = reg_id;
- err = radix_tree_insert(&info->mac_tree, *qpn, entry);
- if (err)
- goto insert_err;
- return 0;
-
-insert_err:
- kfree(entry);
-
-alloc_err:
- mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
-
-steer_err:
- mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
- mlx4_unregister_mac(dev, port, mac);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
-
-void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_entry *entry;
-
- mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
- (unsigned long long) mac);
- mlx4_unregister_mac(dev, port, mac);
-
- if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (entry) {
- mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
- " qpn %d\n", port,
- (unsigned long long) mac, qpn);
- mlx4_uc_steer_release(dev, port, entry->mac,
- qpn, entry->reg_id);
- mlx4_qp_release_range(dev, qpn, 1);
- radix_tree_delete(&info->mac_tree, qpn);
- kfree(entry);
- }
- }
-}
-EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
-
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
__be64 *entries)
{
@@ -359,6 +192,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
+int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
+{
+ return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ (port - 1) * (1 << dev->caps.log_num_macs);
+}
+EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
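
mlx4_get_base_qpn() above centralizes the computation that main.c used to open-code: each port owns a window of 2^log_num_macs consecutive QPs starting at the reserved Ethernet-address QP base. A worked example with made-up numbers (the real values come from dev->caps at runtime):

#include <stdio.h>

#define RESERVED_ETH_QP_BASE 0x400   /* example value only */
#define LOG_NUM_MACS 7               /* example value only */

static int get_base_qpn(int port)
{
	/* ports are 1-based; each port gets (1 << LOG_NUM_MACS) QPs */
	return RESERVED_ETH_QP_BASE + (port - 1) * (1 << LOG_NUM_MACS);
}

int main(void)
{
	for (int port = 1; port <= 2; port++)
		printf("port %d: base qpn 0x%x\n", port, get_base_qpn(port));
	/* prints 0x400 for port 1 and 0x480 for port 2 */
	return 0;
}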
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
@@ -397,29 +236,13 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- struct mlx4_mac_entry *entry;
int index = qpn - info->base_qpn;
int err = 0;
- if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (!entry)
- return -EINVAL;
- mlx4_uc_steer_release(dev, port, entry->mac,
- qpn, entry->reg_id);
- mlx4_unregister_mac(dev, port, entry->mac);
- entry->mac = new_mac;
- entry->reg_id = 0;
- mlx4_register_mac(dev, port, new_mac);
- err = mlx4_uc_steer_add(dev, port, entry->mac,
- &qpn, &entry->reg_id);
- return err;
- }
-
/* CX1 doesn't support multi-functions */
mutex_lock(&table->mutex);
@@ -439,7 +262,7 @@ out:
mutex_unlock(&table->mutex);
return err;
}
-EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 561ed2a22a17..5997adc943d0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3018,7 +3018,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
- port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
+ port = ctrl->port;
eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
/* Clear a space in the inbox for eth header */
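
This hunk pairs with the mlx4_net_trans_rule_hw_ctrl change earlier in the patch: the packed __be32 vf_vep_port word is split into named byte fields, so ctrl->port reads the same byte that be32_to_cpu(...) & 0xff used to extract. A small endianness check of that equivalence; the buffer contents are invented and ntohl() stands in for be32_to_cpu().

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct hw_ctrl_new {          /* mirrors the new byte-wise layout */
	uint8_t rsvd1;
	uint8_t funcid;
	uint8_t vep;
	uint8_t port;
};

int main(void)
{
	/* four bytes as they would sit in the mailbox: port is the last one */
	const uint8_t wire[4] = { 0x00, 0x01, 0x02, 0x07 };

	/* old method: read the word as big-endian, mask the low byte */
	uint32_t be_word;
	memcpy(&be_word, wire, sizeof(be_word));
	uint8_t old_port = ntohl(be_word) & 0xff;

	/* new method: the byte has a name in the struct layout */
	struct hw_ctrl_new ctrl;
	memcpy(&ctrl, wire, sizeof(ctrl));

	printf("old extraction: %u, new field: %u\n", old_port, ctrl.port);  /* both 7 */
	return 0;
}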
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index b71eb39ab448..fbcb9e74d7fc 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1080,7 +1080,6 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(netdev->dev_addr, mac, netdev->addr_len);
ks8842_write_mac_addr(adapter, mac);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 286816a4e783..33bcb63d56a2 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -69,7 +69,6 @@ union ks8851_tx_hdr {
* @mii: The MII state information for the mii calls.
* @rxctrl: RX settings for @rxctrl_work.
* @tx_work: Work queue for tx packets
- * @irq_work: Work queue for servicing interrupts
* @rxctrl_work: Work queue for updating RX mode and multicast lists
* @txq: Queue of packets for transmission.
* @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1.
@@ -121,7 +120,6 @@ struct ks8851_net {
struct ks8851_rxctrl rxctrl;
struct work_struct tx_work;
- struct work_struct irq_work;
struct work_struct rxctrl_work;
struct sk_buff_head txq;
@@ -444,23 +442,6 @@ static void ks8851_init_mac(struct ks8851_net *ks)
}
/**
- * ks8851_irq - device interrupt handler
- * @irq: Interrupt number passed from the IRQ handler.
- * @pw: The private word passed to register_irq(), our struct ks8851_net.
- *
- * Disable the interrupt from happening again until we've processed the
- * current status by scheduling ks8851_irq_work().
- */
-static irqreturn_t ks8851_irq(int irq, void *pw)
-{
- struct ks8851_net *ks = pw;
-
- disable_irq_nosync(irq);
- schedule_work(&ks->irq_work);
- return IRQ_HANDLED;
-}
-
-/**
* ks8851_rdfifo - read data from the receive fifo
* @ks: The device state.
* @buff: The buffer address
@@ -595,19 +576,20 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
}
/**
- * ks8851_irq_work - work queue handler for dealing with interrupt requests
- * @work: The work structure that was scheduled by schedule_work()
+ * ks8851_irq - IRQ handler for dealing with interrupt requests
+ * @irq: IRQ number
+ * @_ks: cookie
*
- * This is the handler invoked when the ks8851_irq() is called to find out
- * what happened, as we cannot allow ourselves to sleep whilst waiting for
- * anything other process has the chip's lock.
+ * This handler is invoked when the IRQ line asserts to find out what happened.
+ * As we cannot allow ourselves to sleep in HARDIRQ context, this handler runs
+ * in thread context.
*
* Read the interrupt status, work out what needs to be done and then clear
* any of the interrupts that are not needed.
*/
-static void ks8851_irq_work(struct work_struct *work)
+static irqreturn_t ks8851_irq(int irq, void *_ks)
{
- struct ks8851_net *ks = container_of(work, struct ks8851_net, irq_work);
+ struct ks8851_net *ks = _ks;
unsigned status;
unsigned handled = 0;
@@ -688,7 +670,7 @@ static void ks8851_irq_work(struct work_struct *work)
if (status & IRQ_TXI)
netif_wake_queue(ks->netdev);
- enable_irq(ks->netdev->irq);
+ return IRQ_HANDLED;
}
/**
@@ -896,7 +878,6 @@ static int ks8851_net_stop(struct net_device *dev)
mutex_unlock(&ks->lock);
/* stop any outstanding work */
- flush_work(&ks->irq_work);
flush_work(&ks->tx_work);
flush_work(&ks->rxctrl_work);
@@ -1052,7 +1033,6 @@ static int ks8851_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
return ks8851_write_mac_addr(dev);
}
@@ -1438,7 +1418,6 @@ static int ks8851_probe(struct spi_device *spi)
spin_lock_init(&ks->statelock);
INIT_WORK(&ks->tx_work, ks8851_tx_work);
- INIT_WORK(&ks->irq_work, ks8851_irq_work);
INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
/* initialise pre-made spi transfer messages */
@@ -1505,8 +1484,9 @@ static int ks8851_probe(struct spi_device *spi)
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
- ret = request_irq(spi->irq, ks8851_irq, IRQF_TRIGGER_LOW,
- ndev->name, ks);
+ ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ ndev->name, ks);
if (ret < 0) {
dev_err(&spi->dev, "failed to get irq\n");
goto err_irq;
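
The ks8851 changes above replace the disable_irq_nosync()/schedule_work() indirection with a threaded interrupt handler. With a NULL primary handler the core requires IRQF_ONESHOT, keeping the level-triggered line masked until the thread function returns, so the handler may sleep (take mutexes, do SPI transfers). A skeleton of that registration pattern; the names and the empty handler body are placeholders, not the driver's code:

#include <linux/interrupt.h>

/* Runs in process context, so it may sleep. */
static irqreturn_t demo_irq_thread(int irq, void *dev_id)
{
	/* read and acknowledge the device's interrupt status here */
	return IRQ_HANDLED;
}

static int demo_setup_irq(int irq, void *dev_id)
{
	/* NULL hard handler: the core installs a default one and insists on
	 * IRQF_ONESHOT so the line stays masked until the thread finishes. */
	return request_threaded_irq(irq, NULL, demo_irq_thread,
				    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				    "demo-dev", dev_id);
}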
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index ef8f9f92e547..a343066f7b43 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1237,7 +1237,6 @@ static int ks_set_mac_address(struct net_device *netdev, void *paddr)
struct sockaddr *addr = paddr;
u8 *da;
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
da = (u8 *)netdev->dev_addr;
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 8163fd0f453f..afaf0c07f37f 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_MICROCHIP
bool "Microchip devices"
default y
- depends on SPI && EXPERIMENTAL
+ depends on SPI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -20,7 +20,7 @@ if NET_VENDOR_MICROCHIP
config ENC28J60
tristate "ENC28J60 support"
- depends on SPI && EXPERIMENTAL
+ depends on SPI
select CRC32
---help---
Support for the Microchip EN28J60 ethernet chip.
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index a99456c3dd87..5d98a9f7bfc7 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -527,7 +527,6 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
return enc28j60_set_hw_macaddr(dev);
}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index f8408d6e961c..4f9937e026e5 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -664,10 +664,9 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
/* copy header of running firmware from SRAM to host memory to
* validate firmware */
hdr = kmalloc(bytes, GFP_KERNEL);
- if (hdr == NULL) {
- dev_err(dev, "could not malloc firmware hdr\n");
+ if (hdr == NULL)
return -ENOMEM;
- }
+
memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
status = myri10ge_validate_firmware(mgp, hdr);
kfree(hdr);
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index f157334579fd..a100860d45e6 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -5,9 +5,6 @@
config NET_VENDOR_NATSEMI
bool "National Semi-conductor devices"
default y
- depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \
- ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MIPS || \
- PCI || PCMCIA || SUPERH || XTENSA_PLATFORM_XT2000 || ZORRO
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/natsemi/ibmlana.c b/drivers/net/ethernet/natsemi/ibmlana.c
deleted file mode 100644
index 923e640d604c..000000000000
--- a/drivers/net/ethernet/natsemi/ibmlana.c
+++ /dev/null
@@ -1,1075 +0,0 @@
-/*
-net-3-driver for the IBM LAN Adapter/A
-
-This is an extension to the Linux operating system, and is covered by the
-same GNU General Public License that covers that work.
-
-Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de,
- alfred.arnold@lancom.de)
-
-This driver is based both on the SK_MCA driver, which is itself based on the
-SK_G16 and 3C523 driver.
-
-paper sources:
- 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
- Hans-Peter Messmer for the basic Microchannel stuff
-
- 'Linux Geraetetreiber' by Allesandro Rubini, Kalle Dalheimer
- for help on Ethernet driver programming
-
- 'DP83934CVUL-20/25 MHz SONIC-T Ethernet Controller Datasheet' by National
- Semiconductor for info on the MAC chip
-
- 'LAN Technical Reference Ethernet Adapter Interface Version 1 Release 1.0
- Document Number SC30-3661-00' by IBM for info on the adapter itself
-
- Also see http://www.national.com/analog
-
-special acknowledgements to:
- - Bob Eager for helping me out with documentation from IBM
- - Jim Shorney for his endless patience with me while I was using
- him as a beta tester to trace down the address filter bug ;-)
-
- Missing things:
-
- -> set debug level via ioctl instead of compile-time switches
- -> I didn't follow the development of the 2.1.x kernels, so my
- assumptions about which things changed with which kernel version
- are probably nonsense
-
-History:
- Nov 6th, 1999
- startup from SK_MCA driver
- Dec 6th, 1999
- finally got docs about the card. A big thank you to Bob Eager!
- Dec 12th, 1999
- first packet received
- Dec 13th, 1999
- recv queue done, tcpdump works
- Dec 15th, 1999
- transmission part works
- Dec 28th, 1999
- added usage of the isa_functions for Linux 2.3 . Things should
- still work with 2.0.x....
- Jan 28th, 2000
- in Linux 2.2.13, the version.h file mysteriously didn't get
- included. Added a workaround for this. Furthermore, it now
- not only compiles as a modules ;-)
- Jan 30th, 2000
- newer kernels automatically probe more than one board, so the
- 'startslot' as a variable is also needed here
- Apr 12th, 2000
- the interrupt mask register is not set 'hard' instead of individually
- setting registers, since this seems to set bits that shouldn't be
- set
- May 21st, 2000
- reset interrupt status immediately after CAM load
- add a recovery delay after releasing the chip's reset line
- May 24th, 2000
- finally found the bug in the address filter setup - damned signed
- chars!
- June 1st, 2000
- corrected version codes, added support for the latest 2.3 changes
- Oct 28th, 2002
- cleaned up for the 2.5 tree <alan@lxorguk.ukuu.org.uk>
-
- *************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/time.h>
-#include <linux/mca.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-
-#include <asm/processor.h>
-#include <asm/io.h>
-
-#define _IBM_LANA_DRIVER_
-#include "ibmlana.h"
-
-#undef DEBUG
-
-#define DRV_NAME "ibmlana"
-
-/* ------------------------------------------------------------------------
- * global static data - not more since we can handle multiple boards and
- * have to pack all state info into the device struct!
- * ------------------------------------------------------------------------ */
-
-static char *MediaNames[Media_Count] = {
- "10BaseT", "10Base5", "Unknown", "10Base2"
-};
-
-/* ------------------------------------------------------------------------
- * private subfunctions
- * ------------------------------------------------------------------------ */
-
-#ifdef DEBUG
- /* dump all registers */
-
-static void dumpregs(struct net_device *dev)
-{
- int z;
-
- for (z = 0; z < 160; z += 2) {
- if (!(z & 15))
- printk("REGS: %04x:", z);
- printk(" %04x", inw(dev->base_addr + z));
- if ((z & 15) == 14)
- printk("\n");
- }
-}
-
-/* dump parts of shared memory - only needed during debugging */
-
-static void dumpmem(struct net_device *dev, u32 start, u32 len)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int z;
-
- printk("Address %04x:\n", start);
- for (z = 0; z < len; z++) {
- if ((z & 15) == 0)
- printk("%04x:", z);
- printk(" %02x", readb(priv->base + start + z));
- if ((z & 15) == 15)
- printk("\n");
- }
- if ((z & 15) != 0)
- printk("\n");
-}
-
-/* print exact time - ditto */
-
-static void PrTime(void)
-{
- struct timeval tv;
-
- do_gettimeofday(&tv);
- printk("%9d:%06d: ", (int) tv.tv_sec, (int) tv.tv_usec);
-}
-#endif /* DEBUG */
-
-/* deduce resources out of POS registers */
-
-static void getaddrs(struct mca_device *mdev, int *base, int *memlen,
- int *iobase, int *irq, ibmlana_medium *medium)
-{
- u_char pos0, pos1;
-
- pos0 = mca_device_read_stored_pos(mdev, 2);
- pos1 = mca_device_read_stored_pos(mdev, 3);
-
- *base = 0xc0000 + ((pos1 & 0xf0) << 9);
- *memlen = (pos1 & 0x01) ? 0x8000 : 0x4000;
- *iobase = (pos0 & 0xe0) << 7;
- switch (pos0 & 0x06) {
- case 0:
- *irq = 5;
- break;
- case 2:
- *irq = 15;
- break;
- case 4:
- *irq = 10;
- break;
- case 6:
- *irq = 11;
- break;
- }
- *medium = (pos0 & 0x18) >> 3;
-}
-
-/* wait on register value with mask and timeout */
-
-static int wait_timeout(struct net_device *dev, int regoffs, u16 mask,
- u16 value, int timeout)
-{
- unsigned long fin = jiffies + timeout;
-
- while (time_before(jiffies,fin))
- if ((inw(dev->base_addr + regoffs) & mask) == value)
- return 1;
-
- return 0;
-}
-
-
-/* reset the whole board */
-
-static void ResetBoard(struct net_device *dev)
-{
- unsigned char bcmval;
-
- /* read original board control value */
-
- bcmval = inb(dev->base_addr + BCMREG);
-
- /* set reset bit for a while */
-
- bcmval |= BCMREG_RESET;
- outb(bcmval, dev->base_addr + BCMREG);
- udelay(10);
- bcmval &= ~BCMREG_RESET;
- outb(bcmval, dev->base_addr + BCMREG);
-
- /* switch over to RAM again */
-
- bcmval |= BCMREG_RAMEN | BCMREG_RAMWIN;
- outb(bcmval, dev->base_addr + BCMREG);
-}
-
-/* calculate RAM layout & set up descriptors in RAM */
-
-static void InitDscrs(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- u32 addr, baddr, raddr;
- int z;
- tda_t tda;
- rda_t rda;
- rra_t rra;
-
- /* initialize RAM */
-
- memset_io(priv->base, 0xaa,
- dev->mem_start - dev->mem_start); /* XXX: typo? */
-
- /* setup n TX descriptors - independent of RAM size */
-
- priv->tdastart = addr = 0;
- priv->txbufstart = baddr = sizeof(tda_t) * TXBUFCNT;
- for (z = 0; z < TXBUFCNT; z++) {
- tda.status = 0;
- tda.config = 0;
- tda.length = 0;
- tda.fragcount = 1;
- tda.startlo = baddr;
- tda.starthi = 0;
- tda.fraglength = 0;
- if (z == TXBUFCNT - 1)
- tda.link = priv->tdastart;
- else
- tda.link = addr + sizeof(tda_t);
- tda.link |= 1;
- memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));
- addr += sizeof(tda_t);
- baddr += PKTSIZE;
- }
-
- /* calculate how many receive buffers fit into remaining memory */
-
- priv->rxbufcnt = (dev->mem_end - dev->mem_start - baddr) / (sizeof(rra_t) + sizeof(rda_t) + PKTSIZE);
-
- /* calculate receive addresses */
-
- priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE);
- priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t));
- priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t));
-
- for (z = 0; z < priv->rxbufcnt; z++) {
- rra.startlo = baddr;
- rra.starthi = 0;
- rra.cntlo = PKTSIZE >> 1;
- rra.cnthi = 0;
- memcpy_toio(priv->base + raddr, &rra, sizeof(rra_t));
-
- rda.status = 0;
- rda.length = 0;
- rda.startlo = 0;
- rda.starthi = 0;
- rda.seqno = 0;
- if (z < priv->rxbufcnt - 1)
- rda.link = addr + sizeof(rda_t);
- else
- rda.link = 1;
- rda.inuse = 1;
- memcpy_toio(priv->base + addr, &rda, sizeof(rda_t));
-
- baddr += PKTSIZE;
- raddr += sizeof(rra_t);
- addr += sizeof(rda_t);
- }
-
- /* initialize current pointers */
-
- priv->nextrxdescr = 0;
- priv->lastrxdescr = priv->rxbufcnt - 1;
- priv->nexttxdescr = 0;
- priv->currtxdescr = 0;
- priv->txusedcnt = 0;
- memset(priv->txused, 0, sizeof(priv->txused));
-}
-
-/* set up Rx + Tx descriptors in SONIC */
-
-static int InitSONIC(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
-
- /* set up start & end of resource area */
-
- outw(0, SONIC_URRA);
- outw(priv->rrastart, dev->base_addr + SONIC_RSA);
- outw(priv->rrastart + (priv->rxbufcnt * sizeof(rra_t)), dev->base_addr + SONIC_REA);
- outw(priv->rrastart, dev->base_addr + SONIC_RRP);
- outw(priv->rrastart, dev->base_addr + SONIC_RWP);
-
- /* set EOBC so that only one packet goes into one buffer */
-
- outw((PKTSIZE - 4) >> 1, dev->base_addr + SONIC_EOBC);
-
- /* let SONIC read the first RRA descriptor */
-
- outw(CMDREG_RRRA, dev->base_addr + SONIC_CMDREG);
- if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_RRRA, 0, 2)) {
- printk(KERN_ERR "%s: SONIC did not respond on RRRA command - giving up.", dev->name);
- return 0;
- }
-
- /* point SONIC to the first RDA */
-
- outw(0, dev->base_addr + SONIC_URDA);
- outw(priv->rdastart, dev->base_addr + SONIC_CRDA);
-
- /* set upper half of TDA address */
-
- outw(0, dev->base_addr + SONIC_UTDA);
-
- return 1;
-}
-
-/* stop SONIC so we can reinitialize it */
-
-static void StopSONIC(struct net_device *dev)
-{
- /* disable interrupts */
-
- outb(inb(dev->base_addr + BCMREG) & (~BCMREG_IEN), dev->base_addr + BCMREG);
- outb(0, dev->base_addr + SONIC_IMREG);
-
- /* reset the SONIC */
-
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
- udelay(10);
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
-}
-
-/* initialize card and SONIC for proper operation */
-
-static void putcam(camentry_t * cams, int *camcnt, char *addr)
-{
- camentry_t *pcam = cams + (*camcnt);
- u8 *uaddr = (u8 *) addr;
-
- pcam->index = *camcnt;
- pcam->addr0 = (((u16) uaddr[1]) << 8) | uaddr[0];
- pcam->addr1 = (((u16) uaddr[3]) << 8) | uaddr[2];
- pcam->addr2 = (((u16) uaddr[5]) << 8) | uaddr[4];
- (*camcnt)++;
-}
-
-static void InitBoard(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int camcnt;
- camentry_t cams[16];
- u32 cammask;
- struct netdev_hw_addr *ha;
- u16 rcrval;
-
- /* reset the SONIC */
-
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
- udelay(10);
-
- /* clear all spurious interrupts */
-
- outw(inw(dev->base_addr + SONIC_ISREG), dev->base_addr + SONIC_ISREG);
-
- /* set up the SONIC's bus interface - constant for this adapter -
- must be done while the SONIC is in reset */
-
- outw(DCREG_USR1 | DCREG_USR0 | DCREG_WC1 | DCREG_DW32, dev->base_addr + SONIC_DCREG);
- outw(0, dev->base_addr + SONIC_DCREG2);
-
-	/* remove reset from the SONIC */
-
- outw(0, dev->base_addr + SONIC_CMDREG);
- udelay(10);
-
- /* data sheet requires URRA to be programmed before setting up the CAM contents */
-
- outw(0, dev->base_addr + SONIC_URRA);
-
- /* program the CAM entry 0 to the device address */
-
- camcnt = 0;
- putcam(cams, &camcnt, dev->dev_addr);
-
- /* start putting the multicast addresses into the CAM list. Stop if
- it is full. */
-
- netdev_for_each_mc_addr(ha, dev) {
- putcam(cams, &camcnt, ha->addr);
- if (camcnt == 16)
- break;
- }
-
- /* calculate CAM mask */
-
- cammask = (1 << camcnt) - 1;
-
- /* feed CDA into SONIC, initialize RCR value (always get broadcasts) */
-
- memcpy_toio(priv->base, cams, sizeof(camentry_t) * camcnt);
- memcpy_toio(priv->base + (sizeof(camentry_t) * camcnt), &cammask, sizeof(cammask));
-
-#ifdef DEBUG
- printk("CAM setup:\n");
- dumpmem(dev, 0, sizeof(camentry_t) * camcnt + sizeof(cammask));
-#endif
-
- outw(0, dev->base_addr + SONIC_CAMPTR);
- outw(camcnt, dev->base_addr + SONIC_CAMCNT);
- outw(CMDREG_LCAM, dev->base_addr + SONIC_CMDREG);
- if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_LCAM, 0, 2)) {
-		printk(KERN_ERR "%s: SONIC did not respond on LCAM command - giving up.", dev->name);
- return;
- } else {
- /* clear interrupt condition */
-
- outw(ISREG_LCD, dev->base_addr + SONIC_ISREG);
-
-#ifdef DEBUG
- printk("Loading CAM done, address pointers %04x:%04x\n",
- inw(dev->base_addr + SONIC_URRA),
- inw(dev->base_addr + SONIC_CAMPTR));
- {
- int z;
-
- printk("\n-->CAM: PTR %04x CNT %04x\n",
- inw(dev->base_addr + SONIC_CAMPTR),
- inw(dev->base_addr + SONIC_CAMCNT));
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
- for (z = 0; z < camcnt; z++) {
- outw(z, dev->base_addr + SONIC_CAMEPTR);
- printk("Entry %d: %04x %04x %04x\n", z,
- inw(dev->base_addr + SONIC_CAMADDR0),
- inw(dev->base_addr + SONIC_CAMADDR1),
- inw(dev->base_addr + SONIC_CAMADDR2));
- }
- outw(0, dev->base_addr + SONIC_CMDREG);
- }
-#endif
- }
-
- rcrval = RCREG_BRD | RCREG_LB_NONE;
-
- /* if still multicast addresses left or ALLMULTI is set, set the multicast
- enable bit */
-
- if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
- rcrval |= RCREG_AMC;
-
- /* promiscuous mode ? */
-
- if (dev->flags & IFF_PROMISC)
- rcrval |= RCREG_PRO;
-
- /* program receive mode */
-
- outw(rcrval, dev->base_addr + SONIC_RCREG);
-#ifdef DEBUG
- printk("\nRCRVAL: %04x\n", rcrval);
-#endif
-
- /* set up descriptors in shared memory + feed them into SONIC registers */
-
- InitDscrs(dev);
- if (!InitSONIC(dev))
- return;
-
- /* reset all pending interrupts */
-
- outw(0xffff, dev->base_addr + SONIC_ISREG);
-
- /* enable transmitter + receiver interrupts */
-
- outw(CMDREG_RXEN, dev->base_addr + SONIC_CMDREG);
- outw(IMREG_PRXEN | IMREG_RBEEN | IMREG_PTXEN | IMREG_TXEREN, dev->base_addr + SONIC_IMREG);
-
- /* turn on card interrupts */
-
- outb(inb(dev->base_addr + BCMREG) | BCMREG_IEN, dev->base_addr + BCMREG);
-
-#ifdef DEBUG
- printk("Register dump after initialization:\n");
- dumpregs(dev);
-#endif
-}
-
-/* start transmission of a descriptor */
-
-static void StartTx(struct net_device *dev, int descr)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int addr;
-
- addr = priv->tdastart + (descr * sizeof(tda_t));
-
- /* put descriptor address into SONIC */
-
- outw(addr, dev->base_addr + SONIC_CTDA);
-
- /* trigger transmitter */
-
- priv->currtxdescr = descr;
- outw(CMDREG_TXP, dev->base_addr + SONIC_CMDREG);
-}
-
-/* ------------------------------------------------------------------------
- * interrupt handler(s)
- * ------------------------------------------------------------------------ */
-
-/* receive buffer area exhausted */
-
-static void irqrbe_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
-
- /* point the SONIC back to the RRA start */
-
- outw(priv->rrastart, dev->base_addr + SONIC_RRP);
- outw(priv->rrastart, dev->base_addr + SONIC_RWP);
-}
-
-/* receive interrupt */
-
-static void irqrx_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- rda_t rda;
- u32 rdaaddr, lrdaaddr;
-
- /* loop until ... */
-
- while (1) {
- /* read descriptor that was next to be filled by SONIC */
-
- rdaaddr = priv->rdastart + (priv->nextrxdescr * sizeof(rda_t));
- lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t));
- memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t));
-
- /* iron out upper word halves of fields we use - SONIC will duplicate
- bits 0..15 to 16..31 */
-
- rda.status &= 0xffff;
- rda.length &= 0xffff;
- rda.startlo &= 0xffff;
-
- /* stop if the SONIC still owns it, i.e. there is no data for us */
-
- if (rda.inuse)
- break;
-
- /* good packet? */
-
- else if (rda.status & RCREG_PRX) {
- struct sk_buff *skb;
-
- /* fetch buffer */
-
- skb = netdev_alloc_skb(dev, rda.length + 2);
- if (skb == NULL)
- dev->stats.rx_dropped++;
- else {
- /* copy out data */
-
- memcpy_fromio(skb_put(skb, rda.length),
- priv->base +
- rda.startlo, rda.length);
-
- /* set up skb fields */
-
- skb->protocol = eth_type_trans(skb, dev);
- skb_checksum_none_assert(skb);
-
- /* bookkeeping */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += rda.length;
-
- /* pass to the upper layers */
- netif_rx(skb);
- }
- }
-
- /* otherwise check error status bits and increase statistics */
-
- else {
- dev->stats.rx_errors++;
- if (rda.status & RCREG_FAER)
- dev->stats.rx_frame_errors++;
- if (rda.status & RCREG_CRCR)
- dev->stats.rx_crc_errors++;
- }
-
- /* descriptor processed, will become new last descriptor in queue */
-
- rda.link = 1;
- rda.inuse = 1;
- memcpy_toio(priv->base + rdaaddr, &rda,
- sizeof(rda_t));
-
- /* set up link and EOL = 0 in currently last descriptor. Only write
- the link field since the SONIC may currently already access the
- other fields. */
-
- memcpy_toio(priv->base + lrdaaddr + 20, &rdaaddr, 4);
-
- /* advance indices */
-
- priv->lastrxdescr = priv->nextrxdescr;
- if ((++priv->nextrxdescr) >= priv->rxbufcnt)
- priv->nextrxdescr = 0;
- }
-}
-
-/* transmit interrupt */
-
-static void irqtx_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- tda_t tda;
-
- /* fetch descriptor (we forgot the size ;-) */
- memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
-
- /* update statistics */
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += tda.length;
-
- /* update our pointers */
- priv->txused[priv->currtxdescr] = 0;
- priv->txusedcnt--;
-
- /* if there are more descriptors present in RAM, start them */
- if (priv->txusedcnt > 0)
- StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);
-
- /* tell the upper layer we can go on transmitting */
- netif_wake_queue(dev);
-}
-
-static void irqtxerr_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- tda_t tda;
-
- /* fetch descriptor to check status */
- memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
-
- /* update statistics */
- dev->stats.tx_errors++;
- if (tda.status & (TCREG_NCRS | TCREG_CRSL))
- dev->stats.tx_carrier_errors++;
- if (tda.status & TCREG_EXC)
- dev->stats.tx_aborted_errors++;
- if (tda.status & TCREG_OWC)
- dev->stats.tx_window_errors++;
- if (tda.status & TCREG_FU)
- dev->stats.tx_fifo_errors++;
-
- /* update our pointers */
- priv->txused[priv->currtxdescr] = 0;
- priv->txusedcnt--;
-
- /* if there are more descriptors present in RAM, start them */
- if (priv->txusedcnt > 0)
- StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);
-
- /* tell the upper layer we can go on transmitting */
- netif_wake_queue(dev);
-}
-
-/* general interrupt entry */
-
-static irqreturn_t irq_handler(int dummy, void *device)
-{
- struct net_device *dev = device;
- u16 ival;
-
- /* in case we're not meant... */
- if (!(inb(dev->base_addr + BCMREG) & BCMREG_IPEND))
- return IRQ_NONE;
-
- /* loop through the interrupt bits until everything is clear */
- while (1) {
- ival = inw(dev->base_addr + SONIC_ISREG);
-
- if (ival & ISREG_RBE) {
- irqrbe_handler(dev);
- outw(ISREG_RBE, dev->base_addr + SONIC_ISREG);
- }
- if (ival & ISREG_PKTRX) {
- irqrx_handler(dev);
- outw(ISREG_PKTRX, dev->base_addr + SONIC_ISREG);
- }
- if (ival & ISREG_TXDN) {
- irqtx_handler(dev);
- outw(ISREG_TXDN, dev->base_addr + SONIC_ISREG);
- }
- if (ival & ISREG_TXER) {
- irqtxerr_handler(dev);
- outw(ISREG_TXER, dev->base_addr + SONIC_ISREG);
- }
- break;
- }
- return IRQ_HANDLED;
-}
-
-/* ------------------------------------------------------------------------
- * driver methods
- * ------------------------------------------------------------------------ */
-
-/* MCA info */
-
-#if 0 /* info available elsewhere, but this is kept for reference */
-static int ibmlana_getinfo(char *buf, int slot, void *d)
-{
- int len = 0, i;
- struct net_device *dev = (struct net_device *) d;
- ibmlana_priv *priv;
-
- /* can't say anything about an uninitialized device... */
-
- if (dev == NULL)
- return len;
- priv = netdev_priv(dev);
-
- /* print info */
-
- len += sprintf(buf + len, "IRQ: %d\n", priv->realirq);
- len += sprintf(buf + len, "I/O: %#lx\n", dev->base_addr);
- len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start, dev->mem_end - 1);
- len += sprintf(buf + len, "Transceiver: %s\n", MediaNames[priv->medium]);
- len += sprintf(buf + len, "Device: %s\n", dev->name);
- len += sprintf(buf + len, "MAC address:");
- for (i = 0; i < 6; i++)
- len += sprintf(buf + len, " %02x", dev->dev_addr[i]);
- buf[len++] = '\n';
- buf[len] = 0;
-
- return len;
-}
-#endif
-
-/* open driver. Means also initialization and start of the SONIC */
-
-static int ibmlana_open(struct net_device *dev)
-{
- int result;
- ibmlana_priv *priv = netdev_priv(dev);
-
- /* register resources - only necessary for IRQ */
-
- result = request_irq(priv->realirq, irq_handler, IRQF_SHARED,
- dev->name, dev);
- if (result != 0) {
- printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq);
- return result;
- }
- dev->irq = priv->realirq;
-
- /* set up the card and SONIC */
- InitBoard(dev);
-
- /* initialize operational flags */
- netif_start_queue(dev);
- return 0;
-}
-
-/* close driver. Shut down board and free allocated resources */
-
-static int ibmlana_close(struct net_device *dev)
-{
- /* turn off board */
-
- /* release resources */
- if (dev->irq != 0)
- free_irq(dev->irq, dev);
- dev->irq = 0;
- return 0;
-}
-
-/* transmit a block. */
-
-static netdev_tx_t ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int tmplen, addr;
- unsigned long flags;
- tda_t tda;
- int baddr;
-
- /* find out if there are free slots for a frame to transmit. If not,
- the upper layer is in deep desperation and we simply ignore the frame. */
-
- if (priv->txusedcnt >= TXBUFCNT) {
- dev->stats.tx_dropped++;
- goto tx_done;
- }
-
- /* copy the frame data into the next free transmit buffer - fillup missing */
- tmplen = skb->len;
- if (tmplen < 60)
- tmplen = 60;
- baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
- memcpy_toio(priv->base + baddr, skb->data, skb->len);
-
- /* copy filler into RAM - in case we're filling up...
- we're filling a bit more than necessary, but that doesn't harm
- since the buffer is far larger...
- Sorry Linus for the filler string but I couldn't resist ;-) */
-
- if (tmplen > skb->len) {
- char *fill = "NetBSD is a nice OS too! ";
- unsigned int destoffs = skb->len, l = strlen(fill);
-
- while (destoffs < tmplen) {
- memcpy_toio(priv->base + baddr + destoffs, fill, l);
- destoffs += l;
- }
- }
-
- /* set up the new frame descriptor */
- addr = priv->tdastart + (priv->nexttxdescr * sizeof(tda_t));
- memcpy_fromio(&tda, priv->base + addr, sizeof(tda_t));
- tda.length = tda.fraglength = tmplen;
- memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));
-
- /* if there were no active descriptors, trigger the SONIC */
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->txusedcnt++;
- priv->txused[priv->nexttxdescr] = 1;
-
- /* are all transmission slots used up ? */
- if (priv->txusedcnt >= TXBUFCNT)
- netif_stop_queue(dev);
-
- if (priv->txusedcnt == 1)
- StartTx(dev, priv->nexttxdescr);
- priv->nexttxdescr = (priv->nexttxdescr + 1) % TXBUFCNT;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-tx_done:
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/* switch receiver mode. */
-
-static void ibmlana_set_multicast_list(struct net_device *dev)
-{
- /* first stop the SONIC... */
- StopSONIC(dev);
- /* ...then reinit it with the new flags */
- InitBoard(dev);
-}
-
-/* ------------------------------------------------------------------------
- * hardware check
- * ------------------------------------------------------------------------ */
-
-static int ibmlana_irq;
-static int ibmlana_io;
-static int startslot; /* counts through slots when probing multiple devices */
-
-static short ibmlana_adapter_ids[] __initdata = {
- IBM_LANA_ID,
- 0x0000
-};
-
-static char *ibmlana_adapter_names[] = {
- "IBM LAN Adapter/A",
- NULL
-};
-
-
-static const struct net_device_ops ibmlana_netdev_ops = {
- .ndo_open = ibmlana_open,
- .ndo_stop = ibmlana_close,
- .ndo_start_xmit = ibmlana_tx,
- .ndo_set_rx_mode = ibmlana_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int ibmlana_init_one(struct device *kdev)
-{
- struct mca_device *mdev = to_mca_device(kdev);
- struct net_device *dev;
- int slot = mdev->slot, z, rc;
- int base = 0, irq = 0, iobase = 0, memlen = 0;
- ibmlana_priv *priv;
- ibmlana_medium medium;
-
- dev = alloc_etherdev(sizeof(ibmlana_priv));
- if (!dev)
- return -ENOMEM;
-
- dev->irq = ibmlana_irq;
- dev->base_addr = ibmlana_io;
-
- base = dev->mem_start;
- irq = dev->irq;
-
- /* deduce card addresses */
- getaddrs(mdev, &base, &memlen, &iobase, &irq, &medium);
-
- /* were we looking for something different ? */
- if (dev->irq && dev->irq != irq) {
- rc = -ENODEV;
- goto err_out;
- }
- if (dev->mem_start && dev->mem_start != base) {
- rc = -ENODEV;
- goto err_out;
- }
-
- /* announce success */
- printk(KERN_INFO "%s: IBM LAN Adapter/A found in slot %d\n", dev->name, slot + 1);
-
- /* try to obtain I/O range */
- if (!request_region(iobase, IBM_LANA_IORANGE, DRV_NAME)) {
- printk(KERN_ERR "%s: cannot allocate I/O range at %#x!\n", DRV_NAME, iobase);
- startslot = slot + 1;
- rc = -EBUSY;
- goto err_out;
- }
-
- priv = netdev_priv(dev);
- priv->slot = slot;
- priv->realirq = mca_device_transform_irq(mdev, irq);
- priv->medium = medium;
- spin_lock_init(&priv->lock);
-
- /* set base + irq for this device (irq not allocated so far) */
-
- dev->irq = 0;
- dev->mem_start = base;
- dev->mem_end = base + memlen;
- dev->base_addr = iobase;
-
- priv->base = ioremap(base, memlen);
- if (!priv->base) {
- printk(KERN_ERR "%s: cannot remap memory!\n", DRV_NAME);
- startslot = slot + 1;
- rc = -EBUSY;
- goto err_out_reg;
- }
-
- mca_device_set_name(mdev, ibmlana_adapter_names[mdev->index]);
- mca_device_set_claim(mdev, 1);
-
- /* set methods */
- dev->netdev_ops = &ibmlana_netdev_ops;
- dev->flags |= IFF_MULTICAST;
-
- /* copy out MAC address */
-
- for (z = 0; z < ETH_ALEN; z++)
- dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
-
- /* print config */
-
- printk(KERN_INFO "%s: IRQ %d, I/O %#lx, memory %#lx-%#lx, "
- "MAC address %pM.\n",
- dev->name, priv->realirq, dev->base_addr,
- dev->mem_start, dev->mem_end - 1,
- dev->dev_addr);
- printk(KERN_INFO "%s: %s medium\n", dev->name, MediaNames[priv->medium]);
-
- /* reset board */
-
- ResetBoard(dev);
-
- /* next probe will start at next slot */
-
- startslot = slot + 1;
-
- rc = register_netdev(dev);
- if (rc)
- goto err_out_claimed;
-
- dev_set_drvdata(kdev, dev);
- return 0;
-
-err_out_claimed:
- mca_device_set_claim(mdev, 0);
- iounmap(priv->base);
-err_out_reg:
- release_region(iobase, IBM_LANA_IORANGE);
-err_out:
- free_netdev(dev);
- return rc;
-}
-
-static int ibmlana_remove_one(struct device *kdev)
-{
- struct mca_device *mdev = to_mca_device(kdev);
- struct net_device *dev = dev_get_drvdata(kdev);
- ibmlana_priv *priv = netdev_priv(dev);
-
- unregister_netdev(dev);
- /*DeinitBoard(dev); */
- release_region(dev->base_addr, IBM_LANA_IORANGE);
- mca_device_set_claim(mdev, 0);
- iounmap(priv->base);
- free_netdev(dev);
- return 0;
-}
-
-/* ------------------------------------------------------------------------
- * modularization support
- * ------------------------------------------------------------------------ */
-
-module_param_named(irq, ibmlana_irq, int, 0);
-module_param_named(io, ibmlana_io, int, 0);
-MODULE_PARM_DESC(irq, "IBM LAN/A IRQ number");
-MODULE_PARM_DESC(io, "IBM LAN/A I/O base address");
-MODULE_LICENSE("GPL");
-
-static struct mca_driver ibmlana_driver = {
- .id_table = ibmlana_adapter_ids,
- .driver = {
- .name = "ibmlana",
- .bus = &mca_bus_type,
- .probe = ibmlana_init_one,
- .remove = ibmlana_remove_one,
- },
-};
-
-static int __init ibmlana_init_module(void)
-{
- return mca_register_driver(&ibmlana_driver);
-}
-
-static void __exit ibmlana_cleanup_module(void)
-{
- mca_unregister_driver(&ibmlana_driver);
-}
-
-module_init(ibmlana_init_module);
-module_exit(ibmlana_cleanup_module);
diff --git a/drivers/net/ethernet/natsemi/ibmlana.h b/drivers/net/ethernet/natsemi/ibmlana.h
deleted file mode 100644
index accd5efc9c8a..000000000000
--- a/drivers/net/ethernet/natsemi/ibmlana.h
+++ /dev/null
@@ -1,278 +0,0 @@
-#ifndef _IBM_LANA_INCLUDE_
-#define _IBM_LANA_INCLUDE_
-
-#ifdef _IBM_LANA_DRIVER_
-
-/* maximum packet size */
-
-#define PKTSIZE 1524
-
-/* number of transmit buffers */
-
-#define TXBUFCNT 4
-
-/* Adapter ID's */
-#define IBM_LANA_ID 0xffe0
-
-/* media enumeration - defined in a way that it fits onto the LAN/A's
- POS registers... */
-
-typedef enum {
- Media_10BaseT, Media_10Base5,
- Media_Unknown, Media_10Base2, Media_Count
-} ibmlana_medium;
-
-/* private structure */
-
-typedef struct {
- unsigned int slot; /* MCA-Slot-# */
- int realirq; /* memorizes actual IRQ, even when
- currently not allocated */
-	ibmlana_medium medium;	/* physical connector */
- u32 tdastart, txbufstart, /* addresses */
- rrastart, rxbufstart, rdastart, rxbufcnt, txusedcnt;
- int nextrxdescr, /* next rx descriptor to be used */
- lastrxdescr, /* last free rx descriptor */
- nexttxdescr, /* last tx descriptor to be used */
- currtxdescr, /* tx descriptor currently tx'ed */
- txused[TXBUFCNT]; /* busy flags */
- void __iomem *base;
- spinlock_t lock;
-} ibmlana_priv;
-
-/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
- a full 64K I/O range... */
-
-#define IBM_LANA_IORANGE 0xa0
-
-/* Command Register: */
-
-#define SONIC_CMDREG 0x00
-#define CMDREG_HTX 0x0001 /* halt transmission */
-#define CMDREG_TXP 0x0002 /* start transmission */
-#define CMDREG_RXDIS 0x0004 /* disable receiver */
-#define CMDREG_RXEN 0x0008 /* enable receiver */
-#define CMDREG_STP 0x0010 /* stop timer */
-#define CMDREG_ST 0x0020 /* start timer */
-#define CMDREG_RST 0x0080 /* software reset */
-#define CMDREG_RRRA 0x0100 /* force SONIC to read first RRA */
-#define CMDREG_LCAM 0x0200 /* force SONIC to read CAM descrs */
-
-/* Data Configuration Register */
-
-#define SONIC_DCREG 0x02
-#define DCREG_EXBUS 0x8000 /* Extended Bus Mode */
-#define DCREG_LBR 0x2000 /* Latched Bus Retry */
-#define DCREG_PO1 0x1000 /* Programmable Outputs */
-#define DCREG_PO0 0x0800
-#define DCREG_SBUS 0x0400 /* Synchronous Bus Mode */
-#define DCREG_USR1 0x0200 /* User Definable Pins */
-#define DCREG_USR0 0x0100
-#define DCREG_WC0 0x0000 /* 0..3 Wait States */
-#define DCREG_WC1 0x0040
-#define DCREG_WC2 0x0080
-#define DCREG_WC3 0x00c0
-#define DCREG_DW16 0x0000 /* 16 bit Bus Mode */
-#define DCREG_DW32 0x0020 /* 32 bit Bus Mode */
-#define DCREG_BMS 0x0010 /* Block Mode Select */
-#define DCREG_RFT4 0x0000 /* 4/8/16/24 bytes RX Threshold */
-#define DCREG_RFT8 0x0004
-#define DCREG_RFT16 0x0008
-#define DCREG_RFT24 0x000c
-#define DCREG_TFT8 0x0000 /* 8/16/24/28 bytes TX Threshold */
-#define DCREG_TFT16 0x0001
-#define DCREG_TFT24 0x0002
-#define DCREG_TFT28 0x0003
-
-/* Receive Control Register */
-
-#define SONIC_RCREG 0x04
-#define RCREG_ERR 0x8000 /* accept damaged and collided pkts */
-#define RCREG_RNT 0x4000 /* accept packets that are < 64 */
-#define RCREG_BRD 0x2000 /* accept broadcasts */
-#define RCREG_PRO 0x1000 /* promiscuous mode */
-#define RCREG_AMC 0x0800 /* accept all multicasts */
-#define RCREG_LB_NONE 0x0000 /* no loopback */
-#define RCREG_LB_MAC 0x0200 /* MAC loopback */
-#define RCREG_LB_ENDEC 0x0400 /* ENDEC loopback */
-#define RCREG_LB_XVR 0x0600 /* Transceiver loopback */
-#define RCREG_MC 0x0100 /* Multicast received */
-#define RCREG_BC 0x0080 /* Broadcast received */
-#define RCREG_LPKT 0x0040 /* last packet in RBA */
-#define RCREG_CRS 0x0020 /* carrier sense present */
-#define RCREG_COL 0x0010 /* recv'd packet with collision */
-#define RCREG_CRCR 0x0008 /* recv'd packet with CRC error */
-#define RCREG_FAER 0x0004 /* recv'd packet with inv. framing */
-#define RCREG_LBK 0x0002 /* recv'd loopback packet */
-#define RCREG_PRX 0x0001 /* recv'd packet is OK */
-
-/* Transmit Control Register */
-
-#define SONIC_TCREG 0x06
-#define TCREG_PINT 0x8000 /* generate interrupt after TDA read */
-#define TCREG_POWC 0x4000 /* timer start out of window detect */
-#define TCREG_CRCI 0x2000 /* inhibit CRC generation */
-#define TCREG_EXDIS 0x1000 /* disable excessive deferral timer */
-#define TCREG_EXD 0x0400 /* excessive deferral occurred */
-#define TCREG_DEF 0x0200 /* single deferral occurred */
-#define TCREG_NCRS 0x0100 /* no carrier detected */
-#define TCREG_CRSL 0x0080 /* carrier lost */
-#define TCREG_EXC 0x0040 /* excessive collisions occurred */
-#define TCREG_OWC 0x0020 /* out of window collision occurred */
-#define TCREG_PMB 0x0008 /* packet monitored bad */
-#define TCREG_FU 0x0004 /* FIFO underrun */
-#define TCREG_BCM 0x0002 /* byte count mismatch of fragments */
-#define TCREG_PTX 0x0001 /* packet transmitted OK */
-
-/* Interrupt Mask Register */
-
-#define SONIC_IMREG 0x08
-#define IMREG_BREN 0x4000 /* interrupt when bus retry occurred */
-#define IMREG_HBLEN 0x2000 /* interrupt when heartbeat lost */
-#define IMREG_LCDEN 0x1000 /* interrupt when CAM loaded */
-#define IMREG_PINTEN 0x0800 /* interrupt when PINT in TDA set */
-#define IMREG_PRXEN 0x0400 /* interrupt when packet received */
-#define IMREG_PTXEN 0x0200 /* interrupt when packet was sent */
-#define IMREG_TXEREN 0x0100 /* interrupt when send failed */
-#define IMREG_TCEN 0x0080 /* interrupt when timer completed */
-#define IMREG_RDEEN 0x0040 /* interrupt when RDA exhausted */
-#define IMREG_RBEEN 0x0020 /* interrupt when RBA exhausted */
-#define IMREG_RBAEEN 0x0010 /* interrupt when RBA too short */
-#define IMREG_CRCEN 0x0008 /* interrupt when CRC counter rolls */
-#define IMREG_FAEEN 0x0004 /* interrupt when FAE counter rolls */
-#define IMREG_MPEN 0x0002 /* interrupt when MP counter rolls */
-#define IMREG_RFOEN 0x0001 /* interrupt when Rx FIFO overflows */
-
-/* Interrupt Status Register */
-
-#define SONIC_ISREG 0x0a
-#define ISREG_BR 0x4000 /* bus retry occurred */
-#define ISREG_HBL 0x2000 /* heartbeat lost */
-#define ISREG_LCD 0x1000 /* CAM loaded */
-#define ISREG_PINT 0x0800 /* PINT in TDA set */
-#define ISREG_PKTRX 0x0400 /* packet received */
-#define ISREG_TXDN 0x0200 /* packet was sent */
-#define ISREG_TXER 0x0100 /* send failed */
-#define ISREG_TC 0x0080 /* timer completed */
-#define ISREG_RDE 0x0040 /* RDA exhausted */
-#define ISREG_RBE 0x0020 /* RBA exhausted */
-#define ISREG_RBAE 0x0010 /* RBA too short for received frame */
-#define ISREG_CRC 0x0008 /* CRC counter rolls over */
-#define ISREG_FAE 0x0004 /* FAE counter rolls over */
-#define ISREG_MP 0x0002 /* MP counter rolls over */
-#define ISREG_RFO 0x0001 /* Rx FIFO overflows */
-
-#define SONIC_UTDA 0x0c /* current transmit descr address */
-#define SONIC_CTDA 0x0e
-
-#define SONIC_URDA 0x1a /* current receive descr address */
-#define SONIC_CRDA 0x1c
-
-#define SONIC_CRBA0 0x1e /* current receive buffer address */
-#define SONIC_CRBA1 0x20
-
-#define SONIC_RBWC0 0x22 /* word count in receive buffer */
-#define SONIC_RBWC1 0x24
-
-#define SONIC_EOBC 0x26 /* minimum space to be free in RBA */
-
-#define SONIC_URRA 0x28 /* upper address of CDA & Recv Area */
-
-#define SONIC_RSA 0x2a /* start of receive resource area */
-
-#define SONIC_REA 0x2c /* end of receive resource area */
-
-#define SONIC_RRP 0x2e /* resource read pointer */
-
-#define SONIC_RWP 0x30 /* resource write pointer */
-
-#define SONIC_CAMEPTR 0x42 /* CAM entry pointer */
-
-#define SONIC_CAMADDR2 0x44 /* CAM address ports */
-#define SONIC_CAMADDR1 0x46
-#define SONIC_CAMADDR0 0x48
-
-#define SONIC_CAMPTR 0x4c /* lower address of CDA */
-
-#define SONIC_CAMCNT 0x4e /* # of CAM descriptors to load */
-
-/* Data Configuration Register 2 */
-
-#define SONIC_DCREG2 0x7e
-#define DCREG2_EXPO3 0x8000 /* extended programmable outputs */
-#define DCREG2_EXPO2 0x4000
-#define DCREG2_EXPO1 0x2000
-#define DCREG2_EXPO0 0x1000
-#define DCREG2_HD 0x0800 /* heartbeat disable */
-#define DCREG2_JD 0x0200 /* jabber timer disable */
-#define DCREG2_AUTO 0x0100 /* enable AUI/TP auto selection */
-#define DCREG2_XWRAP 0x0040 /* TP transceiver loopback */
-#define DCREG2_PH 0x0010 /* HOLD request timing */
-#define DCREG2_PCM 0x0004 /* packet compress when matched */
-#define DCREG2_PCNM 0x0002 /* packet compress when not matched */
-#define DCREG2_RJCM 0x0001 /* inverse packet match via CAM */
-
-/* Board Control Register: Enable RAM, Interrupts... */
-
-#define BCMREG 0x80
-#define BCMREG_RAMEN 0x80 /* switch over to RAM */
-#define BCMREG_IPEND 0x40 /* interrupt pending ? */
-#define BCMREG_RESET 0x08 /* reset board */
-#define BCMREG_16BIT 0x04 /* adapter in 16-bit slot */
-#define BCMREG_RAMWIN 0x02 /* enable RAM window */
-#define BCMREG_IEN 0x01 /* interrupt enable */
-
-/* MAC Address PROM */
-
-#define MACADDRPROM 0x92
-
-/* structure of a CAM entry */
-
-typedef struct {
- u32 index; /* pointer into CAM area */
- u32 addr0; /* address part (bits 0..15 used) */
- u32 addr1;
- u32 addr2;
-} camentry_t;
-
-/* structure of a receive resource */
-
-typedef struct {
- u32 startlo; /* start address (bits 0..15 used) */
- u32 starthi;
- u32 cntlo; /* size in 16-bit quantities */
- u32 cnthi;
-} rra_t;
-
-/* structure of a receive descriptor */
-
-typedef struct {
- u32 status; /* packet status */
- u32 length; /* length in bytes */
- u32 startlo; /* start address */
- u32 starthi;
- u32 seqno; /* frame sequence */
- u32 link; /* pointer to next descriptor */
- /* bit 0 = EOL */
- u32 inuse; /* !=0 --> free for SONIC to write */
-} rda_t;
-
-/* structure of a transmit descriptor */
-
-typedef struct {
- u32 status; /* transmit status */
- u32 config; /* value for TCR */
- u32 length; /* total length */
- u32 fragcount; /* number of fragments */
- u32 startlo; /* start address of fragment */
- u32 starthi;
- u32 fraglength; /* length of this fragment */
- /* more address/length triplets may */
- /* follow here */
- u32 link; /* pointer to next descriptor */
- /* bit 0 = EOL */
-} tda_t;
-
-#endif /* _IBM_LANA_DRIVER_ */
-
-#endif /* _IBM_LANA_INCLUDE_ */
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index f4ad60c97eae..7a5e295588b0 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -862,9 +862,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
prev_eedata = eedata;
}
- /* Store MAC Address in perm_addr */
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
-
np = netdev_priv(dev);
np->ioaddr = ioaddr;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 7c94c089212f..bfd887382e19 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8014,7 +8014,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Set the factory defined MAC address initially */
dev->addr_len = ETH_ALEN;
memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
/* initialize number of multicast & unicast MAC entries variables */
if (sp->device_type == XFRAME_I_DEVICE) {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index 92dd72d3f9de..f8f073880f84 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -82,9 +82,9 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct vxgedev *vdev = netdev_priv(dev);
- strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
- strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
- strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
+ strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
* vdev->no_of_vpath;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 7c87105ca049..794444e09492 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -4682,7 +4682,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Store the fw version for ethttool option */
strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
- memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
/* Copy the station mac address to the list */
for (i = 0; i < vdev->no_of_vpath; i++) {
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index cbd6a529d0c0..162da8975b05 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -878,8 +878,8 @@ static int w90p910_ether_ioctl(struct net_device *dev,
static void w90p910_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 87fa5919c455..0b8de12bcbca 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3055,7 +3055,6 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
/* synchronized against open : rtnl_lock() held by caller */
memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
@@ -5766,9 +5765,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
"%s: set workaround bit for reversed mac addr\n",
__func__);
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- if (!is_valid_ether_addr(dev->perm_addr)) {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 3466ca1e8f6c..c4122c86f829 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -800,7 +800,7 @@ static int lpc_mii_probe(struct net_device *ndev)
else
netdev_info(ndev, "using RMII interface\n");
phydev = phy_connect(ndev, dev_name(&phydev->dev),
- &lpc_handle_link_change, 0,
+ &lpc_handle_link_change,
lpc_phy_interface_mode(&pldat->pdev->dev));
if (IS_ERR(phydev)) {
@@ -1239,9 +1239,10 @@ static int lpc_eth_open(struct net_device *ndev)
static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, dev_name(ndev->dev.parent));
+ strlcpy(info->driver, MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ sizeof(info->bus_info));
}
static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index b5499198e029..921729f9c85c 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1350,10 +1350,10 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, DRV_NAME, sizeof(info->driver));
- strncpy(info->version, DRV_VERSION, sizeof(info->version));
- strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
info->n_stats = 0;
info->testinfo_len = 0;
info->regdump_len = 0;
@@ -1534,12 +1534,10 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac && is_valid_ether_addr(mac)) {
+ if (mac && is_valid_ether_addr(mac))
memcpy(netdev->dev_addr, mac, ETH_ALEN);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
- } else {
+ else
eth_hw_addr_random(netdev);
- }
p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index 8f29feb35548..cbbeca3f8c5c 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -32,8 +32,8 @@ config HAMACHI
called hamachi.
config YELLOWFIN
- tristate "Packet Engines Yellowfin Gigabit-NIC support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Packet Engines Yellowfin Gigabit-NIC support"
+ depends on PCI
select CRC32
---help---
Say Y here if you have a Packet Engines G-NIC PCI Gigabit Ethernet
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index bf829ee30077..cac33e5f9bc2 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1808,9 +1808,10 @@ static int check_if_running(struct net_device *dev)
static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct hamachi_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index fbaed4fa72fa..d28593b1fc3e 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1326,9 +1326,10 @@ static void set_rx_mode(struct net_device *dev)
static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct yellowfin_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static const struct ethtool_ops ethtool_ops = {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 7f556a84925d..1bcaf45aa864 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -201,11 +201,8 @@ netxen_setup_minidump(struct netxen_adapter *adapter)
adapter->mdump.md_template =
kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
- if (!adapter->mdump.md_template) {
- dev_err(&adapter->pdev->dev, "Unable to allocate memory "
- "for minidump template.\n");
+ if (!adapter->mdump.md_template)
return -ENOMEM;
- }
err = netxen_get_minidump_template(adapter);
if (err) {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 946160fa5843..9fbb1cdbfa47 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -670,11 +670,9 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
}
cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
- if (cur == NULL) {
- printk(KERN_ERR "%s: failed to add mac address filter\n",
- adapter->netdev->name);
+ if (cur == NULL)
return -ENOMEM;
- }
+
memcpy(cur->mac_addr, addr, ETH_ALEN);
list_add_tail(&cur->list, &adapter->mac_list);
return nx_p3_sre_macaddr_change(adapter,
@@ -2568,16 +2566,10 @@ netxen_dump_fw(struct netxen_adapter *adapter)
adapter->mdump.md_capture_size;
if (!adapter->mdump.md_capture_buff) {
adapter->mdump.md_capture_buff =
- vmalloc(adapter->mdump.md_dump_size);
- if (!adapter->mdump.md_capture_buff) {
- dev_info(&adapter->pdev->dev,
- "Unable to allocate memory for minidump "
- "capture_buffer(%d bytes).\n",
- adapter->mdump.md_dump_size);
+ vzalloc(adapter->mdump.md_dump_size);
+ if (!adapter->mdump.md_capture_buff)
return;
- }
- memset(adapter->mdump.md_capture_buff, 0,
- adapter->mdump.md_dump_size);
+
if (netxen_collect_minidump(adapter)) {
adapter->mdump.has_valid_dump = 0;
adapter->mdump.md_dump_size = 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index bc165f4d0f65..4782dcfde736 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
buffrag->length, PCI_DMA_TODEVICE);
buffrag->dma = 0ULL;
}
- for (j = 0; j < cmd_buf->frag_count; j++) {
+ for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
pci_unmap_page(adapter->pdev, buffrag->dma,
@@ -197,41 +197,33 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
struct nx_host_sds_ring *sds_ring;
struct nx_host_tx_ring *tx_ring;
struct netxen_rx_buffer *rx_buf;
- int ring, i, size;
+ int ring, i;
struct netxen_cmd_buffer *cmd_buf_arr;
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- size = sizeof(struct nx_host_tx_ring);
- tx_ring = kzalloc(size, GFP_KERNEL);
- if (tx_ring == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n",
- netdev->name);
+ tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
return -ENOMEM;
- }
+
adapter->tx_ring = tx_ring;
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, 0);
cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
- if (cmd_buf_arr == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
- netdev->name);
+ if (cmd_buf_arr == NULL)
goto err_out;
- }
+
tx_ring->cmd_buf_arr = cmd_buf_arr;
recv_ctx = &adapter->recv_ctx;
- size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring);
- rds_ring = kzalloc(size, GFP_KERNEL);
- if (rds_ring == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n",
- netdev->name);
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct nx_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
goto err_out;
- }
+
recv_ctx->rds_rings = rds_ring;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 6098fd4adfeb..501f49207da5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -501,12 +501,11 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
for (i = 0; i < 6; i++)
netdev->dev_addr[i] = *(p + 5 - i);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
return 0;
@@ -1963,10 +1962,12 @@ unwind:
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
}
nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
out_err:
return -ENOMEM;
@@ -3175,11 +3176,8 @@ netxen_list_config_vlan_ip(struct netxen_adapter *adapter,
}
cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC);
- if (cur == NULL) {
- printk(KERN_ERR "%s: failed to add vlan ip to list\n",
- adapter->netdev->name);
+ if (cur == NULL)
return;
- }
cur->ip_addr = ifa->ifa_address;
list_add_tail(&cur->list, &adapter->vlan_ip_list);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 67a679aaf29a..8fd38cb6d26a 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2591,13 +2591,11 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
else
qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
- qdev->lrg_buf =
- kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
- GFP_KERNEL);
- if (qdev->lrg_buf == NULL) {
- netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
+ qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
+ sizeof(struct ql_rcv_buf_cb),
+ GFP_KERNEL);
+ if (qdev->lrg_buf == NULL)
return -ENOMEM;
- }
qdev->lrg_buf_q_alloc_virt_addr =
pci_alloc_consistent(qdev->pdev,
@@ -3867,7 +3865,6 @@ static int ql3xxx_probe(struct pci_dev *pdev,
ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
}
- memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index c4b8ced83829..7722a203e388 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -6,4 +6,6 @@ obj-$(CONFIG_QLCNIC) := qlcnic.o
qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
- qlcnic_sysfs.o qlcnic_minidump.o
+ qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \
+ qlcnic_83xx_init.o qlcnic_83xx_vnic.o \
+ qlcnic_minidump.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index bc7ec64e9c7a..11c3db6daffd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -33,11 +33,13 @@
#include <linux/if_vlan.h>
#include "qlcnic_hdr.h"
+#include "qlcnic_hw.h"
+#include "qlcnic_83xx_hw.h"
#define _QLCNIC_LINUX_MAJOR 5
-#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 30
-#define QLCNIC_LINUX_VERSIONID "5.0.30"
+#define _QLCNIC_LINUX_MINOR 1
+#define _QLCNIC_LINUX_SUBVERSION 34
+#define QLCNIC_LINUX_VERSIONID "5.1.34"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -96,7 +98,6 @@
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
-
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -203,6 +204,7 @@ struct uni_data_desc{
/* Flash Defines and Structures */
#define QLCNIC_FLT_LOCATION 0x3F1000
+#define QLCNIC_FDT_LOCATION 0x3F0000
#define QLCNIC_B0_FW_IMAGE_REGION 0x74
#define QLCNIC_C0_FW_IMAGE_REGION 0x97
#define QLCNIC_BOOTLD_REGION 0X72
@@ -223,6 +225,36 @@ struct qlcnic_flt_entry {
u32 end_addr;
};
+/* Flash Descriptor Table */
+struct qlcnic_fdt {
+ u32 valid;
+ u16 ver;
+ u16 len;
+ u16 cksum;
+ u16 unused;
+ u8 model[16];
+ u16 mfg_id;
+ u16 id;
+ u8 flag;
+ u8 erase_cmd;
+ u8 alt_erase_cmd;
+ u8 write_enable_cmd;
+ u8 write_enable_bits;
+ u8 write_statusreg_cmd;
+ u8 unprotected_sec_cmd;
+ u8 read_manuf_cmd;
+ u32 block_size;
+ u32 alt_block_size;
+ u32 flash_size;
+ u32 write_enable_data;
+ u8 readid_addr_len;
+ u8 write_disable_bits;
+ u8 read_dev_id_len;
+ u8 chip_erase_cmd;
+ u16 read_timeo;
+ u8 protected_sec_cmd;
+ u8 resvd[65];
+};
/* Magic number to let user know flash is programmed */
#define QLCNIC_BDINFO_MAGIC 0x12345678
@@ -267,6 +299,12 @@ struct qlcnic_flt_entry {
extern char qlcnic_driver_name[];
+extern int qlcnic_use_msi;
+extern int qlcnic_use_msi_x;
+extern int qlcnic_auto_fw_reset;
+extern int qlcnic_load_fw_file;
+extern int qlcnic_config_npars;
+
/* Number of status descriptors to handle per interrupt */
#define MAX_STATUS_HANDLE (64)
@@ -314,6 +352,7 @@ struct qlcnic_rx_buffer {
#define QLCNIC_INTR_DEFAULT 0x04
#define QLCNIC_CONFIG_INTR_COALESCE 3
+#define QLCNIC_DEV_INFO_SIZE 1
struct qlcnic_nic_intr_coalesce {
u8 type;
@@ -337,6 +376,7 @@ struct qlcnic_dump_template_hdr {
u32 sys_info[3];
u32 saved_state[16];
u32 cap_sizes[8];
+ u32 ocm_wnd_reg[16];
u32 rsvd[0];
};
@@ -396,12 +436,24 @@ struct qlcnic_hardware_context {
u16 act_pci_func;
u32 capabilities;
+ u32 capabilities2;
u32 temp;
u32 int_vec_bit;
u32 fw_hal_version;
+ u32 port_config;
struct qlcnic_hardware_ops *hw_ops;
struct qlcnic_nic_intr_coalesce coal;
struct qlcnic_fw_dump fw_dump;
+ struct qlcnic_fdt fdt;
+ struct qlc_83xx_reset reset;
+ struct qlc_83xx_idc idc;
+ struct qlc_83xx_fw_info fw_info;
+ struct qlcnic_intrpt_config *intr_tbl;
+ u32 *reg_tbl;
+ u32 *ext_reg_tbl;
+ u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
+ u32 mbox_reg[4];
+ spinlock_t mbx_lock;
};
struct qlcnic_adapter_stats {
@@ -422,6 +474,8 @@ struct qlcnic_adapter_stats {
u64 null_rxbuf;
u64 rx_dma_map_error;
u64 tx_dma_map_error;
+ u64 spurious_intr;
+ u64 mac_filter_limit_overrun;
};
/*
@@ -460,12 +514,17 @@ struct qlcnic_host_sds_ring {
} ____cacheline_internodealigned_in_smp;
struct qlcnic_host_tx_ring {
+ int irq;
+ void __iomem *crb_intr_mask;
+ char name[IFNAMSIZ+4];
u16 ctx_id;
u32 producer;
u32 sw_consumer;
u32 num_desc;
void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
struct qlcnic_cmd_buffer *cmd_buf_arr;
__le32 *hw_consumer;
@@ -492,8 +551,6 @@ struct qlcnic_recv_context {
/* HW context creation */
#define QLCNIC_OS_CRB_RETRY_COUNT 4000
-#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
- (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
#define QLCNIC_CDRP_CMD_BIT 0x80000000
@@ -513,43 +570,6 @@ struct qlcnic_recv_context {
* the crb QLCNIC_CDRP_CRB_OFFSET.
*/
#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
-#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
-
-#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
-#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
-#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
-#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
-#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
-#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
-#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
-#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
-#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
-#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
-#define QLCNIC_CDRP_CMD_INTRPT_TEST 0x00000011
-#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
-#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
-#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
-#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
-#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
-#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
-#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
-#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
-#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
-
-#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
-#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
-#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
-#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
-#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
-#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
-#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E
-#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f
-#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030
-#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
#define QLCNIC_RCODE_SUCCESS 0
#define QLCNIC_RCODE_INVALID_ARGS 6
@@ -726,6 +746,11 @@ struct qlcnic_mac_list_s {
uint8_t mac_addr[ETH_ALEN+2];
};
+/* MAC Learn */
+#define NO_MAC_LEARN 0
+#define DRV_MAC_LEARN 1
+#define FDB_MAC_LEARN 2
+
#define QLCNIC_HOST_REQUEST 0x13
#define QLCNIC_REQUEST 0x14
@@ -762,7 +787,7 @@ struct qlcnic_mac_list_s {
*/
#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
-#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D
#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
@@ -779,6 +804,8 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
+#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
+#define QLCNIC_FW_CAPABILITY_2_OCBB BIT_5
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -855,7 +882,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_MSI_ENABLED 0x02
#define QLCNIC_MSIX_ENABLED 0x04
-#define QLCNIC_LRO_ENABLED 0x08
+#define QLCNIC_LRO_ENABLED 0x01
#define QLCNIC_LRO_DISABLED 0x00
#define QLCNIC_BRIDGE_ENABLED 0X10
#define QLCNIC_DIAG_ENABLED 0x20
@@ -887,6 +914,7 @@ struct qlcnic_ipaddr {
#define __QLCNIC_AER 5
#define __QLCNIC_DIAG_RES_ALLOC 6
#define __QLCNIC_LED_ENABLE 7
+#define __QLCNIC_ELB_INPROGRESS 8
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -895,12 +923,14 @@ struct qlcnic_ipaddr {
#define QLCNIC_FILTER_AGE 80
#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
+#define QLCNIC_LB_BUCKET_SIZE 32
/* QLCNIC Driver Error Code */
#define QLCNIC_FW_NOT_RESPOND 51
#define QLCNIC_TEST_IN_PROGRESS 52
#define QLCNIC_UNDEFINED_ERROR 53
#define QLCNIC_LB_CABLE_NOT_CONN 54
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
struct qlcnic_filter {
struct hlist_node fnode;
@@ -912,7 +942,8 @@ struct qlcnic_filter {
struct qlcnic_filter_hash {
struct hlist_head *fhead;
u8 fnum;
- u8 fmax;
+ u16 fmax;
+ u16 fbucket_size;
};
struct qlcnic_adapter {
@@ -934,6 +965,7 @@ struct qlcnic_adapter {
u8 max_rds_rings;
u8 max_sds_rings;
+ u8 rx_csum;
u8 portnum;
u8 fw_wait_cnt;
@@ -954,8 +986,10 @@ struct qlcnic_adapter {
u8 mac_addr[ETH_ALEN];
u64 dev_rst_time;
- u8 mac_learn;
+ bool drv_mac_learn;
+ bool fdb_mac_learn;
unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u8 flash_mfg_id;
struct qlcnic_npar_info *npars;
struct qlcnic_eswitch *eswitch;
struct qlcnic_nic_template *nic_ops;
@@ -969,12 +1003,17 @@ struct qlcnic_adapter {
void __iomem *isr_int_vec;
struct msix_entry *msix_entries;
+ struct workqueue_struct *qlcnic_wq;
struct delayed_work fw_work;
+ struct delayed_work idc_aen_work;
struct qlcnic_filter_hash fhash;
+ struct qlcnic_filter_hash rx_fhash;
spinlock_t tx_clean_lock;
spinlock_t mac_learn_lock;
+ /* spinlock protecting rcv filters learned from eswitch traffic */
+ spinlock_t rx_mac_learn_lock;
u32 file_prd_off; /*File fw product offset*/
u32 fw_version;
const struct firmware *fw;
@@ -995,7 +1034,24 @@ struct qlcnic_info_le {
__le16 max_rx_ques;
__le16 min_tx_bw;
__le16 max_tx_bw;
- u8 reserved2[104];
+ __le32 op_type;
+ __le16 max_bw_reg_offset;
+ __le16 max_linkspeed_reg_offset;
+ __le32 capability1;
+ __le32 capability2;
+ __le32 capability3;
+ __le16 max_tx_mac_filters;
+ __le16 max_rx_mcast_mac_filters;
+ __le16 max_rx_ucast_mac_filters;
+ __le16 max_rx_ip_addr;
+ __le16 max_rx_lro_flow;
+ __le16 max_rx_status_rings;
+ __le16 max_rx_buf_rings;
+ __le16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ __le16 max_vports;
+ u8 reserved2[64];
} __packed;
struct qlcnic_info {
@@ -1005,12 +1061,28 @@ struct qlcnic_info {
u16 switch_mode;
u32 capabilities;
u8 max_mac_filters;
- u8 reserved1;
u16 max_mtu;
u16 max_tx_ques;
u16 max_rx_ques;
u16 min_tx_bw;
u16 max_tx_bw;
+ u32 op_type;
+ u16 max_bw_reg_offset;
+ u16 max_linkspeed_reg_offset;
+ u32 capability1;
+ u32 capability2;
+ u32 capability3;
+ u16 max_tx_mac_filters;
+ u16 max_rx_mcast_mac_filters;
+ u16 max_rx_ucast_mac_filters;
+ u16 max_rx_ip_addr;
+ u16 max_rx_lro_flow;
+ u16 max_rx_status_rings;
+ u16 max_rx_buf_rings;
+ u16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ u16 max_vports;
};
struct qlcnic_pci_info_le {
@@ -1024,7 +1096,9 @@ struct qlcnic_pci_info_le {
__le16 reserved1[2];
u8 mac[ETH_ALEN];
- u8 reserved2[106];
+ __le16 func_count;
+ u8 reserved2[104];
+
} __packed;
struct qlcnic_pci_info {
@@ -1035,6 +1109,7 @@ struct qlcnic_pci_info {
u16 tx_min_bw;
u16 tx_max_bw;
u8 mac[ETH_ALEN];
+ u16 func_count;
};
struct qlcnic_npar_info {
@@ -1266,10 +1341,8 @@ struct qlcnic_esw_statistics {
#define QLCNIC_RESET_QUIESCENT 0xadd00020
struct _cdrp_cmd {
- u32 cmd;
- u32 arg1;
- u32 arg2;
- u32 arg3;
+ u32 num;
+ u32 *arg;
};
struct qlcnic_cmd_args {
@@ -1279,9 +1352,6 @@ struct qlcnic_cmd_args {
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
-
-int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
-int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
@@ -1291,9 +1361,10 @@ void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
(((addr) < (high)) && ((addr) >= (low)))
#define QLCRD32(adapter, off) \
- (qlcnic_hw_read_wx_2M(adapter, off))
+ (adapter->ahw->hw_ops->read_reg)(adapter, off)
+
#define QLCWR32(adapter, off, val) \
- (qlcnic_hw_write_wx_2M(adapter, off, val))
+ adapter->ahw->hw_ops->write_reg(adapter, off, val)
int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
@@ -1306,10 +1377,6 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
#define qlcnic_phy_unlock(a) \
qlcnic_pcie_sem_unlock((a), 3)
-#define qlcnic_api_lock(a) \
- qlcnic_pcie_sem_lock((a), 5, 0)
-#define qlcnic_api_unlock(a) \
- qlcnic_pcie_sem_unlock((a), 5)
#define qlcnic_sw_lock(a) \
qlcnic_pcie_sem_lock((a), 6, 0)
#define qlcnic_sw_unlock(a) \
@@ -1324,14 +1391,13 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
#define MAX_CTL_CHECK 1000
-int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
-int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
int qlcnic_dump_fw(struct qlcnic_adapter *);
/* Functions from qlcnic_init.c */
+void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
@@ -1361,54 +1427,42 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring);
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
void qlcnic_set_multi(struct net_device *netdev);
+int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *);
+int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
-int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
-int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
-int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
-int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd);
-int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
-void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *);
int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
netdev_features_t qlcnic_fix_features(struct net_device *netdev,
netdev_features_t features);
int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
-int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
-void qlcnic_fetch_mac(u32, u32, u8, u8 *);
-void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
-void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter);
-int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode);
/* Functions from qlcnic_ethtool.c */
-int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]);
+int qlcnic_check_loopback_buff(unsigned char *, u8 []);
+int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
+int qlcnic_loopback_test(struct net_device *, u8);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
-void qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *);
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val);
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data);
-void qlcnic_dev_request_reset(struct qlcnic_adapter *);
+int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
+int qlcnic_validate_max_rss(u8, u8);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
-
-/* Management functions */
-int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
-int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
-int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
-int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
/* eSwitch management functions */
int qlcnic_config_switch_port(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
+
int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
@@ -1418,14 +1472,12 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
struct __qlcnic_esw_statistics *);
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
-extern int qlcnic_config_tso;
-int qlcnic_napi_add(struct qlcnic_adapter *, struct net_device *);
-void qlcnic_napi_del(struct qlcnic_adapter *adapter);
-void qlcnic_napi_enable(struct qlcnic_adapter *adapter);
-void qlcnic_napi_disable(struct qlcnic_adapter *adapter);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd);
+
int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
+void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
void qlcnic_free_tx_rings(struct qlcnic_adapter *);
int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
@@ -1433,6 +1485,9 @@ void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
void qlcnic_set_vlan_config(struct qlcnic_adapter *,
@@ -1440,6 +1495,22 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *,
void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
+void qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+int qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void __qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_detach(struct qlcnic_adapter *);
+void qlcnic_teardown_intr(struct qlcnic_adapter *);
+int qlcnic_attach(struct qlcnic_adapter *);
+int __qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_restore_indev_addr(struct net_device *, unsigned long);
+
+int qlcnic_check_temp(struct qlcnic_adapter *);
+int qlcnic_init_pci_info(struct qlcnic_adapter *);
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_reset_npar_config(struct qlcnic_adapter *);
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
+void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int,
+ __le16);
/*
* QLOGIC Board information
*/
@@ -1462,6 +1533,277 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring->producer;
}
+struct qlcnic_nic_template {
+ int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
+ int (*config_led) (struct qlcnic_adapter *, u32, u32);
+ int (*start_firmware) (struct qlcnic_adapter *);
+ int (*init_driver) (struct qlcnic_adapter *);
+ void (*request_reset) (struct qlcnic_adapter *, u32);
+ void (*cancel_idc_work) (struct qlcnic_adapter *);
+ int (*napi_add)(struct qlcnic_adapter *, struct net_device *);
+ void (*napi_del)(struct qlcnic_adapter *);
+ void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int);
+ irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *);
+};
+
+/* Adapter hardware abstraction */
+struct qlcnic_hardware_ops {
+ void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ int (*read_reg) (struct qlcnic_adapter *, ulong);
+ int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
+ void (*get_ocm_win) (struct qlcnic_hardware_context *);
+ int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
+ int (*setup_intr) (struct qlcnic_adapter *, u8);
+ int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+ int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*get_func_no) (struct qlcnic_adapter *);
+ int (*api_lock) (struct qlcnic_adapter *);
+ void (*api_unlock) (struct qlcnic_adapter *);
+ void (*add_sysfs) (struct qlcnic_adapter *);
+ void (*remove_sysfs) (struct qlcnic_adapter *);
+ void (*process_lb_rcv_ring_diag) (struct qlcnic_host_sds_ring *);
+ int (*create_rx_ctx) (struct qlcnic_adapter *);
+ int (*create_tx_ctx) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+ int (*setup_link_event) (struct qlcnic_adapter *, int);
+ int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8);
+ int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *);
+ int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *);
+ int (*change_macvlan) (struct qlcnic_adapter *, u8*, __le16, u8);
+ void (*napi_enable) (struct qlcnic_adapter *);
+ void (*napi_disable) (struct qlcnic_adapter *);
+ void (*config_intr_coal) (struct qlcnic_adapter *);
+ int (*config_rss) (struct qlcnic_adapter *, int);
+ int (*config_hw_lro) (struct qlcnic_adapter *, int);
+ int (*config_loopback) (struct qlcnic_adapter *, u8);
+ int (*clear_loopback) (struct qlcnic_adapter *, u8);
+ int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
+ void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, __le16);
+ int (*get_board_info) (struct qlcnic_adapter *);
+};
+
+extern struct qlcnic_nic_template qlcnic_vf_ops;
+
+static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->start_firmware(adapter);
+}
+
+static inline void qlcnic_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->read_crb(adapter, buf, offset, size);
+}
+
+static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
+}
+
+static inline int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter,
+ ulong off)
+{
+ return adapter->ahw->hw_ops->read_reg(adapter, off);
+}
+
+static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
+ ulong off, u32 data)
+{
+ return adapter->ahw->hw_ops->write_reg(adapter, off, data);
+}
+
+static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
+ u8 *mac)
+{
+ return adapter->ahw->hw_ops->get_mac_address(adapter, mac);
+}
+
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+{
+ return adapter->ahw->hw_ops->setup_intr(adapter, num_intr);
+}
+
+static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 arg)
+{
+ return adapter->ahw->hw_ops->alloc_mbx_args(mbx, adapter, arg);
+}
+
+static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
+}
+
+static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_func_no(adapter);
+}
+
+static inline int qlcnic_api_lock(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->api_lock(adapter);
+}
+
+static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->api_unlock(adapter);
+}
+
+static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->add_sysfs(adapter);
+}
+
+static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->remove_sysfs(adapter);
+}
+
+static inline void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ sds_ring->adapter->ahw->hw_ops->process_lb_rcv_ring_diag(sds_ring);
+}
+
+static inline int qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->create_rx_ctx(adapter);
+}
+
+static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *ptr,
+ int ring)
+{
+ return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
+}
+
+static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->setup_link_event(adapter, enable);
+}
+
+static inline int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info, u8 id)
+{
+ return adapter->ahw->hw_ops->get_nic_info(adapter, info, id);
+}
+
+static inline int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *info)
+{
+ return adapter->ahw->hw_ops->get_pci_info(adapter, info);
+}
+
+static inline int qlcnic_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
+{
+ return adapter->ahw->hw_ops->set_nic_info(adapter, info);
+}
+
+static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter,
+ u8 *addr, __le16 id, u8 cmd)
+{
+ return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd);
+}
+
+static inline int qlcnic_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ return adapter->nic_ops->napi_add(adapter, netdev);
+}
+
+static inline void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ adapter->nic_ops->napi_del(adapter);
+}
+
+static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_enable(adapter);
+}
+
+static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_disable(adapter);
+}
+
+static inline void qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->config_intr_coal(adapter);
+}
+
+static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ return adapter->ahw->hw_ops->config_rss(adapter, enable);
+}
+
+static inline int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->config_hw_lro(adapter, enable);
+}
+
+static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+}
+
+static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+}
+
+static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
+ u32 mode)
+{
+ return adapter->ahw->hw_ops->config_promisc_mode(adapter, mode);
+}
+
+static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+ u64 *addr, __le16 id)
+{
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+}
+
+static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->get_board_info(adapter);
+}
+
+static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
+{
+ adapter->nic_ops->request_reset(adapter, key);
+}
+
+static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ adapter->nic_ops->cancel_idc_work(adapter);
+}
+
+static inline irqreturn_t
+qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->clear_legacy_intr(adapter);
+}
+
+static inline int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 rate)
+{
+ return adapter->nic_ops->config_led(adapter, state, rate);
+}
+
+static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
+{
+ adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
+}
+
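
The inline wrappers above (and the reworked QLCRD32()/QLCWR32() macros earlier in the hunk) route every hardware access through adapter->ahw->hw_ops or adapter->nic_ops, so a single call site can drive either the existing 82xx back end or the new 83xx one selected at probe time. The following standalone userspace sketch only illustrates that ops-table dispatch pattern; the demo_* names are made up for illustration and are not part of the driver.

#include <stdio.h>

struct demo_adapter;

/* per-ASIC operations table, mirroring the role of struct qlcnic_hardware_ops */
struct demo_hw_ops {
	int (*read_reg)(struct demo_adapter *, unsigned long);
};

struct demo_adapter {
	const struct demo_hw_ops *hw_ops;
	unsigned int regs[16];		/* stand-in for the PCI BAR */
};

static int demo_82xx_read_reg(struct demo_adapter *a, unsigned long off)
{
	return a->regs[off % 16];		/* pretend: direct access */
}

static int demo_83xx_read_reg(struct demo_adapter *a, unsigned long off)
{
	return a->regs[off % 16] ^ 0xff;	/* pretend: goes through a CRB window */
}

static const struct demo_hw_ops demo_82xx_ops = { .read_reg = demo_82xx_read_reg };
static const struct demo_hw_ops demo_83xx_ops = { .read_reg = demo_83xx_read_reg };

/* same shape as the QLCRD32() macro / inline wrappers: one call site, two back ends */
static inline int demo_rd32(struct demo_adapter *a, unsigned long off)
{
	return a->hw_ops->read_reg(a, off);
}

int main(void)
{
	struct demo_adapter a = { .hw_ops = &demo_82xx_ops, .regs = { [3] = 0x42 } };

	printf("82xx path: %#x\n", demo_rd32(&a, 3));
	a.hw_ops = &demo_83xx_ops;	/* probe-time selection, cf. qlcnic_83xx_register_map() */
	printf("83xx path: %#x\n", demo_rd32(&a, 3));
	return 0;
}
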
static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
writel(0, sds_ring->crb_intr_mask);
@@ -1480,12 +1822,6 @@ static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
extern const struct ethtool_ops qlcnic_ethtool_ops;
extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
-struct qlcnic_nic_template {
- int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
- int (*config_led) (struct qlcnic_adapter *, u32, u32);
- int (*start_firmware) (struct qlcnic_adapter *);
-};
-
#define QLCDB(adapter, lvl, _fmt, _args...) do { \
if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \
printk(KERN_INFO "%s: %s: " _fmt, \
@@ -1493,6 +1829,7 @@ struct qlcnic_nic_template {
__func__, ##_args); \
} while (0)
+#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
{
@@ -1500,4 +1837,11 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
}
+static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+ return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+}
+
+
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
new file mode 100644
index 000000000000..cd5ae8813cb3
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -0,0 +1,3011 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+
+#define QLCNIC_MAX_TX_QUEUES 1
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+/* status descriptor mailbox data
+ * @phy_addr: physical address of buffer
+ * @sds_ring_size: buffer size
+ * @intrpt_id: interrupt id
+ * @intrpt_val: source of interrupt
+ */
+struct qlcnic_sds_mbx {
+ u64 phy_addr;
+ u8 rsvd1[16];
+ u16 sds_ring_size;
+ u16 rsvd2[3];
+ u16 intrpt_id;
+ u8 intrpt_val;
+ u8 rsvd3[5];
+} __packed;
+
+/* receive descriptor buffer data
+ * @phy_addr_reg: physical address of regular buffer
+ * @phy_addr_jmb: physical address of jumbo buffer
+ * @reg_ring_sz: size of regular buffer
+ * @reg_ring_len: no. of entries in regular buffer
+ * @jmb_ring_sz: size of jumbo buffer
+ * @jmb_ring_len: no. of entries in jumbo buffer
+ */
+struct qlcnic_rds_mbx {
+ u64 phy_addr_reg;
+ u64 phy_addr_jmb;
+ u16 reg_ring_sz;
+ u16 reg_ring_len;
+ u16 jmb_ring_sz;
+ u16 jmb_ring_len;
+} __packed;
+
+/* host producers for regular and jumbo rings */
+struct __host_producer_mbx {
+ u32 reg_buf;
+ u32 jmb_buf;
+} __packed;
+
+/* Receive context mailbox data outbox registers
+ * @state: state of the context
+ * @vport_id: virtual port id
+ * @ctx_id: receive context id
+ * @num_pci_func: number of pci functions of the port
+ * @phy_port: physical port id
+ */
+struct qlcnic_rcv_mbx_out {
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+ u8 state;
+ u8 num_pci_func;
+ u8 phy_port;
+ u8 vport_id;
+ u32 host_csmr[QLCNIC_MAX_RING_SETS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+} __packed;
+
+struct qlcnic_add_rings_mbx_out {
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+ u32 host_csmr[QLCNIC_MAX_RING_SETS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+} __packed;
+
+/* Transmit context mailbox inbox registers
+ * @phys_addr: DMA address of the transmit buffer
+ * @cnsmr_index: host consumer index
+ * @size: length of transmit buffer ring
+ * @intr_id: interrupt id
+ * @src: source of interrupt
+ */
+struct qlcnic_tx_mbx {
+ u64 phys_addr;
+ u64 cnsmr_index;
+ u16 size;
+ u16 intr_id;
+ u8 src;
+ u8 rsvd[3];
+} __packed;
+
+/* Transmit context mailbox outbox registers
+ * @host_prod: host producer index
+ * @ctx_id: transmit context id
+ * @state: state of the transmit context
+ */
+struct qlcnic_tx_mbx_out {
+ u32 host_prod;
+ u16 ctx_id;
+ u8 state;
+ u8 rsvd;
+} __packed;
+
+static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
+ {QLCNIC_CMD_CONFIG_INTRPT, 18, 34},
+ {QLCNIC_CMD_CREATE_RX_CTX, 136, 27},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 54, 18},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_LEARNING, 2, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 22, 12},
+ {QLCNIC_CMD_SET_MTU, 3, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 1, 66},
+ {QLCNIC_CMD_GET_NIC_INFO, 2, 19},
+ {QLCNIC_CMD_SET_NIC_INFO, 32, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 1, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 5, 5},
+ {QLCNIC_CMD_GET_LINK_EVENT, 2, 1},
+ {QLCNIC_CMD_CONFIG_MAC_VLAN, 4, 3},
+ {QLCNIC_CMD_CONFIG_INTR_COAL, 6, 1},
+ {QLCNIC_CMD_CONFIGURE_RSS, 14, 1},
+ {QLCNIC_CMD_CONFIGURE_LED, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_HW_LRO, 2, 1},
+ {QLCNIC_CMD_GET_STATISTICS, 2, 80},
+ {QLCNIC_CMD_SET_PORT_CONFIG, 2, 1},
+ {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
+ {QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
+ {QLCNIC_CMD_IDC_ACK, 5, 1},
+ {QLCNIC_CMD_INIT_NIC_FUNC, 2, 1},
+ {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
+ {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
+ {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
+ {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
+};
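
The metadata table above records, for each mailbox command, how many request and response registers it uses; qlcnic_83xx_alloc_mbx_args() further down walks it to size the argument arrays and build the command header word. A rough standalone sketch of that lookup-and-allocate step is shown below; the demo_* names and the numeric command codes are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_mbx_metadata {
	uint32_t cmd;
	uint32_t in_args;
	uint32_t out_args;
};

static const struct demo_mbx_metadata demo_mbx_tbl[] = {
	{ 0x21 /* GET_NIC_INFO, illustrative */,  2, 19 },
	{ 0x22 /* SET_NIC_INFO, illustrative */, 32,  1 },
};

struct demo_cmd_args {
	uint32_t req_num, rsp_num;
	uint32_t *req_arg, *rsp_arg;
};

static int demo_alloc_mbx_args(struct demo_cmd_args *c, uint32_t type)
{
	size_t i;

	for (i = 0; i < sizeof(demo_mbx_tbl) / sizeof(demo_mbx_tbl[0]); i++) {
		if (demo_mbx_tbl[i].cmd != type)
			continue;
		c->req_num = demo_mbx_tbl[i].in_args;
		c->rsp_num = demo_mbx_tbl[i].out_args;
		c->req_arg = calloc(c->req_num, sizeof(uint32_t));
		c->rsp_arg = calloc(c->rsp_num, sizeof(uint32_t));
		if (!c->req_arg || !c->rsp_arg)
			return -1;
		/* command header word: opcode plus request length (HAL version bits omitted) */
		c->req_arg[0] = type | (c->req_num << 16);
		return 0;
	}
	return -1;		/* unknown command */
}

int main(void)
{
	struct demo_cmd_args cmd = { 0 };

	if (!demo_alloc_mbx_args(&cmd, 0x21))
		printf("req %u words, rsp %u words, hdr %#x\n",
		       cmd.req_num, cmd.rsp_num, cmd.req_arg[0]);
	free(cmd.req_arg);
	free(cmd.rsp_arg);
	return 0;
}
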
+
+static const u32 qlcnic_83xx_ext_reg_tbl[] = {
+ 0x38CC, /* Global Reset */
+ 0x38F0, /* Wildcard */
+ 0x38FC, /* Informant */
+ 0x3038, /* Host MBX ctrl */
+ 0x303C, /* FW MBX ctrl */
+ 0x355C, /* BOOT LOADER ADDRESS REG */
+ 0x3560, /* BOOT LOADER SIZE REG */
+ 0x3564, /* FW IMAGE ADDR REG */
+ 0x1000, /* MBX intr enable */
+ 0x1200, /* Default Intr mask */
+ 0x1204, /* Default Interrupt ID */
+ 0x3780, /* QLC_83XX_IDC_MAJ_VERSION */
+ 0x3784, /* QLC_83XX_IDC_DEV_STATE */
+ 0x3788, /* QLC_83XX_IDC_DRV_PRESENCE */
+ 0x378C, /* QLC_83XX_IDC_DRV_ACK */
+ 0x3790, /* QLC_83XX_IDC_CTRL */
+ 0x3794, /* QLC_83XX_IDC_DRV_AUDIT */
+ 0x3798, /* QLC_83XX_IDC_MIN_VERSION */
+ 0x379C, /* QLC_83XX_RECOVER_DRV_LOCK */
+ 0x37A0, /* QLC_83XX_IDC_PF_0 */
+ 0x37A4, /* QLC_83XX_IDC_PF_1 */
+ 0x37A8, /* QLC_83XX_IDC_PF_2 */
+ 0x37AC, /* QLC_83XX_IDC_PF_3 */
+ 0x37B0, /* QLC_83XX_IDC_PF_4 */
+ 0x37B4, /* QLC_83XX_IDC_PF_5 */
+ 0x37B8, /* QLC_83XX_IDC_PF_6 */
+ 0x37BC, /* QLC_83XX_IDC_PF_7 */
+ 0x37C0, /* QLC_83XX_IDC_PF_8 */
+ 0x37C4, /* QLC_83XX_IDC_PF_9 */
+ 0x37C8, /* QLC_83XX_IDC_PF_10 */
+ 0x37CC, /* QLC_83XX_IDC_PF_11 */
+ 0x37D0, /* QLC_83XX_IDC_PF_12 */
+ 0x37D4, /* QLC_83XX_IDC_PF_13 */
+ 0x37D8, /* QLC_83XX_IDC_PF_14 */
+ 0x37DC, /* QLC_83XX_IDC_PF_15 */
+ 0x37E0, /* QLC_83XX_IDC_DEV_PARTITION_INFO_1 */
+ 0x37E4, /* QLC_83XX_IDC_DEV_PARTITION_INFO_2 */
+ 0x37F0, /* QLC_83XX_DRV_OP_MODE */
+ 0x37F4, /* QLC_83XX_VNIC_STATE */
+ 0x3868, /* QLC_83XX_DRV_LOCK */
+ 0x386C, /* QLC_83XX_DRV_UNLOCK */
+ 0x3504, /* QLC_83XX_DRV_LOCK_ID */
+ 0x34A4, /* QLC_83XX_ASIC_TEMP */
+};
+
+static const u32 qlcnic_83xx_reg_tbl[] = {
+ 0x34A8, /* PEG_HALT_STAT1 */
+ 0x34AC, /* PEG_HALT_STAT2 */
+ 0x34B0, /* FW_HEARTBEAT */
+ 0x3500, /* FLASH LOCK_ID */
+ 0x3528, /* FW_CAPABILITIES */
+ 0x3538, /* Driver active, DRV_REG0 */
+ 0x3540, /* Device state, DRV_REG1 */
+ 0x3544, /* Driver state, DRV_REG2 */
+ 0x3548, /* Driver scratch, DRV_REG3 */
+ 0x354C, /* Device partition info, DRV_REG4 */
+ 0x3524, /* Driver IDC ver, DRV_REG5 */
+ 0x3550, /* FW_VER_MAJOR */
+ 0x3554, /* FW_VER_MINOR */
+ 0x3558, /* FW_VER_SUB */
+ 0x359C, /* NPAR STATE */
+ 0x35FC, /* FW_IMG_VALID */
+ 0x3650, /* CMD_PEG_STATE */
+ 0x373C, /* RCV_PEG_STATE */
+ 0x37B4, /* ASIC TEMP */
+ 0x356C, /* FW API */
+ 0x3570, /* DRV OP MODE */
+ 0x3850, /* FLASH LOCK */
+ 0x3854, /* FLASH UNLOCK */
+};
+
+static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
+ .read_crb = qlcnic_83xx_read_crb,
+ .write_crb = qlcnic_83xx_write_crb,
+ .read_reg = qlcnic_83xx_rd_reg_indirect,
+ .write_reg = qlcnic_83xx_wrt_reg_indirect,
+ .get_mac_address = qlcnic_83xx_get_mac_address,
+ .setup_intr = qlcnic_83xx_setup_intr,
+ .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_83xx_mbx_op,
+ .get_func_no = qlcnic_83xx_get_func_no,
+ .api_lock = qlcnic_83xx_cam_lock,
+ .api_unlock = qlcnic_83xx_cam_unlock,
+ .add_sysfs = qlcnic_83xx_add_sysfs,
+ .remove_sysfs = qlcnic_83xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
+ .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .setup_link_event = qlcnic_83xx_setup_link_event,
+ .get_nic_info = qlcnic_83xx_get_nic_info,
+ .get_pci_info = qlcnic_83xx_get_pci_info,
+ .set_nic_info = qlcnic_83xx_set_nic_info,
+ .change_macvlan = qlcnic_83xx_sre_macaddr_change,
+ .napi_enable = qlcnic_83xx_napi_enable,
+ .napi_disable = qlcnic_83xx_napi_disable,
+ .config_intr_coal = qlcnic_83xx_config_intr_coal,
+ .config_rss = qlcnic_83xx_config_rss,
+ .config_hw_lro = qlcnic_83xx_config_hw_lro,
+ .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_83xx_change_l2_filter,
+ .get_board_info = qlcnic_83xx_get_port_info,
+};
+
+static struct qlcnic_nic_template qlcnic_83xx_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .request_reset = qlcnic_83xx_idc_request_reset,
+ .cancel_idc_work = qlcnic_83xx_idc_exit,
+ .napi_add = qlcnic_83xx_napi_add,
+ .napi_del = qlcnic_83xx_napi_del,
+ .config_ipaddr = qlcnic_83xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
+};
+
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw)
+{
+ ahw->hw_ops = &qlcnic_83xx_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
+ ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build;
+ struct pci_dev *pdev = adapter->pdev;
+
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ dev_info(&pdev->dev, "Driver v%s, firmware version %d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
+ return adapter->fw_version;
+}
+
+static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
+{
+ void __iomem *base;
+ u32 val;
+
+ base = adapter->ahw->pci_base0 +
+ QLC_83XX_CRB_WIN_FUNC(adapter->ahw->pci_func);
+ writel(addr, base);
+ val = readl(base);
+ if (val != addr)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr)
+{
+ int ret;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ret = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!ret) {
+ return QLCRDX(ahw, QLCNIC_WILDCARD);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%x\n", __func__, (int)addr);
+ return -EIO;
+ }
+}
+
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+ u32 data)
+{
+ int err;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ err = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!err) {
+ QLCWRX(ahw, QLCNIC_WILDCARD, data);
+ return 0;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%x data = 0x%x\n",
+ __func__, (int)addr, data);
+ return err;
+ }
+}
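
qlcnic_83xx_rd_reg_indirect() and qlcnic_83xx_wrt_reg_indirect() implement windowed (indirect) register access: the target address is programmed into a per-function CRB window register, read back to confirm it latched, and the data is then moved through the fixed wildcard offset. A minimal standalone sketch of that two-step pattern follows, with a plain array standing in for the PCI BAR; the demo_* names and offsets are illustrative, not hardware-accurate.

#include <stdint.h>
#include <stdio.h>

#define DEMO_BAR_WORDS	64
#define DEMO_WIN_BASE	0x10	/* window base register (illustrative offset) */
#define DEMO_WILDCARD	0x11	/* data port reached through the window      */

static uint32_t demo_bar[DEMO_BAR_WORDS];

/* program the window and verify the write took effect, as __qlcnic_set_win_base() does */
static int demo_set_win_base(uint32_t addr)
{
	demo_bar[DEMO_WIN_BASE] = addr;
	return (demo_bar[DEMO_WIN_BASE] == addr) ? 0 : -1;
}

static int demo_rd_indirect(uint32_t addr, uint32_t *val)
{
	if (demo_set_win_base(addr))
		return -1;
	/* on real hardware the wildcard offset is remapped by the window */
	*val = demo_bar[DEMO_WILDCARD];
	return 0;
}

static int demo_wrt_indirect(uint32_t addr, uint32_t data)
{
	if (demo_set_win_base(addr))
		return -1;
	demo_bar[DEMO_WILDCARD] = data;
	return 0;
}

int main(void)
{
	uint32_t v = 0;

	demo_wrt_indirect(0x3500, 0xdeadbeef);
	demo_rd_indirect(0x3500, &v);
	printf("read back %#x\n", v);
	return 0;
}
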
+
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+{
+ int err, i, num_msix;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!num_intr)
+ num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
+ num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ num_intr));
+ /* account for the AEN MSI-X based interrupt */
+ num_msix += 1;
+ num_msix += adapter->max_drv_tx_rings;
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ num_msix = adapter->ahw->num_msix;
+ else
+ num_msix = 1;
+ /* setup interrupt mapping table for fw */
+ ahw->intr_tbl = vzalloc(num_msix *
+ sizeof(struct qlcnic_intrpt_config));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ /* MSI-X enablement failed, use legacy interrupt */
+ adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
+ adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
+ adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
+ adapter->msix_entries[0].vector = adapter->pdev->irq;
+ dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
+ }
+
+ for (i = 0; i < num_msix; i++) {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ else
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_INTX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+ return 0;
+}
+
+inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+ writel(0, adapter->tgt_mask_reg);
+}
+
+/* Enable MSI-x and INT-x interrupts */
+void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+/* Disable MSI-x and INT-x interrupts */
+void qlcnic_83xx_disable_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(1, sds_ring->crb_intr_mask);
+}
+
+inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+ *adapter)
+{
+ u32 mask;
+
+ /* The mailbox in MSI-X mode and the legacy interrupt share the same
+ * source register. We could be here before contexts are created,
+ * i.e. before sds_ring->crb_intr_mask has been initialized, so
+ * calculate the BAR offset of the interrupt source register directly.
+ */
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+}
+
+inline void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 mask;
+
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(1, adapter->ahw->pci_base0 + mask);
+}
+
+static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+ for (i = 0; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
+}
+
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 intr_val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int retries = 0;
+
+ intr_val = readl(adapter->tgt_status_reg);
+
+ if (!QLC_83XX_VALID_INTX_BIT31(intr_val))
+ return IRQ_NONE;
+
+ if (QLC_83XX_INTX_FUNC(intr_val) != adapter->ahw->pci_func) {
+ adapter->stats.spurious_intr++;
+ return IRQ_NONE;
+ }
+ /* The barrier is required to ensure ordering of the register accesses */
+ wmb();
+
+ /* clear the interrupt trigger control register */
+ writel(0, adapter->isr_int_vec);
+ intr_val = readl(adapter->isr_int_vec);
+ do {
+ intr_val = readl(adapter->tgt_status_reg);
+ if (QLC_83XX_INTX_FUNC(intr_val) != ahw->pci_func)
+ break;
+ retries++;
+ } while (QLC_83XX_VALID_INTX_BIT30(intr_val) &&
+ (retries < QLC_83XX_LEGACY_INTX_MAX_RETRY));
+
+ return IRQ_HANDLED;
+}
+
+static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 resp, event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT)
+ qlcnic_83xx_process_aen(adapter);
+out:
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+}
+
+irqreturn_t qlcnic_83xx_intr(int irq, void *data)
+{
+ struct qlcnic_adapter *adapter = data;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ qlcnic_83xx_poll_process_aen(adapter);
+
+ if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ ahw->diag_cnt++;
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ return IRQ_HANDLED;
+ }
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ } else {
+ sds_ring = &adapter->recv_ctx->sds_rings[0];
+ napi_schedule(&sds_ring->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+
+ if (adapter->nic_ops->clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->ahw->diag_cnt++;
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+
+ return IRQ_HANDLED;
+}
+
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 val = 0, num_msix = adapter->ahw->num_msix - 1;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ num_msix = adapter->ahw->num_msix - 1;
+ else
+ num_msix = 0;
+
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
+
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
+ msleep(20);
+ synchronize_irq(adapter->msix_entries[num_msix].vector);
+ free_irq(adapter->msix_entries[num_msix].vector, adapter);
+}
+
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ u32 val;
+ char name[32];
+ int err = 0;
+ unsigned long flags = 0;
+
+ if (!(adapter->flags & QLCNIC_MSI_ENABLED) &&
+ !(adapter->flags & QLCNIC_MSIX_ENABLED))
+ flags |= IRQF_SHARED;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ handler = qlcnic_83xx_handle_aen;
+ val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector;
+ snprintf(name, (IFNAMSIZ + 4),
+ "%s[%s]", "qlcnic", "aen");
+ err = request_irq(val, handler, flags, name, adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register MBX interrupt\n");
+ return err;
+ }
+ } else {
+ handler = qlcnic_83xx_intr;
+ val = adapter->msix_entries[0].vector;
+ err = request_irq(val, handler, flags, "qlcnic", adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register INTx interrupt\n");
+ return err;
+ }
+ qlcnic_83xx_clear_legacy_intr_mask(adapter);
+ }
+
+ /* Enable mailbox interrupt */
+ qlcnic_83xx_enable_mbx_intrpt(adapter);
+
+ return err;
+}
+
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
+ adapter->ahw->pci_func = val & 0xf;
+}
+
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ u32 val, limit = 0;
+
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_LOCK_FUNC(ahw->pci_func);
+ do {
+ val = readl(addr);
+ if (val) {
+ /* write the function number to register */
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ ahw->pci_func);
+ return 0;
+ }
+ usleep_range(1000, 2000);
+ } while (++limit <= QLCNIC_PCIE_SEM_TIMEOUT);
+
+ return -EIO;
+}
+
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ u32 val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func);
+ val = readl(addr);
+}
+
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ int ret;
+ u32 data;
+
+ if (qlcnic_api_lock(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to acquire lock. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+
+ ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset);
+ qlcnic_api_unlock(adapter);
+
+ if (ret == -EIO) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+ data = ret;
+ memcpy(buf, &data, size);
+}
+
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+
+ memcpy(&data, buf, size);
+ qlcnic_83xx_wrt_reg_indirect(adapter, (u32) offset, data);
+}
+
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
+{
+ int status;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get Port Info failed\n");
+ } else {
+ if (QLC_83XX_SFP_10G_CAPABLE(adapter->ahw->port_config))
+ adapter->ahw->port_type = QLCNIC_XGBE;
+ else
+ adapter->ahw->port_type = QLCNIC_GBE;
+
+ if (QLC_83XX_AUTONEG(adapter->ahw->port_config))
+ adapter->ahw->link_autoneg = AUTONEG_ENABLE;
+ }
+ return status;
+}
+
+void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ val = BIT_2 | ((adapter->ahw->num_msix - 1) << 8);
+ else
+ val = BIT_2;
+
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+}
+
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ahw->fw_hal_version = 2;
+ qlcnic_get_func_no(adapter);
+
+ /* Determine function privilege level */
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged function\n",
+ ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_vf_ops;
+ } else {
+ adapter->nic_ops = &qlcnic_83xx_ops;
+ }
+}
+
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+
+static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+
+ dev_info(&adapter->pdev->dev,
+ "Host MBX regs(%d)\n", cmd->req.num);
+ for (i = 0; i < cmd->req.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->req.arg[i]);
+ }
+ pr_info("\n");
+ dev_info(&adapter->pdev->dev,
+ "FW MBX regs(%d)\n", cmd->rsp.num);
+ for (i = 0; i < cmd->rsp.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->rsp.arg[i]);
+ }
+ pr_info("\n");
+}
+
+/* Mailbox response for mac rcode */
+static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+{
+ u32 fw_data;
+ u8 mac_cmd_rcode;
+
+ fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+ mac_cmd_rcode = (u8)fw_data;
+ if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+ mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+ mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
+ return QLCNIC_RCODE_SUCCESS;
+ return 1;
+}
+
+static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
+{
+ u32 data;
+ unsigned long wait_time = 0;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ /* wait for mailbox completion */
+ do {
+ data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
+ if (++wait_time > QLCNIC_MBX_TIMEOUT) {
+ data = QLCNIC_RCODE_TIMEOUT;
+ break;
+ }
+ mdelay(1);
+ } while (!data);
+ return data;
+}
+
+int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+ u16 opcode;
+ u8 mbx_err_code;
+ unsigned long flags;
+ u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ opcode = LSW(cmd->req.arg[0]);
+ if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
+ dev_info(&adapter->pdev->dev,
+ "Mailbox cmd attempted, 0x%x\n", opcode);
+ dev_info(&adapter->pdev->dev, "Mailbox detached\n");
+ return 0;
+ }
+
+ spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+
+ if (mbx_val) {
+ QLCDB(adapter, DRV,
+ "Mailbox cmd attempted, 0x%x\n", opcode);
+ QLCDB(adapter, DRV,
+ "Mailbox not available, 0x%x, collect FW dump\n",
+ mbx_val);
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ return cmd->rsp.arg[0];
+ }
+
+ /* Fill in mailbox registers */
+ mbx_cmd = cmd->req.arg[0];
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ for (i = 1; i < cmd->req.num; i++)
+ writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+
+ /* Signal FW about the impending command */
+ QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+poll:
+ rsp = qlcnic_83xx_mbx_poll(adapter);
+ if (rsp != QLCNIC_RCODE_TIMEOUT) {
+ /* Get the FW response data */
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
+ qlcnic_83xx_process_aen(adapter);
+ mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (mbx_val)
+ goto poll;
+ }
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
+ opcode = QLCNIC_MBX_RSP(fw_data);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ rsp = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ rsp = qlcnic_83xx_mac_rcode(adapter);
+ if (!rsp)
+ goto out;
+ }
+ dev_err(&adapter->pdev->dev,
+ "MBX command 0x%x failed with err:0x%x\n",
+ opcode, mbx_err_code);
+ rsp = mbx_err_code;
+ qlcnic_dump_mbx(adapter, cmd);
+ break;
+ }
+ goto out;
+ }
+
+ dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
+ QLCNIC_MBX_RSP(mbx_cmd));
+ rsp = QLCNIC_RCODE_TIMEOUT;
+out:
+ /* clear fw mbx control register */
+ QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ return rsp;
+}
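
qlcnic_83xx_mbx_op() follows a fixed handshake: write the request into the host mailbox registers, set the owner bit to signal firmware, poll the firmware mailbox control register until completion or timeout, then read back the response registers and clear ownership. The standalone sketch below models that sequence with a simulated firmware side; the demo_* names are illustrative, and real-protocol details (AEN interleaving, per-command error codes) are omitted.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MBX_REGS	8
#define DEMO_SET_OWNER	0x1
#define DEMO_TIMEOUT	1000

/* toy model of the shared host/firmware mailbox registers */
static uint32_t host_mbx[DEMO_MBX_REGS], fw_mbx[DEMO_MBX_REGS];
static uint32_t host_ctrl, fw_ctrl;

static void demo_fake_fw_poll(void)
{
	/* a real device raises fw_ctrl by itself; simulate it here */
	if (host_ctrl & DEMO_SET_OWNER) {
		fw_mbx[0] = host_mbx[0] | 0x80000000;	/* echo command, flag completion */
		fw_ctrl = DEMO_SET_OWNER;
		host_ctrl = 0;
	}
}

static int demo_mbx_cmd(const uint32_t *req, int nreq, uint32_t *rsp, int nrsp)
{
	int i, wait;

	for (i = 0; i < nreq && i < DEMO_MBX_REGS; i++)
		host_mbx[i] = req[i];			/* fill in mailbox registers */
	host_ctrl = DEMO_SET_OWNER;			/* signal firmware */

	for (wait = 0; wait < DEMO_TIMEOUT; wait++) {
		demo_fake_fw_poll();
		if (fw_ctrl)				/* firmware completed the command */
			break;
	}
	if (!fw_ctrl)
		return -1;				/* timeout */

	for (i = 0; i < nrsp && i < DEMO_MBX_REGS; i++)
		rsp[i] = fw_mbx[i];			/* collect the response */
	fw_ctrl = 0;					/* clear ownership for the next command */
	return 0;
}

int main(void)
{
	uint32_t req[2] = { 0x21, 0 }, rsp[2] = { 0, 0 };

	if (!demo_mbx_cmd(req, 2, rsp, 2))
		printf("response word 0: %#x\n", rsp[0]);
	return 0;
}
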
+
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ u32 temp;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ mbx_tbl = qlcnic_83xx_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
+ memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
+ temp = adapter->ahw->fw_hal_version << 29;
+ mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+ break;
+ }
+ }
+ return 0;
+}
+
+void qlcnic_83xx_idc_aen_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args cmd;
+ int i, err = 0;
+
+ adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work);
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK);
+
+ for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++)
+ cmd.req.arg[i] = adapter->ahw->mbox_aen[i];
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "%s: Mailbox IDC ACK failed.\n", __func__);
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ dev_dbg(&adapter->pdev->dev, "Completion AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(data[0]));
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &adapter->ahw->idc.status);
+ return;
+}
+
+void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 event[QLC_83XX_MBX_AEN_CNT];
+ int i;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ event[i] = readl(QLCNIC_MBX_FW(ahw, i));
+
+ switch (QLCNIC_MBX_RSP(event[0])) {
+
+ case QLCNIC_MBX_LINK_EVENT:
+ qlcnic_83xx_handle_link_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_COMP_EVENT:
+ qlcnic_83xx_handle_idc_comp_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_REQUEST_EVENT:
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ adapter->ahw->mbox_aen[i] = QLCNIC_MBX_RSP(event[i]);
+ queue_delayed_work(adapter->qlcnic_wq,
+ &adapter->idc_aen_work, 0);
+ break;
+ case QLCNIC_MBX_TIME_EXTEND_EVENT:
+ break;
+ case QLCNIC_MBX_SFP_INSERT_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ case QLCNIC_MBX_SFP_REMOVE_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ default:
+ dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ }
+
+ QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
+{
+ int index, i, err, sds_mbx_size;
+ u32 *buf, intrpt_id, intr_mask;
+ u16 context_id;
+ u8 num_sds;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_add_rings_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ context_id = recv_ctx->context_id;
+ num_sds = (adapter->max_sds_rings - QLCNIC_MAX_RING_SETS);
+ ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_ADD_RCV_RINGS);
+ cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
+
+ /* set up status rings, mbx 2-81 */
+ index = 2;
+ for (i = 8; i < adapter->max_sds_rings; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.sds_ring_size = sds->num_desc;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to add rings %d\n", err);
+ goto out;
+ }
+
+ mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
+ index = 0;
+ /* status descriptor ring */
+ for (i = 8; i < adapter->max_sds_rings; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[index];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ index++;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
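
qlcnic_83xx_add_rings() above, and the create-context helpers that follow, build their requests by memcpy()ing packed per-ring descriptor structures into the flat u32 mailbox argument array, advancing the index by sizeof(descriptor) / sizeof(u32) each time. The standalone sketch below shows only that packing step, using a cut-down hypothetical descriptor; the field names and sizes are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* cut-down stand-in for struct qlcnic_sds_mbx: one ring descriptor */
struct demo_sds_mbx {
	uint64_t phy_addr;
	uint16_t ring_size;
	uint16_t intrpt_id;
} __attribute__((packed));

int main(void)
{
	uint32_t arg[32] = { 0 };
	struct demo_sds_mbx sds;
	int i, index = 2;			/* arg[0..1] hold command header words */

	for (i = 0; i < 2; i++) {
		memset(&sds, 0, sizeof(sds));
		sds.phy_addr = 0x1000 + i * 0x100;
		sds.ring_size = 512;
		sds.intrpt_id = i;

		/* copy the packed descriptor into the mailbox argument array */
		memcpy(&arg[index], &sds, sizeof(sds));
		index += sizeof(sds) / sizeof(uint32_t);
	}

	for (i = 0; i < index; i++)
		printf("arg[%d] = %#x\n", i, arg[i]);
	return 0;
}
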
+
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int i, err, index, sds_mbx_size, rds_mbx_size;
+ u8 num_sds, num_rds;
+ u32 *buf, intrpt_id, intr_mask, cap = 0;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_host_rds_ring *rds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_rds_mbx rds_mbx;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_rcv_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ num_rds = adapter->max_rds_rings;
+
+ if (adapter->max_sds_rings <= QLCNIC_MAX_RING_SETS)
+ num_sds = adapter->max_sds_rings;
+ else
+ num_sds = QLCNIC_MAX_RING_SETS;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
+ cap = QLCNIC_CAP0_LEGACY_CONTEXT;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ cap |= QLC_83XX_FW_CAP_LRO_MSS;
+
+ /* set mailbox hdr and capabilities */
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CREATE_RX_CTX);
+ cmd.req.arg[1] = cap;
+ cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
+ (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
+ /* set up status rings, mbx 8-57/87 */
+ index = QLC_83XX_HOST_SDS_MBX_IDX;
+ for (i = 0; i < num_sds; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.sds_ring_size = sds->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+ /* set up receive rings, mbx 88-111/135 */
+ index = QLCNIC_HOST_RDS_MBX_IDX;
+ rds = &recv_ctx->rds_rings[0];
+ rds->producer = 0;
+ memset(&rds_mbx, 0, rds_mbx_size);
+ rds_mbx.phy_addr_reg = rds->phys_addr;
+ rds_mbx.reg_ring_sz = rds->dma_size;
+ rds_mbx.reg_ring_len = rds->num_desc;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->producer = 0;
+ rds_mbx.phy_addr_jmb = rds->phys_addr;
+ rds_mbx.jmb_ring_sz = rds->dma_size;
+ rds_mbx.jmb_ring_len = rds->num_desc;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &rds_mbx, rds_mbx_size);
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create Rx ctx in firmware%d\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd.rsp.arg[1];
+ recv_ctx->context_id = mbx_out->ctx_id;
+ recv_ctx->state = mbx_out->state;
+ recv_ctx->virt_port = mbx_out->vport_id;
+ dev_info(&adapter->pdev->dev, "Rx Context[%d] Created, state:0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
+ /* Receive descriptor ring */
+ /* Standard ring */
+ rds = &recv_ctx->rds_rings[0];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].reg_buf;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].jmb_buf;
+ /* status descriptor ring */
+ for (i = 0; i < num_sds; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[i];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ if (adapter->max_sds_rings > QLCNIC_MAX_RING_SETS)
+ err = qlcnic_83xx_add_rings(adapter);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx, int ring)
+{
+ int err;
+ u16 msix_id;
+ u32 *buf, intr_mask;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_tx_mbx mbx;
+ struct qlcnic_tx_mbx_out *mbx_out;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Reset host resources */
+ tx->producer = 0;
+ tx->sw_consumer = 0;
+ *(tx->hw_consumer) = 0;
+
+ memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
+
+ /* setup mailbox inbox registers */
+ mbx.phys_addr = tx->phys_addr;
+ mbx.cnsmr_index = tx->hw_cons_phys_addr;
+ mbx.size = tx->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id;
+ else
+ msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ mbx.intr_id = msix_id;
+ else
+ mbx.intr_id = 0xffff;
+ mbx.src = 0;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
+ cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES;
+ buf = &cmd.req.arg[6];
+ memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
+ /* send the mailbox command */
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create Tx ctx in firmware 0x%x\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
+ tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
+ tx->ctx_id = mbx_out->ctx_id;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
+ tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+ dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
+ tx->ctx_id, mbx_out->state);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+ u8 ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->max_sds_rings = 1;
+ adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
+
+ ret = qlcnic_attach(adapter);
+ if (ret) {
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ /* disable and free mailbox interrupt */
+ qlcnic_83xx_free_mbx_intr(adapter);
+ adapter->ahw->loopback_state = 0;
+ adapter->ahw->hw_ops->setup_link_event(adapter, 1);
+ }
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
+ int max_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring, err;
+
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_83xx_disable_intr(adapter, sds_ring);
+ }
+ }
+
+ qlcnic_fw_destroy_ctx(adapter);
+ qlcnic_detach(adapter);
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to setup mbx interrupt\n",
+ __func__);
+ goto out;
+ }
+ }
+ adapter->ahw->diag_test = 0;
+ adapter->max_sds_rings = max_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ goto out;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+out:
+ netif_device_attach(netdev);
+}
+
+int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 beacon)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_in;
+ int i, status = 0;
+
+ if (state) {
+ /* Get LED configuration */
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_CONFIG);
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get led config failed.\n");
+ goto mbx_err;
+ } else {
+ for (i = 0; i < 4; i++)
+ adapter->ahw->mbox_reg[i] = cmd.rsp.arg[i+1];
+ }
+ qlcnic_free_mbx_args(&cmd);
+ /* Set LED Configuration */
+ mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) |
+ LSW(QLC_83XX_LED_CONFIG);
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ cmd.req.arg[1] = mbx_in;
+ cmd.req.arg[2] = mbx_in;
+ cmd.req.arg[3] = mbx_in;
+ if (beacon)
+ cmd.req.arg[4] = QLC_83XX_ENABLE_BEACON;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Set led config failed.\n");
+ }
+mbx_err:
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+
+ } else {
+ /* Restoring default LED configuration */
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ cmd.req.arg[1] = adapter->ahw->mbox_reg[0];
+ cmd.req.arg[2] = adapter->ahw->mbox_reg[1];
+ cmd.req.arg[3] = adapter->ahw->mbox_reg[2];
+ if (beacon)
+ cmd.req.arg[4] = adapter->ahw->mbox_reg[3];
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Restoring led config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+ }
+}
+
+void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int status;
+
+ if (enable) {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC);
+ cmd.req.arg[1] = BIT_0 | BIT_31;
+ } else {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC);
+ cmd.req.arg[1] = BIT_0 | BIT_31;
+ }
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s in NIC IDC function event.\n",
+ (enable ? "register" : "unregister"));
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG);
+ cmd.req.arg[1] = adapter->ahw->port_config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "Set Port Config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "Get Port config failed\n");
+ else
+ adapter->ahw->port_config = cmd.rsp.arg[1];
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT);
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+			 "Setup link event mailbox failed\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = (mode ? 1 : 0) | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+			 "Promiscuous mode config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
+
+ QLCDB(adapter, DRV, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(&adapter->pdev->dev,
+			 "Loopback test not supported for non-privileged function\n");
+ return ret;
+ }
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto fail_diag_alloc;
+
+ ret = qlcnic_83xx_set_lb_mode(adapter, mode);
+ if (ret)
+ goto free_diag_res;
+
+ /* Poll for link up event before running traffic */
+ do {
+ msleep(500);
+ qlcnic_83xx_process_aen(adapter);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ dev_info(&adapter->pdev->dev,
+				 "Firmware didn't send link up event for loopback request\n");
+ ret = -QLCNIC_FW_NOT_RESPOND;
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ goto free_diag_res;
+ }
+ } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+
+ ret = qlcnic_do_lb_test(adapter, mode);
+
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+
+free_diag_res:
+ qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+
+fail_diag_alloc:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0, loop = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status)
+ return status;
+
+ config = ahw->port_config;
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to Set Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(300);
+ qlcnic_83xx_process_aen(adapter);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ dev_err(&adapter->pdev->dev,
+ "FW did not generate IDC completion AEN\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ return -EIO;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_ADD);
+ return status;
+}
+
+int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0, loop = 0;
+ u32 config = ahw->port_config;
+
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to Clear Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(300);
+ qlcnic_83xx_process_aen(adapter);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ dev_err(&adapter->pdev->dev,
+				"Firmware didn't send IDC completion AEN\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -EIO;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_DEL);
+ return status;
+}
+
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
+ int mode)
+{
+ int err;
+ u32 temp, temp_ip;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR);
+ if (mode == QLCNIC_IP_UP) {
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = 1 | temp;
+ } else {
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = 2 | temp;
+ }
+
+	/*
+	 * The adapter expects the IP address in network byte order, but the
+	 * mailbox registers are written via writel(), which byte swaps the
+	 * value on big endian architectures.  Use swab32() to cancel out
+	 * that swap.
+	 */
+
+ temp_ip = swab32(ntohl(ip));
+ memcpy(&cmd.req.arg[2], &temp_ip, sizeof(u32));
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(&adapter->netdev->dev,
+			"failed to %s IP 0x%x\n",
+ (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
+{
+ int err;
+ u32 temp, arg1;
+ struct qlcnic_cmd_args cmd;
+ int lro_bit_mask;
+
+ lro_bit_mask = (mode ? (BIT_0 | BIT_1 | BIT_2 | BIT_3) : 0);
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return 0;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO);
+ temp = adapter->recv_ctx->context_id << 16;
+ arg1 = lro_bit_mask | temp;
+ cmd.req.arg[1] = arg1;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "LRO config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 word;
+ struct qlcnic_cmd_args cmd;
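+	/* Default RSS hash key handed to the firmware along with the hash-type/enable word below */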
+ const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
+
+ /*
+ * RSS request:
+ * bits 3-0: Rsvd
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 16-31: indirection table mask
+ */
+ word = ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u32)(enable & 0x1) << 8) |
+ ((0x7ULL) << 16);
+ cmd.req.arg[1] = (adapter->recv_ctx->context_id);
+ cmd.req.arg[2] = word;
+ memcpy(&cmd.req.arg[4], key, sizeof(key));
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err)
+ dev_info(&adapter->pdev->dev, "RSS config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+
+}
+
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ __le16 vlan_id, u8 op)
+{
+ int err;
+ u32 *buf;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_macvlan_mbx mv;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ return err;
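+	/* arg[1] packs the operation in the low byte, bit 8 set, and the Rx context id in the high word */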
+ cmd.req.arg[1] = op | (1 << 8) |
+ (adapter->recv_ctx->context_id << 16);
+
+ mv.vlan = le16_to_cpu(vlan_id);
+ memcpy(&mv.mac, addr, ETH_ALEN);
+ buf = &cmd.req.arg[2];
+ memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "MAC-VLAN %s to CAM failed, err=%d.\n",
+ ((op == 1) ? "add " : "delete "), err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ __le16 vlan_id)
+{
+ u8 mac[ETH_ALEN];
+ memcpy(&mac, addr, ETH_ALEN);
+ qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD);
+}
+
+void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 type, struct qlcnic_cmd_args *cmd)
+{
+ switch (type) {
+ case QLCNIC_SET_STATION_MAC:
+ case QLCNIC_SET_FAC_DEF_MAC:
+ memcpy(&cmd->req.arg[2], mac, sizeof(u32));
+ memcpy(&cmd->req.arg[3], &mac[4], sizeof(u16));
+ break;
+ }
+ cmd->req.arg[1] = type;
+}
+
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+{
+ int err, i;
+ struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
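+		/* Reassemble the MAC: bytes 0-1 come from the high word, bytes 2-5 from the low word */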
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
+		dev_err(&adapter->pdev->dev, "Failed to get MAC address %d\n",
+ err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ cmd.req.arg[1] = 1 | (adapter->recv_ctx->context_id << 16);
+ cmd.req.arg[3] = coal->flag;
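+	/* arg[2]: coalescing packet count in the low word, time in microseconds in the high word */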
+ temp = coal->rx_time_us << 16;
+ cmd.req.arg[2] = coal->rx_packets | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_info(&adapter->pdev->dev,
+ "Failed to send interrupt coalescence parameters\n");
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ u8 link_status, duplex;
+	/* Parse link status, speed, autoneg, module type and duplex from the AEN payload */
+	link_status = LSB(data[3]) & 1;
+ adapter->ahw->link_speed = MSW(data[2]);
+ adapter->ahw->link_autoneg = MSB(MSW(data[3]));
+ adapter->ahw->module_type = MSB(LSW(data[3]));
+ duplex = LSB(MSW(data[3]));
+ if (duplex)
+ adapter->ahw->link_duplex = DUPLEX_FULL;
+ else
+ adapter->ahw->link_duplex = DUPLEX_HALF;
+ adapter->ahw->has_link_events = 1;
+ qlcnic_advert_link_change(adapter, link_status);
+}
+
+irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+{
+ struct qlcnic_adapter *adapter = data;
+ unsigned long flags;
+ u32 mask, resp, event;
+
+ spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT)
+ qlcnic_83xx_process_aen(adapter);
+out:
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable)
+{
+ int err = -EIO;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error, invoked by non management func\n",
+ __func__);
+ return err;
+ }
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH);
+ cmd.req.arg[1] = (port & 0xf) | BIT_4;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev, "Failed to enable eswitch %d\n",
+ err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+
+}
+
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
+{
+ int i, err = -EIO;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error, invoked by non management func\n",
+ __func__);
+ return err;
+ }
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ cmd.req.arg[1] = (nic->pci_func << 16);
+ cmd.req.arg[2] = 0x1 << 16;
+ cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16);
+ cmd.req.arg[4] = nic->capabilities;
+ cmd.req.arg[5] = (nic->max_mac_filters & 0xFF) | ((nic->max_mtu) << 16);
+ cmd.req.arg[6] = (nic->max_tx_ques) | ((nic->max_rx_ques) << 16);
+ cmd.req.arg[7] = (nic->min_tx_bw) | ((nic->max_tx_bw) << 16);
+ for (i = 8; i < 32; i++)
+ cmd.req.arg[i] = 0;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev, "Failed to set nic info %d\n",
+ err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
+{
+ int err;
+ u32 temp;
+ u8 op = 0;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (func_id != adapter->ahw->pci_func) {
+ temp = func_id << 16;
+ cmd.req.arg[1] = op | BIT_31 | temp;
+ } else {
+ cmd.req.arg[1] = adapter->ahw->pci_func << 16;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to get nic info %d\n", err);
+ goto out;
+ }
+
+ npar_info->op_type = cmd.rsp.arg[1];
+ npar_info->pci_func = cmd.rsp.arg[2] & 0xFFFF;
+ npar_info->op_mode = (cmd.rsp.arg[2] & 0xFFFF0000) >> 16;
+ npar_info->phys_port = cmd.rsp.arg[3] & 0xFFFF;
+ npar_info->switch_mode = (cmd.rsp.arg[3] & 0xFFFF0000) >> 16;
+ npar_info->capabilities = cmd.rsp.arg[4];
+ npar_info->max_mac_filters = cmd.rsp.arg[5] & 0xFF;
+ npar_info->max_mtu = (cmd.rsp.arg[5] & 0xFFFF0000) >> 16;
+ npar_info->max_tx_ques = cmd.rsp.arg[6] & 0xFFFF;
+ npar_info->max_rx_ques = (cmd.rsp.arg[6] & 0xFFFF0000) >> 16;
+ npar_info->min_tx_bw = cmd.rsp.arg[7] & 0xFFFF;
+ npar_info->max_tx_bw = (cmd.rsp.arg[7] & 0xFFFF0000) >> 16;
+ if (cmd.rsp.arg[8] & 0x1)
+ npar_info->max_bw_reg_offset = (cmd.rsp.arg[8] & 0x7FFE) >> 1;
+ if (cmd.rsp.arg[8] & 0x10000) {
+ temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
+ npar_info->max_linkspeed_reg_offset = temp;
+ }
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
+{
+ int i, err = 0, j = 0;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ adapter->ahw->act_pci_func = 0;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ pci_info->func_count = cmd.rsp.arg[1] & 0xFF;
+ dev_info(&adapter->pdev->dev,
+ "%s: total functions = %d\n",
+ __func__, pci_info->func_count);
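+		/* Each function entry spans several response registers: id/active, type/default port, min/max Tx bandwidth, then the MAC address */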
+ for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
+ pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
+ pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ i++;
+ pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
+ if (pci_info->type == QLCNIC_TYPE_NIC)
+ adapter->ahw->act_pci_func++;
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->default_port = temp;
+ i++;
+ pci_info->tx_min_bw = cmd.rsp.arg[i] & 0xFFFF;
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->tx_max_bw = temp;
+ i = i + 2;
+ memcpy(pci_info->mac, &cmd.rsp.arg[i], ETH_ALEN - 2);
+ i++;
+ memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
+ i = i + 3;
+
+ dev_info(&adapter->pdev->dev, "%s:\n"
+ "\tid = %d active = %d type = %d\n"
+ "\tport = %d min bw = %d max bw = %d\n"
+ "\tmac_addr = %pM\n", __func__,
+ pci_info->id, pci_info->active, pci_info->type,
+ pci_info->default_port, pci_info->tx_min_bw,
+ pci_info->tx_max_bw, pci_info->mac);
+ }
+ } else {
+		dev_err(&adapter->pdev->dev, "Failed to get PCI Info %d\n",
+ err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
+{
+ int i, index, err;
+ bool type;
+ u8 max_ints;
+ u32 val, temp;
+ struct qlcnic_cmd_args cmd;
+
+ max_ints = adapter->ahw->num_msix - 1;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
+ cmd.req.arg[1] = max_ints;
+ for (i = 0, index = 2; i < max_ints; i++) {
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (adapter->ahw->intr_tbl[i].type << 4);
+ if (adapter->ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (adapter->ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[index++] = val;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure interrupts 0x%x\n", err);
+ goto out;
+ }
+
+ max_ints = cmd.rsp.arg[1];
+ for (i = 0, index = 2; i < max_ints; i++, index += 2) {
+ val = cmd.rsp.arg[index];
+ if (LSB(val)) {
+ dev_info(&adapter->pdev->dev,
+ "Can't configure interrupt %d\n",
+ adapter->ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ adapter->ahw->intr_tbl[i].id = MSW(val);
+ adapter->ahw->intr_tbl[i].enabled = 1;
+ temp = cmd.rsp.arg[index + 1];
+ adapter->ahw->intr_tbl[i].src = temp;
+ } else {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *adapter)
+{
+ int id, timeout = 0;
+ u32 status = 0;
+
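+	/* Poll the flash lock semaphore; give up after QLC_83XX_FLASH_LOCK_TIMEOUT attempts and report the current owner */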
+ while (status == 0) {
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
+ if (status)
+ break;
+
+ if (++timeout >= QLC_83XX_FLASH_LOCK_TIMEOUT) {
+ id = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FLASH_LOCK_OWNER);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, lock held by %d\n", __func__, id);
+ return -EIO;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, adapter->portnum);
+ return 0;
+}
+
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *adapter)
+{
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, 0xFF);
+}
+
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
+ u32 flash_addr, u8 *p_data,
+ int count)
+{
+ int i, ret;
+ u32 word, range, flash_offset, addr = flash_addr;
+ ulong indirect_add, direct_window;
+
+ flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ return -EIO;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr));
+
+ range = flash_offset + (count * sizeof(u32));
+ /* Check if data is spread across multiple sectors */
+ if (range > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+
+ /* Multi sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = qlcnic_83xx_rd_reg_indirect(adapter,
+ indirect_add);
+ if (ret == -EIO)
+ return -EIO;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ flash_offset = flash_offset + 4;
+
+ if (flash_offset > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+ direct_window = QLC_83XX_FLASH_DIRECT_WINDOW;
+ /* This write is needed once for each sector */
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ direct_window,
+ (addr));
+ flash_offset = 0;
+ }
+ }
+ } else {
+ /* Single sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = qlcnic_83xx_rd_reg_indirect(adapter,
+ indirect_add);
+ if (ret == -EIO)
+ return -EIO;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+ int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
+
+ do {
+ status = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLC_83XX_FLASH_STATUS);
+ if ((status & QLC_83XX_FLASH_STATUS_READY) ==
+ QLC_83XX_FLASH_STATUS_READY)
+ break;
+
+ msleep(QLC_83XX_FLASH_STATUS_REG_POLL_DELAY);
+ } while (--retries);
+
+ if (!retries)
+ return -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
+{
+ int ret;
+ u32 cmd;
+ cmd = adapter->ahw->fdt.write_statusreg_cmd;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_enable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG |
+ adapter->ahw->fdt.write_statusreg_cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_disable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
+{
+ int ret, mfg_id;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
+ if (mfg_id == -EIO)
+ return -EIO;
+
+ adapter->flash_mfg_id = (mfg_id & 0xFF);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter)
+{
+ int count, fdt_size, ret = 0;
+
+ fdt_size = sizeof(struct qlcnic_fdt);
+ count = fdt_size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ memset(&adapter->ahw->fdt, 0, fdt_size);
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION,
+ (u8 *)&adapter->ahw->fdt,
+ count);
+
+ qlcnic_83xx_unlock_flash(adapter);
+ return ret;
+}
+
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
+ u32 sector_start_addr)
+{
+ u32 reversed_addr, addr1, addr2, cmd;
+ int ret = -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write_op(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ addr1 = (sector_start_addr & 0xFF) << 16;
+ addr2 = (sector_start_addr & 0xFF0000) >> 16;
+ reversed_addr = addr1 | addr2;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ reversed_addr);
+ cmd = QLC_83XX_FLASH_FDT_ERASE_DEF_SIG | adapter->ahw->fdt.erase_cmd;
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id)
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, cmd);
+ else
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_ERASE_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write_op(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data)
+{
+ int ret = -EIO;
+ u32 addr1 = 0x00800000 | (addr >> 2);
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, addr1);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data, int count)
+{
+ u32 temp;
+ int ret = -EIO;
+
+ if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) ||
+ (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid word count\n", __func__);
+ return -EIO;
+ }
+
+ temp = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL);
+
+ /* First DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_FIRST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ count--;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL);
+ /* Second to N-1 DWORD writes */
+ while (count != 1) {
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ count--;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL |
+ (addr >> 2));
+ /* Last DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS);
+ if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
+ dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
+ __func__, __LINE__);
+ /* Operation failed, clear error bit */
+ temp = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL);
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ }
+
+ return 0;
+}
+
+static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter)
+{
+ u32 val, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+
+	/* Check if recovery needs to be performed by the calling function */
+ if ((val & QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK) == 0) {
+ val = val & ~0x3F;
+ val = val | ((adapter->portnum << 2) |
+ QLC_83XX_NEED_DRV_LOCK_RECOVERY);
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated\n", __func__);
+ msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+ id = ((val >> 2) & 0xF);
+ if (id == adapter->portnum) {
+ val = val & ~QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK;
+ val = val | QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ /* Force release the lock */
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+ /* Clear recovery bits */
+ val = val & ~0x3F;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery completed\n", __func__);
+ } else {
+ dev_info(&adapter->pdev->dev,
+				 "%s: func %d will resume the lock recovery process\n",
+ __func__, id);
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated by other functions\n",
+ __func__);
+ }
+}
+
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 lock_alive_counter, val, id, i = 0, status = 0, temp = 0;
+ int max_attempt = 0;
+
+ while (status == 0) {
+ status = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK);
+ if (status)
+ break;
+
+ msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY);
+ i++;
+
+ if (i == 1)
+ temp = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+
+ if (i == QLC_83XX_DRV_LOCK_WAIT_COUNTER) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ if (val == temp) {
+ id = val & 0xFF;
+ dev_info(&adapter->pdev->dev,
+ "%s: lock to be recovered from %d\n",
+ __func__, id);
+ qlcnic_83xx_recover_driver_lock(adapter);
+ i = 0;
+ max_attempt++;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
+		/* Force exit from the while loop after a few attempts */
+ if (max_attempt == QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
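+	/* Lock acquired: bump the lock-alive counter (upper bits) and record this port as owner in the low byte */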
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ lock_alive_counter = val >> 8;
+ lock_alive_counter++;
+ val = lock_alive_counter << 8 | adapter->portnum;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+
+ return 0;
+}
+
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 val, lock_alive_counter, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = val & 0xFF;
+ lock_alive_counter = val >> 8;
+
+ if (id != adapter->portnum)
+ dev_err(&adapter->pdev->dev,
+			"%s: Warning, func %d is unlocking lock owned by %d\n",
+ __func__, adapter->portnum, id);
+
+ val = (lock_alive_counter << 8) | 0xFF;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+}
+
+int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+ u32 *data, u32 count)
+{
+ int i, j, ret = 0;
+ u32 temp;
+
+ /* Check alignment */
+ if (addr & 0xF)
+ return -EIO;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0);
+
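+	/* Each iteration writes one 16-byte chunk through the four 32-bit data registers, triggers the write and polls until the memory agent is no longer busy */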
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX)) ||
+ (ADDR_IN_RANGE(addr, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))) {
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
+ QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
+ QLCNIC_TA_WRITE_START);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLCNIC_MS_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failure */
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_WARNING
+ "MS memory write failed\n");
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+ }
+
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
+ u8 *p_data, int count)
+{
+ int i, ret;
+ u32 word, addr = flash_addr;
+ ulong indirect_addr;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr))) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = qlcnic_83xx_rd_reg_indirect(adapter,
+ indirect_addr);
+ if (ret == -EIO)
+ return -EIO;
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 config = 0, state;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(ahw->pci_func));
+ if (!QLC_83xx_FUNC_VAL(state, ahw->pci_func)) {
+ dev_info(&adapter->pdev->dev, "link state down\n");
+ return config;
+ }
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Get Link Status Command failed: 0x%x\n", err);
+ goto out;
+ } else {
+ config = cmd.rsp.arg[1];
+ switch (QLC_83XX_CURRENT_LINK_SPEED(config)) {
+ case QLC_83XX_10M_LINK:
+ ahw->link_speed = SPEED_10;
+ break;
+ case QLC_83XX_100M_LINK:
+ ahw->link_speed = SPEED_100;
+ break;
+ case QLC_83XX_1G_LINK:
+ ahw->link_speed = SPEED_1000;
+ break;
+ case QLC_83XX_10G_LINK:
+ ahw->link_speed = SPEED_10000;
+ break;
+ default:
+ ahw->link_speed = 0;
+ break;
+ }
+ config = cmd.rsp.arg[3];
+ if (config & 1)
+ err = 1;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return config;
+}
+
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
+{
+ u32 config = 0;
+ int status = 0;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Get port configuration info */
+ status = qlcnic_83xx_get_port_info(adapter);
+ /* Get Link Status related info */
+ config = qlcnic_83xx_test_link(adapter);
+ ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ /* hard code until there is a way to get it from flash */
+ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+ return status;
+}
+
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
+{
+ int status = 0;
+ u32 config = adapter->ahw->port_config;
+
+ if (ecmd->autoneg)
+ adapter->ahw->port_config |= BIT_15;
+
+ switch (ethtool_cmd_speed(ecmd)) {
+ case SPEED_10:
+ adapter->ahw->port_config |= BIT_8;
+ break;
+ case SPEED_100:
+ adapter->ahw->port_config |= BIT_9;
+ break;
+ case SPEED_1000:
+ adapter->ahw->port_config |= BIT_10;
+ break;
+ case SPEED_10000:
+ adapter->ahw->port_config |= BIT_11;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_info(&adapter->pdev->dev,
+			 "Failed to set link speed and autoneg.\n");
+ adapter->ahw->port_config = config;
+ }
+ return status;
+}
+
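+/* Combine two consecutive 32-bit mailbox response registers into one 64-bit counter and advance the output pointer */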
+static inline u64 *qlcnic_83xx_copy_stats(struct qlcnic_cmd_args *cmd,
+ u64 *data, int index)
+{
+ u32 low, hi;
+ u64 val;
+
+ low = cmd->rsp.arg[index];
+ hi = cmd->rsp.arg[index + 1];
+ val = (((u64) low) | (((u64) hi) << 32));
+ *data++ = val;
+ return data;
+}
+
+static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd, u64 *data,
+ int type, int *ret)
+{
+ int err, k, total_regs;
+
+ *ret = 0;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_info(&adapter->pdev->dev,
+ "Error in get statistics mailbox command\n");
+ *ret = -EIO;
+ return data;
+ }
+ total_regs = cmd->rsp.num;
+ switch (type) {
+ case QLC_83XX_STAT_MAC:
+ /* fill in MAC tx counters */
+ for (k = 2; k < 28; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx counters */
+ for (k += 6; k < 60; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx frame stats */
+ for (k += 6; k < 80; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_RX:
+ for (k = 2; k < 8; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < 24; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes containing RE1FBQ error data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_TX:
+ for (k = 2; k < 10; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "Unknown get statistics mode\n");
+ *ret = -EIO;
+ }
+ return data;
+}
+
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
+ /* Get Tx stats */
+ cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16);
+ cmd.rsp.num = QLC_83XX_TX_STAT_REGS;
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_TX, &ret);
+ if (ret) {
+		dev_info(&adapter->pdev->dev, "Error getting Tx stats\n");
+ goto out;
+ }
+ /* Get MAC stats */
+ cmd.req.arg[1] = BIT_2 | (adapter->portnum << 16);
+ cmd.rsp.num = QLC_83XX_MAC_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_MAC, &ret);
+ if (ret) {
+ dev_info(&adapter->pdev->dev,
+			 "Error getting MAC stats\n");
+ goto out;
+ }
+ /* Get Rx stats */
+ cmd.req.arg[1] = adapter->recv_ctx->context_id << 16;
+ cmd.rsp.num = QLC_83XX_RX_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_RX, &ret);
+ if (ret)
+ dev_info(&adapter->pdev->dev,
+			 "Error getting Rx stats\n");
+out:
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
+{
+ u32 major, minor, sub;
+
+ major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ sub = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ if (adapter->fw_version != QLCNIC_VERSION_CODE(major, minor, sub)) {
+ dev_info(&adapter->pdev->dev, "%s: Reg test failed\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
+
+int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+{
+ return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
+ sizeof(adapter->ahw->ext_reg_tbl)) +
+	       (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+ sizeof(adapter->ahw->reg_tbl));
+}
+
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
+{
+ int i, j = 0;
+
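+	/* Dump layout: shared registers from qlcnic_83xx_reg_tbl first, then the 83xx extended registers */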
+ for (i = QLCNIC_DEV_INFO_SIZE + 1;
+ j < ARRAY_SIZE(qlcnic_83xx_reg_tbl); i++, j++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, j);
+
+ for (j = 0; j < ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl); j++)
+ regs_buff[i++] = QLCRDX(adapter->ahw, j);
+ return i;
+}
+
+int qlcnic_83xx_interrupt_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u32 data;
+ u16 intrpt_id, id;
+ u8 val;
+ int ret, max_sds_rings = adapter->max_sds_rings;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ if (ret)
+ goto fail_diag_irq;
+
+ ahw->diag_cnt = 0;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[0].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+ cmd.req.arg[1] = 1;
+ cmd.req.arg[2] = intrpt_id;
+ cmd.req.arg[3] = BIT_0;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ data = cmd.rsp.arg[2];
+ id = LSW(data);
+ val = LSB(MSW(data));
+ if (id != intrpt_id)
+ dev_info(&adapter->pdev->dev,
+			 "Interrupt generated: 0x%x, requested: 0x%x\n",
+ id, intrpt_id);
+ if (val)
+ dev_err(&adapter->pdev->dev,
+ "Interrupt test error: 0x%x\n", val);
+ if (ret)
+ goto done;
+
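+	/* Give the requested interrupt time to fire; a diag_cnt of zero means none was counted and the test fails */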
+ msleep(20);
+ ret = !ahw->diag_cnt;
+
+done:
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+
+fail_diag_irq:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed\n", __func__);
+ return;
+ }
+ config = ahw->port_config;
+ if (config & QLC_83XX_CFG_STD_PAUSE) {
+ if (config & QLC_83XX_CFG_STD_TX_PAUSE)
+ pause->tx_pause = 1;
+ if (config & QLC_83XX_CFG_STD_RX_PAUSE)
+ pause->rx_pause = 1;
+ }
+
+ if (QLC_83XX_AUTONEG(config))
+ pause->autoneg = 1;
+}
+
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed.\n", __func__);
+ return status;
+ }
+ config = ahw->port_config;
+
+ if (ahw->port_type == QLCNIC_GBE) {
+ if (pause->autoneg)
+ ahw->port_config |= QLC_83XX_ENABLE_AUTONEG;
+ if (!pause->autoneg)
+ ahw->port_config &= ~QLC_83XX_ENABLE_AUTONEG;
+ } else if ((ahw->port_type == QLCNIC_XGBE) && (pause->autoneg)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (!(config & QLC_83XX_CFG_STD_PAUSE))
+ ahw->port_config |= QLC_83XX_CFG_STD_PAUSE;
+
+ if (pause->rx_pause && pause->tx_pause) {
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ } else if (pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_TX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_RX_PAUSE;
+ } else if (pause->tx_pause && !pause->rx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
+ } else if (!pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ }
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Set Pause Config failed.\n", __func__);
+ ahw->port_config = config;
+ }
+ return status;
+}
+
+static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_READ_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
+ return ret & 0xFF;
+}
+
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
+{
+ int status;
+
+ status = qlcnic_83xx_read_flash_status_reg(adapter);
+ if (status == -EIO) {
+ dev_info(&adapter->pdev->dev, "%s: EEPROM test failed.\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
new file mode 100644
index 000000000000..61f81f6c84a9
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -0,0 +1,438 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_83XX_HW_H
+#define __QLCNIC_83XX_HW_H
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include "qlcnic_hw.h"
+
+/* Directly mapped registers */
+#define QLC_83XX_CRB_WIN_BASE 0x3800
+#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4))
+#define QLC_83XX_SEM_LOCK_BASE 0x3840
+#define QLC_83XX_SEM_UNLOCK_BASE 0x3844
+#define QLC_83XX_SEM_LOCK_FUNC(f) (QLC_83XX_SEM_LOCK_BASE+((f)*8))
+#define QLC_83XX_SEM_UNLOCK_FUNC(f) (QLC_83XX_SEM_UNLOCK_BASE+((f)*8))
+#define QLC_83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
+#define QLC_83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
+#define QLC_83XX_LINK_SPEED_FACTOR 10
+#define QLC_83xx_FUNC_VAL(v, f) ((v) & (1 << (f * 4)))
+#define QLC_83XX_INTX_PTR 0x38C0
+#define QLC_83XX_INTX_TRGR 0x38C4
+#define QLC_83XX_INTX_MASK 0x38C8
+
+#define QLC_83XX_DRV_LOCK_WAIT_COUNTER 100
+#define QLC_83XX_DRV_LOCK_WAIT_DELAY 20
+#define QLC_83XX_NEED_DRV_LOCK_RECOVERY 1
+#define QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS 2
+#define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3
+#define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200
+#define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3
+
+#define QLC_83XX_NO_NIC_RESOURCE 0x5
+#define QLC_83XX_MAC_PRESENT 0xC
+#define QLC_83XX_MAC_ABSENT 0xD
+
+
+#define QLC_83XX_FLASH_SECTOR_SIZE (64 * 1024)
+
+/* PEG status definitions */
+#define QLC_83XX_CMDPEG_COMPLETE 0xff01
+#define QLC_83XX_VALID_INTX_BIT30(val) ((val) & BIT_30)
+#define QLC_83XX_VALID_INTX_BIT31(val) ((val) & BIT_31)
+#define QLC_83XX_INTX_FUNC(val) ((val) & 0xFF)
+#define QLC_83XX_LEGACY_INTX_MAX_RETRY 100
+#define QLC_83XX_LEGACY_INTX_DELAY 4
+#define QLC_83XX_REG_DESC 1
+#define QLC_83XX_LRO_DESC 2
+#define QLC_83XX_CTRL_DESC 3
+#define QLC_83XX_FW_CAPABILITY_TSO BIT_6
+#define QLC_83XX_FW_CAP_LRO_MSS BIT_17
+#define QLC_83XX_HOST_RDS_MODE_UNIQUE 0
+#define QLC_83XX_HOST_SDS_MBX_IDX 8
+
+#define QLCNIC_HOST_RDS_MBX_IDX 88
+#define QLCNIC_MAX_RING_SETS 8
+
+/* Pause control registers */
+#define QLC_83XX_SRE_SHIM_REG 0x0D200284
+#define QLC_83XX_PORT0_THRESHOLD 0x0B2003A4
+#define QLC_83XX_PORT1_THRESHOLD 0x0B2013A4
+#define QLC_83XX_PORT0_TC_MC_REG 0x0B200388
+#define QLC_83XX_PORT1_TC_MC_REG 0x0B201388
+#define QLC_83XX_PORT0_TC_STATS 0x0B20039C
+#define QLC_83XX_PORT1_TC_STATS 0x0B20139C
+#define QLC_83XX_PORT2_IFB_THRESHOLD 0x0B200704
+#define QLC_83XX_PORT3_IFB_THRESHOLD 0x0B201704
+
+/* Peg PC status registers */
+#define QLC_83XX_CRB_PEG_NET_0 0x3400003c
+#define QLC_83XX_CRB_PEG_NET_1 0x3410003c
+#define QLC_83XX_CRB_PEG_NET_2 0x3420003c
+#define QLC_83XX_CRB_PEG_NET_3 0x3430003c
+#define QLC_83XX_CRB_PEG_NET_4 0x34b0003c
+
+/* Firmware image definitions */
+#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
+#define QLC_83XX_BOOT_FROM_FLASH 0
+#define QLC_83XX_BOOT_FROM_FILE 0x12345678
+
+#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+
+struct qlcnic_intrpt_config {
+ u8 type;
+ u8 enabled;
+ u16 id;
+ u32 src;
+};
+
+struct qlcnic_macvlan_mbx {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+};
+
+struct qlc_83xx_fw_info {
+ const struct firmware *fw;
+ u16 major_fw_version;
+ u8 minor_fw_version;
+ u8 sub_fw_version;
+ u8 fw_build_num;
+ u8 load_from_file;
+};
+
+struct qlc_83xx_reset {
+ struct qlc_83xx_reset_hdr *hdr;
+ int seq_index;
+ int seq_error;
+ int array_index;
+ u32 array[QLC_83XX_MAX_RESET_SEQ_ENTRIES];
+ u8 *buff;
+ u8 *stop_offset;
+ u8 *start_offset;
+ u8 *init_offset;
+ u8 seq_end;
+ u8 template_end;
+};
+
+#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1
+#define QLC_83XX_IDC_GRACEFULL_RESET 0x2
+#define QLC_83XX_IDC_TIMESTAMP 0
+#define QLC_83XX_IDC_DURATION 1
+#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30
+#define QLC_83XX_IDC_RESET_ACK_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_RESET_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_QUIESCE_ACK_TIMEOUT_SECS 20
+#define QLC_83XX_IDC_FW_POLL_DELAY (1 * HZ)
+#define QLC_83XX_IDC_FW_FAIL_THRESH 2
+#define QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO 8
+#define QLC_83XX_IDC_MAX_CNA_FUNCTIONS 16
+#define QLC_83XX_IDC_MAJOR_VERSION 1
+#define QLC_83XX_IDC_MINOR_VERSION 0
+#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020
+
+struct qlcnic_adapter;
+struct qlc_83xx_idc {
+ int (*state_entry) (struct qlcnic_adapter *);
+ u64 sec_counter;
+ u64 delay;
+ unsigned long status;
+ int err_code;
+ int collect_dump;
+ u8 curr_state;
+ u8 prev_state;
+ u8 vnic_state;
+ u8 vnic_wait_limit;
+ u8 quiesce_req;
+ char **name;
+};
+
+#define QLCNIC_MBX_RSP(reg) LSW(reg)
+#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF)
+#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F)
+#define QLCNIC_MBX_HOST(ahw, i) ((ahw)->pci_base0 + ((i) * 4))
+#define QLCNIC_MBX_FW(ahw, i) ((ahw)->pci_base0 + 0x800 + ((i) * 4))
+
+/* Mailbox process AEN count */
+#define QLC_83XX_IDC_COMP_AEN 3
+#define QLC_83XX_MBX_AEN_CNT 5
+#define QLC_83XX_MODULE_LOADED 1
+#define QLC_83XX_MBX_READY 2
+#define QLC_83XX_MBX_AEN_ACK 3
+#define QLC_83XX_SFP_PRESENT(data) ((data) & 3)
+#define QLC_83XX_SFP_ERR(data) (((data) >> 2) & 3)
+#define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F)
+#define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16))
+#define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10)
+#define QLC_83XX_SFP_10G_CAPABLE(data) ((data) & BIT_11)
+#define QLC_83XX_LINK_STATS(data) ((data) & BIT_0)
+#define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7)
+#define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3)
+#define QLC_83XX_LINK_LB(data) (((data) >> 8) & 7)
+#define QLC_83XX_LINK_FEC(data) ((data) & BIT_12)
+#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13)
+#define QLC_83XX_DCBX(data) (((data) >> 28) & 7)
+#define QLC_83XX_AUTONEG(data) ((data) & BIT_15)
+#define QLC_83XX_CFG_STD_PAUSE (1 << 5)
+#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20)
+#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20)
+#define QLC_83XX_CFG_STD_TX_RX_PAUSE (3 << 20)
+#define QLC_83XX_ENABLE_AUTONEG (1 << 15)
+#define QLC_83XX_CFG_LOOPBACK_HSS (2 << 1)
+#define QLC_83XX_CFG_LOOPBACK_PHY (3 << 1)
+#define QLC_83XX_CFG_LOOPBACK_EXT (4 << 1)
+
+/* LED configuration settings */
+#define QLC_83XX_ENABLE_BEACON 0xe
+#define QLC_83XX_LED_RATE 0xff
+#define QLC_83XX_LED_ACT (1 << 10)
+#define QLC_83XX_LED_MOD (0 << 13)
+#define QLC_83XX_LED_CONFIG (QLC_83XX_LED_RATE | QLC_83XX_LED_ACT | \
+ QLC_83XX_LED_MOD)
+
+#define QLC_83XX_10M_LINK 1
+#define QLC_83XX_100M_LINK 2
+#define QLC_83XX_1G_LINK 3
+#define QLC_83XX_10G_LINK 4
+#define QLC_83XX_STAT_TX 3
+#define QLC_83XX_STAT_RX 2
+#define QLC_83XX_STAT_MAC 1
+#define QLC_83XX_TX_STAT_REGS 14
+#define QLC_83XX_RX_STAT_REGS 40
+#define QLC_83XX_MAC_STAT_REGS 80
+
+#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN) (0x3 & ((VAL) >> (FN * 2)))
+#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN) ((VAL) << (FN * 2))
+#define QLC_83XX_DEFAULT_OPMODE 0x55555555
+#define QLC_83XX_PRIVLEGED_FUNC 0x1
+#define QLC_83XX_VIRTUAL_FUNC 0x2
+
+#define QLC_83XX_LB_MAX_FILTERS 2048
+#define QLC_83XX_LB_BUCKET_SIZE 256
+#define QLC_83XX_MINIMUM_VECTOR 3
+
+#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
+#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
+#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
+#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
+#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
+#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
+#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
+#define QLC_83XX_DEFAULT_MODE 0x0
+#define QLCNIC_BRDTYPE_83XX_10G 0x0083
+
+#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010
+#define QLC_83XX_FLASH_SPI_CONTROL 0x2808E014
+#define QLC_83XX_FLASH_STATUS 0x42100004
+#define QLC_83XX_FLASH_CONTROL 0x42110004
+#define QLC_83XX_FLASH_ADDR 0x42110008
+#define QLC_83XX_FLASH_WRDATA 0x4211000C
+#define QLC_83XX_FLASH_RDDATA 0x42110018
+#define QLC_83XX_FLASH_DIRECT_WINDOW 0x42110030
+#define QLC_83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF & (DATA)))
+#define QLC_83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLC_83XX_FLASH_WRITE_CMD 0xdacdacda
+#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca
+#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000
+#define QLC_83XX_FLASH_STATUS_READY 0x6
+#define QLC_83XX_FLASH_BULK_WRITE_MIN 2
+#define QLC_83XX_FLASH_BULK_WRITE_MAX 64
+#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLC_83XX_ERASE_MODE 1
+#define QLC_83XX_WRITE_MODE 2
+#define QLC_83XX_BULK_WRITE_MODE 3
+#define QLC_83XX_FLASH_FDT_WRITE_DEF_SIG 0xFD0100
+#define QLC_83XX_FLASH_FDT_ERASE_DEF_SIG 0xFD0300
+#define QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL 0xFD009F
+#define QLC_83XX_FLASH_OEM_ERASE_SIG 0xFD03D8
+#define QLC_83XX_FLASH_OEM_WRITE_SIG 0xFD0101
+#define QLC_83XX_FLASH_OEM_READ_SIG 0xFD0005
+#define QLC_83XX_FLASH_ADDR_TEMP_VAL 0x00800000
+#define QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL 0x00800001
+#define QLC_83XX_FLASH_WRDATA_DEF 0x0
+#define QLC_83XX_FLASH_READ_CTRL 0x3F
+#define QLC_83XX_FLASH_SPI_CTRL 0x4
+#define QLC_83XX_FLASH_FIRST_ERASE_MS_VAL 0x2
+#define QLC_83XX_FLASH_SECOND_ERASE_MS_VAL 0x5
+#define QLC_83XX_FLASH_LAST_ERASE_MS_VAL 0x3D
+#define QLC_83XX_FLASH_FIRST_MS_PATTERN 0x43
+#define QLC_83XX_FLASH_SECOND_MS_PATTERN 0x7F
+#define QLC_83XX_FLASH_LAST_MS_PATTERN 0x7D
+#define QLC_83xx_FLASH_MAX_WAIT_USEC 100
+#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000
+
+/* Additional registers in 83xx */
+enum qlc_83xx_ext_regs {
+ QLCNIC_GLOBAL_RESET = 0,
+ QLCNIC_WILDCARD,
+ QLCNIC_INFORMANT,
+ QLCNIC_HOST_MBX_CTRL,
+ QLCNIC_FW_MBX_CTRL,
+ QLCNIC_BOOTLOADER_ADDR,
+ QLCNIC_BOOTLOADER_SIZE,
+ QLCNIC_FW_IMAGE_ADDR,
+ QLCNIC_MBX_INTR_ENBL,
+ QLCNIC_DEF_INT_MASK,
+ QLCNIC_DEF_INT_ID,
+ QLC_83XX_IDC_MAJ_VERSION,
+ QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DRV_PRESENCE,
+ QLC_83XX_IDC_DRV_ACK,
+ QLC_83XX_IDC_CTRL,
+ QLC_83XX_IDC_DRV_AUDIT,
+ QLC_83XX_IDC_MIN_VERSION,
+ QLC_83XX_RECOVER_DRV_LOCK,
+ QLC_83XX_IDC_PF_0,
+ QLC_83XX_IDC_PF_1,
+ QLC_83XX_IDC_PF_2,
+ QLC_83XX_IDC_PF_3,
+ QLC_83XX_IDC_PF_4,
+ QLC_83XX_IDC_PF_5,
+ QLC_83XX_IDC_PF_6,
+ QLC_83XX_IDC_PF_7,
+ QLC_83XX_IDC_PF_8,
+ QLC_83XX_IDC_PF_9,
+ QLC_83XX_IDC_PF_10,
+ QLC_83XX_IDC_PF_11,
+ QLC_83XX_IDC_PF_12,
+ QLC_83XX_IDC_PF_13,
+ QLC_83XX_IDC_PF_14,
+ QLC_83XX_IDC_PF_15,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_1,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_2,
+ QLC_83XX_DRV_OP_MODE,
+ QLC_83XX_VNIC_STATE,
+ QLC_83XX_DRV_LOCK,
+ QLC_83XX_DRV_UNLOCK,
+ QLC_83XX_DRV_LOCK_ID,
+ QLC_83XX_ASIC_TEMP,
+};
+
+/* 83xx functions */
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
+int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8);
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
+int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32);
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong);
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
+void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, __le16);
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int);
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
+int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *);
+void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+ struct qlcnic_cmd_args *);
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
+void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
+ struct qlcnic_info *);
+void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_intr(int, void *);
+irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
+void qlcnic_83xx_enable_intr(struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+void qlcnic_83xx_disable_intr(struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
+ const struct pci_device_id *);
+void qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+int qlcnic_enable_eswitch(struct qlcnic_adapter *, u8, u8);
+int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *);
+int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *);
+void qlcnic_83xx_idc_aen_work(struct work_struct *);
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *, __be32, int);
+
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int);
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *);
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *);
+int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *);
+int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int);
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *);
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
+ u32, u8 *, int);
+int qlcnic_83xx_init(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
+int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
+int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
+int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *, int);
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
+ struct qlcnic_info *, u8);
+int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_test_link(struct qlcnic_adapter *);
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
+int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
+int qlcnic_83xx_loopback_test(struct net_device *, u8);
+int qlcnic_83xx_interrupt_test(struct net_device *);
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
new file mode 100644
index 000000000000..c53832b02b3e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -0,0 +1,2054 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+/* Reset template definitions */
+#define QLC_83XX_RESTART_TEMPLATE_SIZE 0x2000
+#define QLC_83XX_RESET_TEMPLATE_ADDR 0x4F0000
+#define QLC_83XX_RESET_SEQ_VERSION 0x0101
+
+#define QLC_83XX_OPCODE_NOP 0x0000
+#define QLC_83XX_OPCODE_WRITE_LIST 0x0001
+#define QLC_83XX_OPCODE_READ_WRITE_LIST 0x0002
+#define QLC_83XX_OPCODE_POLL_LIST 0x0004
+#define QLC_83XX_OPCODE_POLL_WRITE_LIST 0x0008
+#define QLC_83XX_OPCODE_READ_MODIFY_WRITE 0x0010
+#define QLC_83XX_OPCODE_SEQ_PAUSE 0x0020
+#define QLC_83XX_OPCODE_SEQ_END 0x0040
+#define QLC_83XX_OPCODE_TMPL_END 0x0080
+#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
+
+/* Template header */
+struct qlc_83xx_reset_hdr {
+ u16 version;
+ u16 signature;
+ u16 size;
+ u16 entries;
+ u16 hdr_size;
+ u16 checksum;
+ u16 init_offset;
+ u16 start_offset;
+} __packed;
+
+/* Command entry header. */
+struct qlc_83xx_entry_hdr {
+ u16 cmd;
+ u16 size;
+ u16 count;
+ u16 delay;
+} __packed;
+
+/* Generic poll command */
+struct qlc_83xx_poll {
+ u32 mask;
+ u32 status;
+} __packed;
+
+/* Read modify write command */
+struct qlc_83xx_rmw {
+ u32 mask;
+ u32 xor_value;
+ u32 or_value;
+ u8 shl;
+ u8 shr;
+ u8 index_a;
+ u8 rsvd;
+} __packed;
+
+/* Generic command with 2 DWORD */
+struct qlc_83xx_entry {
+ u32 arg1;
+ u32 arg2;
+} __packed;
+
+/* Generic command with 4 DWORD */
+struct qlc_83xx_quad_entry {
+ u32 dr_addr;
+ u32 dr_value;
+ u32 ar_addr;
+ u32 ar_value;
+} __packed;
+
+static const char *const qlc_83xx_idc_states[] = {
+ "Unknown",
+ "Cold",
+ "Init",
+ "Ready",
+ "Need Reset",
+ "Need Quiesce",
+ "Failed",
+ "Quiesce"
+};
+
+/* Device States */
+enum qlcnic_83xx_states {
+ QLC_83XX_IDC_DEV_UNKNOWN,
+ QLC_83XX_IDC_DEV_COLD,
+ QLC_83XX_IDC_DEV_INIT,
+ QLC_83XX_IDC_DEV_READY,
+ QLC_83XX_IDC_DEV_NEED_RESET,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT,
+ QLC_83XX_IDC_DEV_FAILED,
+ QLC_83XX_IDC_DEV_QUISCENT
+};
+
+static int
+qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ if ((val & 0xFFFF))
+ return 1;
+ else
+ return 0;
+}
+
+static void qlcnic_83xx_idc_log_state_history(struct qlcnic_adapter *adapter)
+{
+ u32 cur, prev;
+ cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+
+ dev_info(&adapter->pdev->dev,
+ "current state = %s, prev state = %s\n",
+ adapter->ahw->idc.name[cur],
+ adapter->ahw->idc.name[prev]);
+}
+
+static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter,
+ u8 mode, int lock)
+{
+ u32 val;
+ int seconds;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
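+	/* Audit register layout: bits [3:0] carry the port number, bit 7 the
+	 * mode flag and bits [31:8] a timestamp in seconds.
+	 */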
+ val = adapter->portnum & 0xf;
+ val |= mode << 7;
+ if (mode)
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ else
+ seconds = jiffies / HZ;
+
+ val |= seconds << 8;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT, val);
+ adapter->ahw->idc.sec_counter = jiffies / HZ;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void qlcnic_83xx_idc_update_minor_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
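+	/* Each function advertises its IDC minor version in a 2-bit field */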
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION);
+ val = val & ~(0x3 << (adapter->portnum * 2));
+ val = val | (QLC_83XX_IDC_MINOR_VERSION << (adapter->portnum * 2));
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION, val);
+}
+
+static int qlcnic_83xx_idc_update_major_version(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ val = val & ~0xFF;
+ val = val | QLC_83XX_IDC_MAJOR_VERSION;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_update_drv_presence_reg(struct qlcnic_adapter *adapter,
+ int status, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+
+ if (status)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ qlcnic_83xx_idc_update_minor_version(adapter);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_major_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ u8 version;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ version = val & 0xFF;
+
+ if (version != QLC_83XX_IDC_MAJOR_VERSION) {
+ dev_info(&adapter->pdev->dev,
+ "%s:mismatch. version 0x%x, expected version 0x%x\n",
+ __func__, version, QLC_83XX_IDC_MAJOR_VERSION);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0);
+	/* Clear graceful reset bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_update_drv_ack_reg(struct qlcnic_adapter *adapter,
+ int flag, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ if (flag)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_timeout(struct qlcnic_adapter *adapter,
+ int time_limit)
+{
+ u64 seconds;
+
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ if (seconds <= time_limit)
+ return 0;
+ else
+ return -EBUSY;
+}
+
+/**
+ * qlcnic_83xx_idc_check_reset_ack_reg
+ *
+ * @adapter: adapter structure
+ *
+ * Check ACK wait limit and clear the functions which failed to ACK
+ *
+ * Return 0 if all functions have acknowledged the reset request.
+ **/
+static int qlcnic_83xx_idc_check_reset_ack_reg(struct qlcnic_adapter *adapter)
+{
+ int timeout;
+ u32 ack, presence, val;
+
+ timeout = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ ack = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ presence = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ dev_info(&adapter->pdev->dev,
+ "%s: ack = 0x%x, presence = 0x%x\n", __func__, ack, presence);
+ if (!((ack & presence) == presence)) {
+ if (qlcnic_83xx_idc_check_timeout(adapter, timeout)) {
+ /* Clear functions which failed to ACK */
+ dev_info(&adapter->pdev->dev,
+ "%s: ACK wait exceeds time limit\n", __func__);
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(ack ^ presence);
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: updated drv presence reg = 0x%x\n",
+ __func__, val);
+ qlcnic_83xx_unlock_driver(adapter);
+ return 0;
+
+ } else {
+ return 1;
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: Reset ACK received from all functions\n",
+ __func__);
+ return 0;
+ }
+}
+
+/**
+ * qlcnic_83xx_idc_tx_soft_reset
+ *
+ * @adapter: adapter structure
+ *
+ * Handle context deletion and recreation request from transmit routine
+ *
+ * Returns -EBUSY or Success (0)
+ *
+ **/
+static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+ qlcnic_down(adapter, netdev);
+ qlcnic_up(adapter, netdev);
+ netif_device_attach(netdev);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ dev_err(&adapter->pdev->dev, "%s:\n", __func__);
+
+ adapter->netdev->trans_start = jiffies;
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_detach_driver
+ *
+ * @adapter: adapter structure
+ * Detach the net interface, stop TX and clean up resources before the HW reset.
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
+{
+ int i;
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ /* Disable mailbox interrupt */
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0);
+ qlcnic_down(adapter, netdev);
+ for (i = 0; i < adapter->ahw->num_msix; i++) {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+}
+
+/**
+ * qlcnic_83xx_idc_attach_driver
+ *
+ * @adapter: adapter structure
+ *
+ * Re-attach and re-enable net interface
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev)) {
+ if (qlcnic_up(adapter, netdev))
+ goto done;
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+done:
+ netif_device_attach(netdev);
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+}
+
+static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_FAILED);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ qlcnic_83xx_idc_log_state_history(adapter);
+ dev_info(&adapter->pdev->dev, "Device will enter failed state\n");
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_init_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_INIT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_need_quiesce(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_enter_need_reset_state(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_RESET);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_ready_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_READY);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_find_reset_owner_id
+ *
+ * @adapter: adapter structure
+ *
+ * NIC gets precedence over ISCSI and ISCSI has precedence over FCOE.
+ * Within the same class, function with lowest PCI ID assumes ownership
+ *
+ * Returns: reset owner id or failure indication (-EIO)
+ *
+ **/
+static int qlcnic_83xx_idc_find_reset_owner_id(struct qlcnic_adapter *adapter)
+{
+ u32 reg, reg1, reg2, i, j, owner, class;
+
+ reg1 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_1);
+ reg2 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_2);
+ owner = QLCNIC_TYPE_NIC;
+ i = 0;
+ j = 0;
+ reg = reg1;
+
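+	/* Each function occupies a 4-bit field in the partition info registers;
+	 * the low 2 bits of that field encode the function class.
+	 */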
+ do {
+ class = (((reg & (0xF << j * 4)) >> j * 4) & 0x3);
+ if (class == owner)
+ break;
+ if (i == (QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO - 1)) {
+ reg = reg2;
+ j = 0;
+ } else {
+ j++;
+ }
+
+ if (i == (QLC_83XX_IDC_MAX_CNA_FUNCTIONS - 1)) {
+ if (owner == QLCNIC_TYPE_NIC)
+ owner = QLCNIC_TYPE_ISCSI;
+ else if (owner == QLCNIC_TYPE_ISCSI)
+ owner = QLCNIC_TYPE_FCOE;
+ else if (owner == QLCNIC_TYPE_FCOE)
+ return -EIO;
+ reg = reg1;
+ j = 0;
+ i = 0;
+ }
+ } while (i++ < QLC_83XX_IDC_MAX_CNA_FUNCTIONS);
+
+ return i;
+}
+
+static int qlcnic_83xx_idc_restart_hw(struct qlcnic_adapter *adapter, int lock)
+{
+ int ret = 0;
+
+ ret = qlcnic_83xx_restart_hw(adapter);
+
+ if (ret) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, lock);
+ } else {
+ qlcnic_83xx_idc_clear_registers(adapter, lock);
+ ret = qlcnic_83xx_idc_enter_ready_state(adapter, lock);
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR) {
+ dev_err(&adapter->pdev->dev,
+ "peg halt status1=0x%x\n", status);
+ if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+ dev_err(&adapter->pdev->dev,
+ "On board active cooling fan failed. "
+ "Device has been halted.\n");
+ dev_err(&adapter->pdev->dev,
+ "Replace the adapter.\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
+{
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+
+ qlcnic_83xx_enable_mbx_intrpt(adapter);
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (qlcnic_83xx_config_intrpt(adapter, 1)) {
+ netdev_err(adapter->netdev,
+ "Failed to enable mbx intr\n");
+ return -EIO;
+ }
+ }
+
+ if (qlcnic_83xx_configure_opmode(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ if (adapter->nic_ops->init_driver(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ qlcnic_83xx_idc_attach_driver(adapter);
+
+ return 0;
+}
+
+static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
+{
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ adapter->ahw->idc.quiesce_req = 0;
+ adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ adapter->ahw->idc.err_code = 0;
+ adapter->ahw->idc.collect_dump = 0;
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Perform ready state initialization; this routine is invoked only once
+ * from the READY state.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+ /* Re-attach the device if required */
+ if ((ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+ (ahw->idc.prev_state == QLC_83XX_IDC_DEV_INIT)) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_vnic_pf_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Ensure vNIC mode privileged function starts only after vNIC mode is
+ * enabled by management function.
+ * If vNIC mode is ready, start initialization.
+ *
+ * Returns: -EIO or 0
+ *
+ **/
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Privileged function waits till mgmt function enables VNIC mode */
+ state = QLCRDX(adapter->ahw, QLC_83XX_VNIC_STATE);
+ if (state != QLCNIC_DEV_NPAR_OPER) {
+ if (!ahw->idc.vnic_wait_limit--) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ dev_info(&adapter->pdev->dev, "vNIC mode disabled\n");
+ return -EIO;
+
+ } else {
+ /* Perform one time initialization from ready state */
+ if (ahw->idc.vnic_state != QLCNIC_DEV_NPAR_OPER) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+
+			/* If the previous state is UNKNOWN, the device is
+			 * already attached properly by the init routine */
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_UNKNOWN) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_OPER;
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled\n");
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->idc.err_code = -EIO;
+ dev_err(&adapter->pdev->dev,
+ "%s: Device in unknown state\n", __func__);
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_cold_state_handler
+ *
+ * @adapter: adapter structure
+ *
+ * If the HW is up and running, the device will enter the READY state.
+ * If a firmware image from the host needs to be loaded, the device is
+ * forced to start with the file firmware image.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_cold_state_handler(struct qlcnic_adapter *adapter)
+{
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 0);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 1, 0);
+
+ if (qlcnic_load_fw_file) {
+ qlcnic_83xx_idc_restart_hw(adapter, 0);
+ } else {
+ if (qlcnic_83xx_check_hw_status(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
+ return -EIO;
+ } else {
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
+ }
+ }
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_init_state
+ *
+ * @adapter: adapter structure
+ *
+ * Reset owner will restart the device from this state.
+ * Device will enter failed state if it remains
+ * in this state for more than DEV_INIT time limit.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
+{
+ int timeout, ret = 0;
+ u32 owner;
+
+ timeout = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ if (adapter->ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (adapter->ahw->pci_func == owner)
+ ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
+ } else {
+ ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state
+ *
+ * @adapter: adapter structure
+ *
+ * Perform IDC protocol specified actions after monitoring device state and
+ * events.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int ret = 0;
+
+ /* Perform NIC configuration based ready state entry actions */
+ if (ahw->idc.state_entry(adapter))
+ return -EIO;
+
+ if (qlcnic_check_temp(adapter)) {
+ if (ahw->temp == QLCNIC_TEMP_PANIC) {
+ qlcnic_83xx_idc_check_fan_failure(adapter);
+ dev_err(&adapter->pdev->dev,
+ "Error: device temperature %d above limits\n",
+ adapter->ahw->temp);
+ clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ ret = qlcnic_83xx_check_heartbeat(adapter);
+ if (ret) {
+ adapter->flags |= QLCNIC_FW_HANG;
+ if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ }
+ return -EIO;
+ }
+
+ if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+ /* Move to need reset state and prepare for reset */
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ return ret;
+ }
+
+ /* Check for soft reset request */
+ if (ahw->reset_context &&
+ !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ qlcnic_83xx_idc_tx_soft_reset(adapter);
+ return ret;
+ }
+
+ /* Move to need quiesce state if requested */
+ if (adapter->ahw->idc.quiesce_req) {
+ qlcnic_83xx_idc_enter_need_quiesce(adapter, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_need_reset_state
+ *
+ * @adapter: adapter structure
+ *
+ * Device will remain in this state until:
+ * Reset request ACKs are received from all the functions, or
+ * Wait time exceeds max time limit
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
+{
+ int ret = 0;
+
+ if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
+ qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ }
+
+ /* Check ACK from other functions */
+ ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
+ if (ret) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Waiting for reset ACK\n", __func__);
+ return 0;
+ }
+
+ /* Transit to INIT state and restart the HW */
+ qlcnic_83xx_idc_enter_init_state(adapter, 1);
+
+ return ret;
+}
+
+static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+ adapter->ahw->idc.err_code = -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
+ u32 state)
+{
+ u32 cur, prev, next;
+
+ cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+ next = state;
+
+ if ((next < QLC_83XX_IDC_DEV_COLD) ||
+ (next > QLC_83XX_IDC_DEV_QUISCENT)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: curr %d, prev %d, next state %d is invalid\n",
+ __func__, cur, prev, state);
+ return 1;
+ }
+
+ if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) &&
+ (prev == QLC_83XX_IDC_DEV_UNKNOWN)) {
+ if ((next != QLC_83XX_IDC_DEV_COLD) &&
+ (next != QLC_83XX_IDC_DEV_READY)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ if (next == QLC_83XX_IDC_DEV_INIT) {
+ if ((prev != QLC_83XX_IDC_DEV_INIT) &&
+ (prev != QLC_83XX_IDC_DEV_COLD) &&
+ (prev != QLC_83XX_IDC_DEV_NEED_RESET)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+}
+
+/**
+ * qlcnic_83xx_idc_poll_dev_state
+ *
+ * @work: kernel work queue structure used to schedule the function
+ *
+ * Poll device state periodically and perform state specific
+ * actions defined by Inter Driver Communication (IDC) protocol.
+ *
+ * Returns: None
+ *
+ **/
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ u32 state;
+
+ adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_idc_log_state_history(adapter);
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ } else {
+ adapter->ahw->idc.curr_state = state;
+ }
+
+ switch (adapter->ahw->idc.curr_state) {
+ case QLC_83XX_IDC_DEV_READY:
+ qlcnic_83xx_idc_ready_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_RESET:
+ qlcnic_83xx_idc_need_reset_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+ qlcnic_83xx_idc_need_quiesce_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_FAILED:
+ qlcnic_83xx_idc_failed_state(adapter);
+ return;
+ case QLC_83XX_IDC_DEV_INIT:
+ qlcnic_83xx_idc_init_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_QUISCENT:
+ qlcnic_83xx_idc_quiesce_state(adapter);
+ break;
+ default:
+ qlcnic_83xx_idc_unknown_state(adapter);
+ return;
+ }
+ adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
+ qlcnic_83xx_periodic_tasks(adapter);
+
+ /* Re-schedule the function */
+ if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+ adapter->ahw->idc.delay);
+}
+
+static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
+{
+ u32 idc_params, val;
+
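+	/* The IDC parameter word in flash packs the init timeout in its low
+	 * 16 bits and the reset-ACK timeout in its high 16 bits (seconds).
+	 */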
+ if (qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_IDC_FLASH_PARAM_ADDR,
+ (u8 *)&idc_params, 1)) {
+ dev_info(&adapter->pdev->dev,
+ "%s:failed to get IDC params from flash\n", __func__);
+ adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ adapter->reset_ack_timeo = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ } else {
+ adapter->dev_init_timeo = idc_params & 0xFFFF;
+ adapter->reset_ack_timeo = ((idc_params >> 16) & 0xFFFF);
+ }
+
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.prev_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ adapter->ahw->idc.err_code = 0;
+ adapter->ahw->idc.collect_dump = 0;
+ adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+
+ /* Check if reset recovery is disabled */
+ if (!qlcnic_auto_fw_reset) {
+ /* Propagate do not reset request to other functions */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+}
+
+static int
+qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter)
+{
+ u32 state, val;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EIO;
+
+ /* Clear driver lock register */
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, 0);
+ if (qlcnic_83xx_idc_update_major_version(adapter, 0)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ if (state != QLC_83XX_IDC_DEV_COLD && qlcnic_load_fw_file) {
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_COLD);
+ state = QLC_83XX_IDC_DEV_COLD;
+ }
+
+ adapter->ahw->idc.curr_state = state;
+ /* First to load function should cold boot the device */
+ if (state == QLC_83XX_IDC_DEV_COLD)
+ qlcnic_83xx_idc_cold_state_handler(adapter);
+
+ /* Check if reset recovery is enabled */
+ if (qlcnic_auto_fw_reset) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val & ~QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter)
+{
+ int ret = -EIO;
+
+ qlcnic_83xx_setup_idc_parameters(adapter);
+
+ if (qlcnic_83xx_get_reset_instruction_template(adapter))
+ return ret;
+
+ if (!qlcnic_83xx_idc_check_driver_presence_reg(adapter)) {
+ if (qlcnic_83xx_idc_first_to_load_function_handler(adapter))
+ return -EIO;
+ } else {
+ if (qlcnic_83xx_idc_check_major_version(adapter))
+ return -EIO;
+ }
+
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+
+ return 0;
+}
+
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *adapter)
+{
+ int id;
+ u32 val;
+
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+
+ if (id == adapter->portnum) {
+ dev_err(&adapter->pdev->dev,
+ "%s: wait for lock recovery.. %d\n", __func__, id);
+ msleep(20);
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+ }
+
+ /* Clear driver presence bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
+{
+ u32 val;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed, please retry\n", __func__);
+ return;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) ||
+ !qlcnic_auto_fw_reset) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed, device in non reset mode\n", __func__);
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+ }
+
+ if (key == QLCNIC_FORCE_FW_RESET) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ } else if (key == QLCNIC_FORCE_FW_DUMP_KEY) {
+ adapter->ahw->idc.collect_dump = 1;
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+}
+
+static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
+{
+ u8 *p_cache;
+ u32 src, size;
+ u64 dest;
+ int ret = -EIO;
+
+ src = QLC_83XX_BOOTLOADER_FLASH_ADDR;
+ dest = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_ADDR);
+ size = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_SIZE);
+
+	/* Round the size up to a 16-byte boundary */
+ if (size & 0xF)
+ size = (size + 16) & ~0xF;
+
+ p_cache = kzalloc(size, GFP_KERNEL);
+ if (p_cache == NULL)
+ return -ENOMEM;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
+ size / sizeof(u32));
+ if (ret) {
+ kfree(p_cache);
+ return ret;
+ }
+ /* 16 byte write to MS memory */
+ ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+ size / 16);
+ if (ret) {
+ kfree(p_cache);
+ return ret;
+ }
+ kfree(p_cache);
+
+ return ret;
+}
+
+static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
+{
+ u32 dest, *p_cache;
+ u64 addr;
+ u8 data[16];
+ size_t size;
+ int i, ret = -EIO;
+
+ dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
+ size = (adapter->ahw->fw_info.fw->size & ~0xF);
+ p_cache = (u32 *)adapter->ahw->fw_info.fw->data;
+ addr = (u64)dest;
+
+ ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
+ (u32 *)p_cache, size / 16);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "MS memory write failed\n");
+ release_firmware(adapter->ahw->fw_info.fw);
+ adapter->ahw->fw_info.fw = NULL;
+ return -EIO;
+ }
+
+	/* Handle the unaligned tail: zero-pad to a full 16 bytes and write it */
+ if (adapter->ahw->fw_info.fw->size & 0xF) {
+ addr = dest + size;
+ for (i = 0; i < (adapter->ahw->fw_info.fw->size & 0xF); i++)
+ data[i] = adapter->ahw->fw_info.fw->data[size + i];
+ for (; i < 16; i++)
+ data[i] = 0;
+ ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
+ (u32 *)data, 1);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "MS memory write failed\n");
+ release_firmware(adapter->ahw->fw_info.fw);
+ adapter->ahw->fw_info.fw = NULL;
+ return -EIO;
+ }
+ }
+ release_firmware(adapter->ahw->fw_info.fw);
+ adapter->ahw->fw_info.fw = NULL;
+
+ return 0;
+}
+
+static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
+{
+ int i, j;
+ u32 val = 0, val1 = 0, reg = 0;
+
+ val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG);
+ dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_THRESHOLD;
+ }
+ for (i = 0; i < 8; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4));
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+ }
+ for (i = 0; i < 4; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4));
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_TC_STATS;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_TC_STATS;
+ }
+ for (i = 7; i >= 0; i--) {
+ val = QLCRD32(adapter, reg);
+ val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
+ QLCWR32(adapter, reg, (val | (i << 29)));
+ val = QLCRD32(adapter, reg);
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD);
+ val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD);
+ dev_info(&adapter->pdev->dev,
+ "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
+ val, val1);
+}
+
+
+static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter)
+{
+ u32 reg = 0, i, j;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed to acquire driver lock\n", __func__);
+ return;
+ }
+
+ qlcnic_83xx_dump_pause_control_regs(adapter);
+ QLCWR32(adapter, QLC_83XX_SRE_SHIM_REG, 0x0);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_THRESHOLD;
+
+ for (i = 0; i < 8; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x0);
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+
+ for (i = 0; i < 4; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x03FF03FF);
+ }
+
+ QLCWR32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, 0);
+ QLCWR32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, 0);
+ dev_info(&adapter->pdev->dev,
+ "Disabled pause frames successfully on all ports\n");
+ qlcnic_83xx_unlock_driver(adapter);
+}
+
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
+{
+ u32 heartbeat, peg_status;
+ int retries, ret = -EIO;
+
+ retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
+ p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+
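+	/* The firmware is considered alive if the PEG alive counter advances
+	 * between successive samples.
+	 */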
+ do {
+ msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+ heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != p_dev->heartbeat) {
+ ret = QLCNIC_RCODE_SUCCESS;
+ break;
+ }
+ } while (--retries);
+
+ if (ret) {
+ dev_err(&p_dev->pdev->dev, "firmware hang detected\n");
+ qlcnic_83xx_disable_pause_frames(p_dev);
+ peg_status = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_HALT_STATUS1);
+ dev_info(&p_dev->pdev->dev, "Dumping HW/FW registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n", peg_status,
+ QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4));
+
+ if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
+ dev_err(&p_dev->pdev->dev,
+ "Device is being reset err code 0x00006700.\n");
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev)
+{
+ int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+ u32 val;
+
+ do {
+ val = QLC_SHARED_REG_RD32(p_dev, QLCNIC_CMDPEG_STATE);
+ if (val == QLC_83XX_CMDPEG_COMPLETE)
+ return 0;
+ msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+ } while (--retries);
+
+ dev_err(&p_dev->pdev->dev, "%s: failed, state = 0x%x\n", __func__, val);
+ return -EIO;
+}
+
+int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
+{
+ int err;
+
+ err = qlcnic_83xx_check_cmd_peg_status(p_dev);
+ if (err)
+ return err;
+
+ err = qlcnic_83xx_check_heartbeat(p_dev);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
+ int duration, u32 mask, u32 status)
+{
+ u32 value;
+ int timeout_error;
+ u8 retries;
+
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ retries = duration / 10;
+
+ do {
+ if ((value & mask) != status) {
+ timeout_error = 1;
+ msleep(duration / 10);
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ } else {
+ timeout_error = 0;
+ break;
+ }
+ } while (retries--);
+
+ if (timeout_error) {
+ p_dev->ahw->reset.seq_error++;
+ dev_err(&p_dev->pdev->dev,
+ "%s: Timeout Err, entry_num = %d\n",
+ __func__, p_dev->ahw->reset.seq_index);
+ dev_err(&p_dev->pdev->dev,
+ "0x%08x 0x%08x 0x%08x\n",
+ value, mask, status);
+ }
+
+ return timeout_error;
+}
+
+static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
+{
+ u32 sum = 0;
+ u16 *buff = (u16 *)p_dev->ahw->reset.buff;
+ int count = p_dev->ahw->reset.hdr->size / sizeof(u16);
+
+ while (count-- > 0)
+ sum += *buff++;
+
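+	/* Fold carries so the result is a 16-bit one's complement sum */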
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ if (~sum) {
+ return 0;
+ } else {
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+ return -1;
+ }
+}
+
+int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
+{
+ u8 *p_buff;
+ u32 addr, count;
+ struct qlcnic_hardware_context *ahw = p_dev->ahw;
+
+ ahw->reset.seq_error = 0;
+ ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
+ if (p_dev->ahw->reset.buff == NULL)
+ return -ENOMEM;
+
+ p_buff = p_dev->ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR;
+ count = sizeof(struct qlc_83xx_reset_hdr) / sizeof(u32);
+
+ /* Copy template header from flash */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+ ahw->reset.hdr = (struct qlc_83xx_reset_hdr *)ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR + ahw->reset.hdr->hdr_size;
+ p_buff = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ count = (ahw->reset.hdr->size - ahw->reset.hdr->hdr_size) / sizeof(u32);
+
+ /* Copy rest of the template */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+
+ if (qlcnic_83xx_reset_template_checksum(p_dev))
+ return -EIO;
+ /* Get Stop, Start and Init command offsets */
+ ahw->reset.init_offset = ahw->reset.buff + ahw->reset.hdr->init_offset;
+ ahw->reset.start_offset = ahw->reset.buff +
+ ahw->reset.hdr->start_offset;
+ ahw->reset.stop_offset = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ return 0;
+}
+
+/* Read Write HW register command */
+static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr)
+{
+ int value;
+
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Read Modify Write HW register command */
+static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr,
+ struct qlc_83xx_rmw *p_rmw_hdr)
+{
+ int value;
+
+ if (p_rmw_hdr->index_a)
+ value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
+ else
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+
+ value &= p_rmw_hdr->mask;
+ value <<= p_rmw_hdr->shl;
+ value >>= p_rmw_hdr->shr;
+ value |= p_rmw_hdr->or_value;
+ value ^= p_rmw_hdr->xor_value;
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Write HW register command */
+static void qlcnic_83xx_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Read and Write instruction */
+static void qlcnic_83xx_read_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_read_write_crb_reg(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Poll HW register command */
+static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_poll *poll;
+ int i;
+ unsigned long arg1, arg2;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ if (!delay) {
+ for (i = 0; i < p_hdr->count; i++, entry++)
+ qlcnic_83xx_poll_reg(p_dev, entry->arg1,
+ delay, poll->mask,
+ poll->status);
+ } else {
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ arg1 = entry->arg1;
+ arg2 = entry->arg2;
+ if (delay) {
+ if (qlcnic_83xx_poll_reg(p_dev,
+ arg1, delay,
+ poll->mask,
+ poll->status)){
+ qlcnic_83xx_rd_reg_indirect(p_dev,
+ arg1);
+ qlcnic_83xx_rd_reg_indirect(p_dev,
+ arg2);
+ }
+ }
+ }
+ }
+}
+
+/* Poll and write HW register command */
+static void qlcnic_83xx_poll_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ long delay;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->dr_addr,
+ entry->dr_value);
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay)
+ qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status);
+ }
+}
+
+/* Read Modify Write register command */
+static void qlcnic_83xx_read_modify_write(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_rmw *rmw_hdr;
+
+ rmw_hdr = (struct qlc_83xx_rmw *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)rmw_hdr +
+ sizeof(struct qlc_83xx_rmw));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_rmw_crb_reg(p_dev, entry->arg1,
+ entry->arg2, rmw_hdr);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+static void qlcnic_83xx_pause(struct qlc_83xx_entry_hdr *p_hdr)
+{
+ if (p_hdr->delay)
+ mdelay((u32)((long)p_hdr->delay));
+}
+
+/* Read and poll register command */
+static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ int index, i, j;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+ unsigned long addr;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay) {
+ if (!qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status)){
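+				/* Save the read-back value; later RMW entries
+				 * can reference it via their index_a field.
+				 */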
+ index = p_dev->ahw->reset.array_index;
+ addr = entry->dr_addr;
+ j = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ p_dev->ahw->reset.array[index++] = j;
+
+ if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
+ p_dev->ahw->reset.array_index = 1;
+ }
+ }
+ }
+}
+
+static inline void qlcnic_83xx_seq_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_end = 1;
+}
+
+static void qlcnic_83xx_template_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.template_end = 1;
+ if (p_dev->ahw->reset.seq_error == 0)
+ dev_err(&p_dev->pdev->dev,
+ "HW restart process completed successfully.\n");
+ else
+ dev_err(&p_dev->pdev->dev,
+ "HW restart completed with timeout errors.\n");
+}
+
+/**
+* qlcnic_83xx_exec_template_cmd
+*
+* @p_dev: adapter structure
+* @p_buff: Pointer to the instruction template
+*
+* The template provides instructions to stop, restart and initialize firmware.
+* These instructions are abstracted as a series of read, write and
+* poll operations on hardware registers. Register information and operation
+* specifics are not exposed to the driver. The driver reads the template from
+* flash and executes the instructions located at pre-defined offsets.
+*
+* Returns: None
+* */
+static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
+ char *p_buff)
+{
+ int index, entries;
+ struct qlc_83xx_entry_hdr *p_hdr;
+ char *entry = p_buff;
+
+ p_dev->ahw->reset.seq_end = 0;
+ p_dev->ahw->reset.template_end = 0;
+ entries = p_dev->ahw->reset.hdr->entries;
+ index = p_dev->ahw->reset.seq_index;
+
+ for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) {
+ p_hdr = (struct qlc_83xx_entry_hdr *)entry;
+
+ switch (p_hdr->cmd) {
+ case QLC_83XX_OPCODE_NOP:
+ break;
+ case QLC_83XX_OPCODE_WRITE_LIST:
+ qlcnic_83xx_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_WRITE_LIST:
+ qlcnic_83xx_read_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_LIST:
+ qlcnic_83xx_poll_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_WRITE_LIST:
+ qlcnic_83xx_poll_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_MODIFY_WRITE:
+ qlcnic_83xx_read_modify_write(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_PAUSE:
+ qlcnic_83xx_pause(p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_END:
+ qlcnic_83xx_seq_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_TMPL_END:
+ qlcnic_83xx_template_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_POLL_READ_LIST:
+ qlcnic_83xx_poll_read_list(p_dev, p_hdr);
+ break;
+ default:
+ dev_err(&p_dev->pdev->dev,
+ "%s: Unknown opcode 0x%04x in template %d\n",
+ __func__, p_hdr->cmd, index);
+ break;
+ }
+ entry += p_hdr->size;
+ }
+ p_dev->ahw->reset.seq_index = index;
+}
+
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_index = 0;
+
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.stop_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_start_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.start_offset);
+ if (p_dev->ahw->reset.template_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.init_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ if (request_firmware(&adapter->ahw->fw_info.fw,
+ QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) {
+ dev_err(&adapter->pdev->dev,
+ "No file FW image, loading flash FW image.\n");
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ } else {
+ if (qlcnic_83xx_copy_fw_file(adapter))
+ return err;
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FILE);
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int err = -EIO;
+
+ qlcnic_83xx_stop_hw(adapter);
+
+ /* Collect FW register dump if required */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
+ qlcnic_dump_fw(adapter);
+ qlcnic_83xx_init_hw(adapter);
+
+ if (qlcnic_83xx_copy_bootloader(adapter))
+ return err;
+ /* Boot either flash image or firmware image from host file system */
+ if (qlcnic_load_fw_file) {
+ if (qlcnic_83xx_load_fw_image_from_host(adapter))
+ return err;
+ } else {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ }
+
+ qlcnic_83xx_start_hw(adapter);
+ if (qlcnic_83xx_check_hw_status(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+* qlcnic_83xx_config_default_opmode
+*
+* @adapter: adapter structure
+*
+* Configure default driver operating mode
+*
+* Returns: Error code or Success(0)
+* */
+int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
+{
+ u32 op_mode;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ qlcnic_get_func_no(adapter);
+ op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
+
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
+ adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ } else {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
+{
+ int err;
+ struct qlcnic_info nic_info;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+ if (err)
+ return -EIO;
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_tx_ques = nic_info.max_tx_ques;
+ ahw->max_rx_ques = nic_info.max_rx_ques;
+ ahw->capabilities = nic_info.capabilities;
+ ahw->max_mac_filters = nic_info.max_mac_filters;
+ ahw->max_mtu = nic_info.max_mtu;
+
+ if (ahw->capabilities & BIT_23)
+ ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
+ else
+ ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+
+ return ahw->nic_mode;
+}
+
+static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ ret = qlcnic_83xx_get_nic_configuration(adapter);
+ if (ret == -EIO)
+ return -EIO;
+
+ if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
+ if (qlcnic_83xx_config_vnic_opmode(adapter))
+ return -EIO;
+ } else if (ret == QLC_83XX_DEFAULT_MODE) {
+ if (qlcnic_83xx_config_default_opmode(adapter))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void qlcnic_83xx_config_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
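+/* A function is considered in use when its port bit is set in the IDC driver
+ * presence mask or bit 6 of the driver audit mask is set.
+ */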
+#define IS_QLC_83XX_USED(a, b, c) (((1 << a->portnum) & b) || ((c >> 6) & 0x1))
+static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 presence_mask, audit_mask;
+ int status;
+
+ presence_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+
+ if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC);
+ cmd.req.arg[1] = BIT_31;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to clean up the function resources\n");
+ qlcnic_free_mbx_args(&cmd);
+ }
+}
+
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_check_hw_status(adapter))
+ return -EIO;
+
+	/* Initialize 83xx mailbox spinlock */
+ spin_lock_init(&ahw->mbx_lock);
+
+ set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ qlcnic_83xx_clear_function_resources(adapter);
+
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+
+ if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
+ qlcnic_83xx_read_flash_mfg_id(adapter);
+
+ if (qlcnic_83xx_idc_init(adapter))
+ return -EIO;
+
+ /* Configure default, SR-IOV or Virtual NIC mode of operation */
+ if (qlcnic_83xx_configure_opmode(adapter))
+ return -EIO;
+
+ /* Perform operating mode specific initialization */
+ if (adapter->nic_ops->init_driver(adapter))
+ return -EIO;
+
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+ /* Periodically monitor device status */
+ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+
+ return adapter->ahw->idc.err_code;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
new file mode 100644
index 000000000000..b0c3de9ede03
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -0,0 +1,225 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_OPER);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u8 id;
+ int i, ret = -EBUSY;
+ u32 data = QLCNIC_MGMT_FUNC;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return ret;
+
+ if (qlcnic_config_npars) {
+ for (i = 0; i < ahw->act_pci_func; i++) {
+ id = adapter->npars[i].pci_func;
+ if (id == ahw->pci_func)
+ continue;
+ data |= qlcnic_config_npars &
+ QLC_83XX_SET_FUNC_OPMODE(0x3, id);
+ }
+ } else {
+ data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, ahw->pci_func)) |
+ QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC,
+ ahw->pci_func);
+ }
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data);
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+/**
+ * qlcnic_83xx_init_mgmt_vnic
+ *
+ * @adapter: adapter structure
+ *
+ * The management virtual NIC sets the operational mode of the other vNICs
+ * and configures the embedded switch (eswitch).
+ *
+ * Returns: 0 on success or an error code.
+ */
+static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
+ if (qlcnic_init_pci_info(adapter))
+ return err;
+
+ if (qlcnic_83xx_set_vnic_opmode(adapter))
+ return err;
+
+ if (qlcnic_set_default_offload_settings(adapter))
+ return err;
+ } else {
+ if (qlcnic_reset_npar_config(adapter))
+ return err;
+ }
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+ qlcnic_83xx_enable_vnic_mode(adapter, 1);
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
+static int qlcnic_83xx_init_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->ahw->fw_hal_version);
+ return 0;
+}
+
+static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_fw_version(adapter);
+ if (qlcnic_set_eswitch_port_config(adapter))
+ return err;
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d, Virtual function\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_config_vnic_opmode
+ *
+ * @adapter: adapter structure
+ *
+ * Identify the virtual NIC operational mode and set up the corresponding
+ * driver callbacks.
+ *
+ * Returns: 0 on success or an error code.
+ */
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_nic_template *nic_ops = adapter->nic_ops;
+
+ qlcnic_get_func_no(adapter);
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+ } else if (priv_level == QLCNIC_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+ } else if (priv_level == QLCNIC_MGMT_FUNC) {
+ ahw->op_mode = QLCNIC_MGMT_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+ } else {
+ return -EIO;
+ }
+
+ if (ahw->capabilities & BIT_23)
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ else
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+
+ adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+ adapter->ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index b14b8f0787ea..a69097c6b84d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1,12 +1,92 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic.h"
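+/* Mailbox command table: {command opcode, number of request args, number of response args} */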
+static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
+ {QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 4, 1},
+ {QLCNIC_CMD_SET_MTU, 4, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 4, 1},
+ {QLCNIC_CMD_GET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_SET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_MAC_STATS, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 4, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
+ {QLCNIC_CMD_SET_DRV_VER, 4, 1},
+};
+
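+/* CDRP command signature: PCI function in bits 0-7, FW HAL version in bits 8-15, 0xcafe magic in the upper 16 bits */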
+static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
+{
+ return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
+ (0xcafe << 16);
+}
+
+/* Allocate mailbox registers */
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ mbx_tbl = qlcnic_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
+ memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
+ mbx->req.arg[0] = type;
+ break;
+ }
+ }
+ return 0;
+}
+
+/* Free up mailbox registers */
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
+{
+ kfree(cmd->req.arg);
+ cmd->req.arg = NULL;
+ kfree(cmd->rsp.arg);
+ cmd->rsp.arg = NULL;
+}
+
static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
int i;
@@ -38,194 +118,123 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
return rsp;
}
-void
-qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
+ int i;
u32 rsp;
u32 signature;
struct pci_dev *pdev = adapter->pdev;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ const char *fmt;
- signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func,
- adapter->ahw->fw_hal_version);
+ signature = qlcnic_get_cmd_signature(ahw);
/* Acquire semaphore before accessing CRB */
if (qlcnic_api_lock(adapter)) {
- cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
- return;
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+ return cmd->rsp.arg[0];
}
QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
- QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, cmd->req.arg1);
- QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, cmd->req.arg2);
- QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, cmd->req.arg3);
+ for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++)
+ QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
- QLCNIC_CDRP_FORM_CMD(cmd->req.cmd));
-
+ QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
rsp = qlcnic_poll_rsp(adapter);
if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
- dev_err(&pdev->dev, "CDRP response timeout.\n");
- cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
+ dev_err(&pdev->dev, "card response timeout.\n");
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
- cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- switch (cmd->rsp.cmd) {
+ cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1));
+ switch (cmd->rsp.arg[0]) {
case QLCNIC_RCODE_INVALID_ARGS:
- dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP invalid args: [%d]\n";
break;
case QLCNIC_RCODE_NOT_SUPPORTED:
case QLCNIC_RCODE_NOT_IMPL:
- dev_err(&pdev->dev,
- "CDRP command not supported: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command not supported: [%d]\n";
break;
case QLCNIC_RCODE_NOT_PERMITTED:
- dev_err(&pdev->dev,
- "CDRP requested action not permitted: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP requested action not permitted: [%d]\n";
break;
case QLCNIC_RCODE_INVALID:
- dev_err(&pdev->dev,
- "CDRP invalid or unknown cmd received: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP invalid or unknown cmd received: [%d]\n";
break;
case QLCNIC_RCODE_TIMEOUT:
- dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command timeout: [%d]\n";
break;
default:
- dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command failed: [%d]\n";
+ break;
}
- } else if (rsp == QLCNIC_CDRP_RSP_OK) {
- cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
- if (cmd->rsp.arg2)
- cmd->rsp.arg2 = QLCRD32(adapter,
- QLCNIC_ARG2_CRB_OFFSET);
- if (cmd->rsp.arg3)
- cmd->rsp.arg3 = QLCRD32(adapter,
- QLCNIC_ARG3_CRB_OFFSET);
- }
- if (cmd->rsp.arg1)
- cmd->rsp.arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+ } else if (rsp == QLCNIC_CDRP_RSP_OK)
+ cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
+
+ for (i = 1; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i));
/* Release semaphore */
qlcnic_api_unlock(adapter);
-
-}
-
-static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
-{
- uint64_t sum = 0;
- int count = temp_size / sizeof(uint32_t);
- while (count-- > 0)
- sum += *temp_buffer++;
- while (sum >> 32)
- sum = (sum & 0xFFFFFFFF) + (sum >> 32);
- return ~sum;
+ return cmd->rsp.arg[0];
}
-int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
{
- int err, i;
- void *tmp_addr;
- u32 temp_size, version, csum, *template;
- __le32 *tmp_buf;
struct qlcnic_cmd_args cmd;
- struct qlcnic_hardware_context *ahw;
- struct qlcnic_dump_template_hdr *tmpl_hdr;
- dma_addr_t tmp_addr_t = 0;
-
- ahw = adapter->ahw;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_TEMP_SIZE;
- memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS) {
- dev_info(&adapter->pdev->dev,
- "Can't get template size %d\n", cmd.rsp.cmd);
- err = -EIO;
- return err;
- }
- temp_size = cmd.rsp.arg2;
- version = cmd.rsp.arg3;
- dev_info(&adapter->pdev->dev,
- "minidump template version = 0x%x", version);
- if (!temp_size)
- return -EIO;
+ u32 arg1, arg2, arg3;
+ char drv_string[12];
+ int err = 0;
- tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
- &tmp_addr_t, GFP_KERNEL);
- if (!tmp_addr) {
- dev_err(&adapter->pdev->dev,
- "Can't get memory for FW dump template\n");
- return -ENOMEM;
- }
- memset(&cmd.rsp, 0, sizeof(struct _cdrp_cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_TEMP_HDR;
- cmd.req.arg1 = LSD(tmp_addr_t);
- cmd.req.arg2 = MSD(tmp_addr_t);
- cmd.req.arg3 = temp_size;
- qlcnic_issue_cmd(adapter, &cmd);
-
- err = cmd.rsp.cmd;
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev,
- "Failed to get mini dump template header %d\n", err);
- err = -EIO;
- goto error;
- }
- ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
- if (!ahw->fw_dump.tmpl_hdr) {
- err = -EIO;
- goto error;
- }
- tmp_buf = tmp_addr;
- template = (u32 *) ahw->fw_dump.tmpl_hdr;
- for (i = 0; i < temp_size/sizeof(u32); i++)
- *template++ = __le32_to_cpu(*tmp_buf++);
+ memset(drv_string, 0, sizeof(drv_string));
+	snprintf(drv_string, sizeof(drv_string), "%d.%d.%d",
+ _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
+ _QLCNIC_LINUX_SUBVERSION);
- csum = qlcnic_temp_checksum((u32 *)ahw->fw_dump.tmpl_hdr, temp_size);
- if (csum) {
- dev_err(&adapter->pdev->dev,
- "Template header checksum validation failed\n");
- err = -EIO;
- goto error;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER);
+ memcpy(&arg1, drv_string, sizeof(u32));
+ memcpy(&arg2, drv_string + 4, sizeof(u32));
+ memcpy(&arg3, drv_string + 8, sizeof(u32));
+
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ cmd.req.arg[3] = arg3;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to set driver version in firmware\n");
+ return -EIO;
}
- tmpl_hdr = ahw->fw_dump.tmpl_hdr;
- tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
- ahw->fw_dump.enable = 1;
-error:
- dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
- return err;
+ return 0;
}
int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
+ int err = 0;
struct qlcnic_cmd_args cmd;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_MTU;
- cmd.req.arg1 = recv_ctx->context_id;
- cmd.req.arg2 = mtu;
- cmd.req.arg3 = 0;
- if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd) {
- dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
- return -EIO;
- }
- }
+ if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
+ return err;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
+ cmd.req.arg[1] = recv_ctx->context_id;
+ cmd.req.arg[2] = mtu;
- return 0;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
}
-static int
-qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
void *addr;
struct qlcnic_hostrq_rx_ctx *prq;
@@ -242,10 +251,10 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
u64 phys_addr;
u8 i, nrds_rings, nsds_rings;
+ u16 temp_u16;
size_t rq_size, rsp_size;
u32 cap, reg, val, reg2;
int err;
- u16 temp;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -279,11 +288,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
- if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
- cap |= QLCNIC_CAP0_LRO_MSS;
-
- temp = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
- prq->valid_field_offset = cpu_to_le16(temp);
+ temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+ prq->valid_field_offset = cpu_to_le16(temp_u16);
prq->txrx_sds_binding = nsds_rings - 1;
prq->capabilities[0] = cpu_to_le32(cap);
@@ -329,20 +335,17 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
}
phys_addr = hostrq_phys_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = (u32) (phys_addr >> 32);
- cmd.req.arg2 = (u32) (phys_addr & 0xffffffff);
- cmd.req.arg3 = rq_size;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_RX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err) {
dev_err(&adapter->pdev->dev,
"Failed to create rx ctx in firmware%d\n", err);
goto out_free_rsp;
}
-
prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
@@ -373,6 +376,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
out_free_rsp:
dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
cardrsp_phys_addr);
+ qlcnic_free_mbx_args(&cmd);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
return err;
@@ -381,24 +385,24 @@ out_free_rq:
static void
qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
{
+ int err;
struct qlcnic_cmd_args cmd;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = recv_ctx->context_id;
- cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
- cmd.req.arg3 = 0;
- cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_RX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd)
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
+ cmd.req.arg[1] = recv_ctx->context_id;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
dev_err(&adapter->pdev->dev,
"Failed to destroy rx ctx in firmware\n");
recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+ qlcnic_free_mbx_args(&cmd);
}
-static int
-qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int ring)
{
struct qlcnic_hostrq_tx_ctx *prq;
struct qlcnic_hostrq_cds_ring *prq_cds;
@@ -410,7 +414,6 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
int err;
u64 phys_addr;
dma_addr_t rq_phys_addr, rsp_phys_addr;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
/* reset host resources */
tx_ring->producer = 0;
@@ -445,9 +448,9 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
prq->host_int_crb_mode =
cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+ prq->msi_index = 0;
prq->interrupt_ctl = 0;
- prq->msi_index = 0;
prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
prq_cds = &prq->cds_ring;
@@ -456,19 +459,17 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
phys_addr = rq_phys_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = (u32)(phys_addr >> 32);
- cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
- cmd.req.arg3 = rq_size;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_TX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
-
- adapter->tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+ tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
} else {
dev_err(&adapter->pdev->dev,
"Failed to create tx ctx in firmware%d\n", err);
@@ -476,76 +477,81 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
- rsp_phys_addr);
+ rsp_phys_addr);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
+ qlcnic_free_mbx_args(&cmd);
return err;
}
static void
-qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
+qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct qlcnic_cmd_args cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = adapter->tx_ring->ctx_id;
- cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
- cmd.req.arg3 = 0;
- cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd)
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
+ cmd.req.arg[1] = tx_ring->ctx_id;
+ if (qlcnic_issue_cmd(adapter, &cmd))
dev_err(&adapter->pdev->dev,
"Failed to destroy tx ctx in firmware\n");
+ qlcnic_free_mbx_args(&cmd);
}
int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
+ int err;
struct qlcnic_cmd_args cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = config;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIG_PORT;
- qlcnic_issue_cmd(adapter, &cmd);
-
- return cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
+ cmd.req.arg[1] = config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
}
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
void *addr;
- int err;
- int ring;
+ int err, ring;
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_tx_ring *tx_ring;
+ __le32 *ptr;
struct pci_dev *pdev = adapter->pdev;
recv_ctx = adapter->recv_ctx;
- tx_ring = adapter->tx_ring;
- tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
- sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
- if (tx_ring->hw_consumer == NULL) {
- dev_err(&pdev->dev, "failed to allocate tx consumer\n");
- return -ENOMEM;
- }
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
+ &tx_ring->hw_cons_phys_addr,
+ GFP_KERNEL);
- /* cmd desc ring */
- addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
- &tx_ring->phys_addr, GFP_KERNEL);
+ if (ptr == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx consumer\n");
+ return -ENOMEM;
+ }
+ tx_ring->hw_consumer = ptr;
+ /* cmd desc ring */
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr,
+ GFP_KERNEL);
- if (addr == NULL) {
- dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
- err = -ENOMEM;
- goto err_out_free;
- }
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate tx desc ring\n");
+ err = -ENOMEM;
+ goto err_out_free;
+ }
- tx_ring->desc_head = addr;
+ tx_ring->desc_head = addr;
+ }
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
@@ -584,36 +590,70 @@ err_out_free:
return err;
}
-
-int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
{
- int err;
+ int i, err, ring;
- if (adapter->flags & QLCNIC_NEED_FLR) {
- pci_reset_function(adapter->pdev);
- adapter->flags &= ~QLCNIC_NEED_FLR;
+ if (dev->flags & QLCNIC_NEED_FLR) {
+ pci_reset_function(dev->pdev);
+ dev->flags &= ~QLCNIC_NEED_FLR;
}
- err = qlcnic_fw_cmd_create_rx_ctx(adapter);
- if (err)
- return err;
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ err = qlcnic_83xx_config_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+ }
- err = qlcnic_fw_cmd_create_tx_ctx(adapter);
- if (err) {
- qlcnic_fw_cmd_destroy_rx_ctx(adapter);
- return err;
+ err = qlcnic_fw_cmd_create_rx_ctx(dev);
+ if (err)
+ goto err_out;
+
+ for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
+ err = qlcnic_fw_cmd_create_tx_ctx(dev,
+ &dev->tx_ring[ring],
+ ring);
+ if (err) {
+ qlcnic_fw_cmd_destroy_rx_ctx(dev);
+ if (ring == 0)
+ goto err_out;
+
+ for (i = 0; i < ring; i++)
+ qlcnic_fw_cmd_destroy_tx_ctx(dev,
+ &dev->tx_ring[i]);
+
+ goto err_out;
+ }
}
- set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+ set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
return 0;
+
+err_out:
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(dev, 0);
+ }
+ return err;
}
void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
+ int ring;
+
if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
qlcnic_fw_cmd_destroy_rx_ctx(adapter);
- qlcnic_fw_cmd_destroy_tx_ctx(adapter);
-
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ qlcnic_fw_cmd_destroy_tx_ctx(adapter,
+ &adapter->tx_ring[ring]);
+
+ if (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(adapter, 0);
+ }
/* Allow dma queues to drain after context reset */
mdelay(20);
}
@@ -629,20 +669,23 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
recv_ctx = adapter->recv_ctx;
- tx_ring = adapter->tx_ring;
- if (tx_ring->hw_consumer != NULL) {
- dma_free_coherent(&adapter->pdev->dev,
- sizeof(u32),
- tx_ring->hw_consumer,
- tx_ring->hw_cons_phys_addr);
- tx_ring->hw_consumer = NULL;
- }
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->hw_consumer != NULL) {
+ dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
+ tx_ring->hw_consumer,
+ tx_ring->hw_cons_phys_addr);
- if (tx_ring->desc_head != NULL) {
- dma_free_coherent(&adapter->pdev->dev,
- TX_DESC_RINGSIZE(tx_ring),
- tx_ring->desc_head, tx_ring->phys_addr);
- tx_ring->desc_head = NULL;
+ tx_ring->hw_consumer = NULL;
+ }
+
+ if (tx_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head,
+ tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
}
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -671,40 +714,43 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
-/* Get MAC address of a NIC partition */
-int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
{
- int err;
+ int err, i;
struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = adapter->ahw->pci_func | BIT_8;
- cmd.req.cmd = QLCNIC_CDRP_CMD_MAC_ADDRESS;
- cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
+ err = qlcnic_issue_cmd(adapter, &cmd);
- if (err == QLCNIC_RCODE_SUCCESS)
- qlcnic_fetch_mac(cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
- else {
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
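+		/* mac[0]-mac[1] come from the high word, mac[2]-mac[5] from the low word, most significant byte first */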
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
dev_err(&adapter->pdev->dev,
"Failed to get mac address%d\n", err);
err = -EIO;
}
-
+ qlcnic_free_mbx_args(&cmd);
return err;
}
/* Get info of a NIC partition */
-int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
- struct qlcnic_info *npar_info, u8 func_id)
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
{
int err;
dma_addr_t nic_dma_t;
- struct qlcnic_info_le *nic_info;
+ const struct qlcnic_info_le *nic_info;
void *nic_info_addr;
struct qlcnic_cmd_args cmd;
- size_t nic_size = sizeof(struct qlcnic_info_le);
+ size_t nic_size = sizeof(struct qlcnic_info_le);
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
&nic_dma_t, GFP_KERNEL);
@@ -713,47 +759,39 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_NIC_INFO;
- cmd.req.arg1 = MSD(nic_dma_t);
- cmd.req.arg2 = LSD(nic_dma_t);
- cmd.req.arg3 = (func_id << 16 | nic_size);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
- if (err == QLCNIC_RCODE_SUCCESS) {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = (func_id << 16 | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get nic info%d\n", err);
+ err = -EIO;
+ } else {
npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
+ npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+ npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
- npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
- npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
-
- dev_info(&adapter->pdev->dev,
- "phy port: %d switch_mode: %d,\n"
- "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
- "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
- npar_info->phys_port, npar_info->switch_mode,
- npar_info->max_tx_ques, npar_info->max_rx_ques,
- npar_info->min_tx_bw, npar_info->max_tx_bw,
- npar_info->max_mtu, npar_info->capabilities);
- } else {
- dev_err(&adapter->pdev->dev,
- "Failed to get nic info%d\n", err);
- err = -EIO;
}
dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
- nic_dma_t);
+ nic_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
/* Configure a NIC partition */
-int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
{
int err = -EIO;
dma_addr_t nic_dma_t;
@@ -784,13 +822,11 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_NIC_INFO;
- cmd.req.arg1 = MSD(nic_dma_t);
- cmd.req.arg2 = LSD(nic_dma_t);
- cmd.req.arg3 = ((nic->pci_func << 16) | nic_size);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
@@ -800,12 +836,14 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
nic_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
/* Get PCI Info of a partition */
-int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
- struct qlcnic_pci_info *pci_info)
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
{
int err = 0, i;
struct qlcnic_cmd_args cmd;
@@ -822,13 +860,11 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
memset(pci_info_addr, 0, pci_size);
npar = pci_info_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_PCI_INFO;
- cmd.req.arg1 = MSD(pci_info_dma_t);
- cmd.req.arg2 = LSD(pci_info_dma_t);
- cmd.req.arg3 = pci_size;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ cmd.req.arg[1] = MSD(pci_info_dma_t);
+ cmd.req.arg[2] = LSD(pci_info_dma_t);
+ cmd.req.arg[3] = pci_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
adapter->ahw->act_pci_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
@@ -854,6 +890,8 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
pci_info_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
@@ -872,21 +910,19 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
arg1 = id | (enable_mirroring ? BIT_4 : 0);
arg1 |= pci_func << 8;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_PORTMIRRORING;
- cmd.req.arg1 = arg1;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING);
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
- if (err != QLCNIC_RCODE_SUCCESS) {
+ if (err != QLCNIC_RCODE_SUCCESS)
dev_err(&adapter->pdev->dev,
"Failed to configure port mirroring%d on eswitch:%d\n",
pci_func, id);
- } else {
+ else
dev_info(&adapter->pdev->dev,
"Configured eSwitch %d for port mirroring:%d\n",
id, pci_func);
- }
+ qlcnic_free_mbx_args(&cmd);
return err;
}
@@ -923,13 +959,11 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
arg1 |= rx_tx << 15 | stats_size << 16;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
- cmd.req.arg1 = arg1;
- cmd.req.arg2 = MSD(stats_dma_t);
- cmd.req.arg3 = LSD(stats_dma_t);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (!err) {
stats = stats_addr;
@@ -949,6 +983,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
stats_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
@@ -963,6 +999,9 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
void *stats_addr;
int err;
+ if (mac_stats == NULL)
+ return -ENOMEM;
+
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
&stats_dma_t, GFP_KERNEL);
if (!stats_addr) {
@@ -971,15 +1010,11 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
return -ENOMEM;
}
memset(stats_addr, 0, stats_size);
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS;
- cmd.req.arg1 = stats_size << 16;
- cmd.req.arg2 = MSD(stats_dma_t);
- cmd.req.arg3 = LSD(stats_dma_t);
-
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
-
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
+ cmd.req.arg[1] = stats_size << 16;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (!err) {
stats = stats_addr;
mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
@@ -1001,10 +1036,16 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get mac stats failed, err=%d.\n", __func__, err);
}
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
stats_dma_t);
+
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
@@ -1065,7 +1106,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
const u8 port, const u8 rx_tx)
{
-
+ int err;
u32 arg1;
struct qlcnic_cmd_args cmd;
@@ -1088,15 +1129,16 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
arg1 |= BIT_14 | rx_tx << 15;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
- cmd.req.arg1 = arg1;
- qlcnic_issue_cmd(adapter, &cmd);
- return cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
err_ret:
- dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
- "rx_ctx=%d\n", func_esw, port, rx_tx);
+ dev_err(&adapter->pdev->dev,
+ "Invalid args func_esw %d port %d rx_ctx %d\n",
+ func_esw, port, rx_tx);
return -EIO;
}
@@ -1109,22 +1151,21 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
u8 pci_func;
pci_func = (*arg1 >> 8);
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG;
- cmd.req.arg1 = *arg1;
- cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
- qlcnic_issue_cmd(adapter, &cmd);
- *arg1 = cmd.rsp.arg1;
- *arg2 = cmd.rsp.arg2;
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
+ cmd.req.arg[1] = *arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ *arg1 = cmd.rsp.arg[1];
+ *arg2 = cmd.rsp.arg[2];
+ qlcnic_free_mbx_args(&cmd);
- if (err == QLCNIC_RCODE_SUCCESS) {
+ if (err == QLCNIC_RCODE_SUCCESS)
dev_info(&adapter->pdev->dev,
- "eSwitch port config for pci func %d\n", pci_func);
- } else {
+ "eSwitch port config for pci func %d\n", pci_func);
+ else
dev_err(&adapter->pdev->dev,
"Failed to get eswitch port config for pci func %d\n",
pci_func);
- }
return err;
}
/* Configure eSwitch port
@@ -1189,20 +1230,18 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
return err;
}
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH;
- cmd.req.arg1 = arg1;
- cmd.req.arg2 = arg2;
- qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH);
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
- err = cmd.rsp.cmd;
- if (err != QLCNIC_RCODE_SUCCESS) {
+ if (err != QLCNIC_RCODE_SUCCESS)
dev_err(&adapter->pdev->dev,
"Failed to configure eswitch pci func %d\n", pci_func);
- } else {
+ else
dev_info(&adapter->pdev->dev,
- "Configured eSwitch for pci func %d\n", pci_func);
- }
+ "Configured eSwitch for pci func %d\n", pci_func);
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 74b98110c5b4..5641f8ec49ab 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -22,42 +22,37 @@ struct qlcnic_stats {
#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+static const u32 qlcnic_fw_dump_level[] = {
+ 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
+};
static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
- {"xmit_called",
- QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
- {"xmit_finished",
- QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
- {"rx_dropped",
- QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
- {"tx_dropped",
- QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
- {"csummed",
- QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
- {"rx_pkts",
- QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
- {"lro_pkts",
- QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
- {"rx_bytes",
- QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
- {"tx_bytes",
- QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
- {"lrobytes",
- QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
- {"lso_frames",
- QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
- {"xmit_on",
- QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
- {"xmit_off",
- QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
+ {"xmit_called", QLC_SIZEOF(stats.xmitcalled),
+ QLC_OFF(stats.xmitcalled)},
+ {"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
+ QLC_OFF(stats.xmitfinished)},
+ {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+ {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+ {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+ {"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+ {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+ {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
+ {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+ {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+ {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
- QLC_OFF(stats.skb_alloc_failure)},
- {"null rxbuf",
- QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ QLC_OFF(stats.skb_alloc_failure)},
+ {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
{"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
QLC_OFF(stats.rx_dma_map_error)},
{"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
QLC_OFF(stats.tx_dma_map_error)},
+ {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
+ QLC_OFF(stats.mac_filter_limit_overrun)},
+ {"spurious intr", QLC_SIZEOF(stats.spurious_intr),
+ QLC_OFF(stats.spurious_intr)},
};
@@ -78,7 +73,15 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx numbytes",
};
-static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
+static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_tx_bytes",
+ "ctx_tx_pkts",
+ "ctx_tx_errors",
+ "ctx_tx_dropped_pkts",
+ "ctx_tx_num_buffers",
+};
+
+static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
"mac_tx_frames",
"mac_tx_bytes",
"mac_tx_mcast_pkts",
@@ -110,35 +113,70 @@ static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
"mac_rx_length_large",
"mac_rx_jabber",
"mac_rx_dropped",
- "mac_rx_crc_error",
+ "mac_crc_error",
"mac_align_error",
};
-#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
-#define QLCNIC_MAC_STATS_LEN ARRAY_SIZE(qlcnic_mac_stats_strings)
-#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats)
-#define QLCNIC_TOTAL_STATS_LEN QLCNIC_STATS_LEN + QLCNIC_MAC_STATS_LEN
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_rx_bytes",
+ "ctx_rx_pkts",
+ "ctx_lro_pkt_cnt",
+ "ctx_ip_csum_error",
+ "ctx_rx_pkts_wo_ctx",
+ "ctx_rx_pkts_dropped_wo_sts",
+ "ctx_rx_osized_pkts",
+ "ctx_rx_pkts_dropped_wo_rds",
+ "ctx_rx_unexpected_mcast_pkts",
+ "ctx_invalid_mac_address",
+ "ctx_rx_rds_ring_prim_attemoted",
+ "ctx_rx_rds_ring_prim_success",
+ "ctx_num_lro_flows_added",
+ "ctx_num_lro_flows_removed",
+ "ctx_num_lro_flows_active",
+ "ctx_pkts_dropped_unknown",
+};
static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
"Register_Test_on_offline",
"Link_Test_on_offline",
"Interrupt_Test_offline",
"Internal_Loopback_offline",
- "External_Loopback_offline"
+ "EEPROM_Test_offline"
};
#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
+static inline int qlcnic_82xx_statistics(void)
+{
+ return QLCNIC_STATS_LEN + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+}
+
+static inline int qlcnic_83xx_statistics(void)
+{
+ return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+}
+
+static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_82xx_check(adapter))
+ return qlcnic_82xx_statistics();
+ else if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_statistics();
+ else
+ return -1;
+}
+
#define QLCNIC_RING_REGS_COUNT 20
#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
#define QLCNIC_MAX_EEPROM_LEN 1024
static const u32 diag_registers[] = {
- CRB_CMDPEG_STATE,
- CRB_RCVPEG_STATE,
- CRB_XG_STATE_P3P,
- CRB_FW_CAPABILITIES_1,
- ISR_INT_STATE_REG,
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_FW_CAPABILITIES,
QLCNIC_CRB_DRV_ACTIVE,
QLCNIC_CRB_DEV_STATE,
QLCNIC_CRB_DRV_STATE,
@@ -148,6 +186,13 @@ static const u32 diag_registers[] = {
QLCNIC_PEG_ALIVE_COUNTER,
QLCNIC_PEG_HALT_STATUS1,
QLCNIC_PEG_HALT_STATUS2,
+ -1
+};
+
+static const u32 ext_diag_registers[] = {
+ CRB_XG_STATE_P3P,
+ ISR_INT_STATE_REG,
QLCNIC_CRB_PEG_NET_0+0x3c,
QLCNIC_CRB_PEG_NET_1+0x3c,
QLCNIC_CRB_PEG_NET_2+0x3c,
@@ -156,12 +201,19 @@ static const u32 diag_registers[] = {
};
#define QLCNIC_MGMT_API_VERSION 2
-#define QLCNIC_DEV_INFO_SIZE 1
-#define QLCNIC_ETHTOOL_REGS_VER 2
+#define QLCNIC_ETHTOOL_REGS_VER 3
+
static int qlcnic_get_regs_len(struct net_device *dev)
{
- return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
- QLCNIC_DEV_INFO_SIZE + 1;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 len;
+
+ if (qlcnic_83xx_check(adapter))
+ len = qlcnic_83xx_get_regs_len(adapter);
+ else
+ len = sizeof(ext_diag_registers) + sizeof(diag_registers);
+
+ return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1;
}
static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -174,10 +226,9 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 fw_major, fw_minor, fw_build;
-
- fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
- fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
- fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d", fw_major, fw_minor, fw_build);
@@ -192,7 +243,10 @@ static int
qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 speed, reg;
int check_sfp_module = 0;
+ u16 pcifn = ahw->pci_func;
/* read which mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
@@ -213,9 +267,12 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->autoneg = adapter->ahw->link_autoneg;
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
- u32 val;
+ u32 val = 0;
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_get_settings(adapter);
+ else
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
- val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
if (val == QLCNIC_PORT_MODE_802_3_AP) {
ecmd->supported = SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_1000baseT_Full;
@@ -225,6 +282,12 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
if (netif_running(dev) && adapter->ahw->has_link_events) {
+ if (qlcnic_82xx_check(adapter)) {
+ reg = QLCRD32(adapter,
+ P3P_LINK_SPEED_REG(pcifn));
+ speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+ ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+ }
ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
ecmd->autoneg = adapter->ahw->link_autoneg;
ecmd->duplex = adapter->ahw->link_duplex;
@@ -294,6 +357,13 @@ skip:
ecmd->port = PORT_TP;
}
break;
+ case QLCNIC_BRDTYPE_83XX_10G:
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) && ahw->has_link_events;
+ break;
default:
dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
adapter->ahw->board_type);
@@ -321,16 +391,10 @@ skip:
return 0;
}
-static int
-qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
{
- u32 config = 0;
- u32 ret = 0;
- struct qlcnic_adapter *adapter = netdev_priv(dev);
-
- if (adapter->ahw->port_type != QLCNIC_GBE)
- return -EOPNOTSUPP;
-
+ u32 ret = 0, config = 0;
/* read which mode */
if (ecmd->duplex)
config |= 0x1;
@@ -358,6 +422,24 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EOPNOTSUPP;
else if (ret)
return -EIO;
+ return ret;
+}
+
+static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u32 ret = 0;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->ahw->port_type != QLCNIC_GBE)
+ return -EOPNOTSUPP;
+
+ if (qlcnic_83xx_check(adapter))
+ ret = qlcnic_83xx_set_settings(adapter, ecmd);
+ else
+ ret = qlcnic_set_port_config(adapter, ecmd);
+
+ if (!ret)
+ return ret;
adapter->ahw->link_speed = ethtool_cmd_speed(ecmd);
adapter->ahw->link_duplex = ecmd->duplex;
@@ -370,6 +452,19 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return dev->netdev_ops->ndo_open(dev);
}
+static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
+ u32 *regs_buff)
+{
+ int i, j = 0;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
+ j = 0;
+ while (ext_diag_registers[j] != -1)
+ regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++]);
+ return i;
+}
+
static void
qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
@@ -377,17 +472,20 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring;
u32 *regs_buff = p;
- int ring, i = 0, j = 0;
+ int ring, i = 0;
memset(p, 0, qlcnic_get_regs_len(dev));
+
regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
(adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
regs_buff[1] = QLCNIC_MGMT_API_VERSION;
- for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
- regs_buff[i] = QLCRD32(adapter, diag_registers[j]);
+ if (qlcnic_82xx_check(adapter))
+ i = qlcnic_82xx_get_registers(adapter, regs_buff);
+ else
+ i = qlcnic_83xx_get_registers(adapter, regs_buff);
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
@@ -415,6 +513,10 @@ static u32 qlcnic_test_link(struct net_device *dev)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 val;
+ if (qlcnic_83xx_check(adapter)) {
+ val = qlcnic_83xx_test_link(adapter);
+ return (val & 1) ? 0 : 1;
+ }
val = QLCRD32(adapter, CRB_XG_STATE_P3P);
val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
return (val == XG_LINK_UP_P3P) ? 0 : 1;
@@ -426,8 +528,10 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int offset;
- int ret;
+ int ret = -1;
+ if (qlcnic_83xx_check(adapter))
+ return 0;
if (eeprom->len == 0)
return -EINVAL;
@@ -435,8 +539,9 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
((adapter->pdev)->device << 16);
offset = eeprom->offset;
- ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
- eeprom->len);
+ if (qlcnic_82xx_check(adapter))
+ ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
if (ret < 0)
return ret;
@@ -529,11 +634,11 @@ static int qlcnic_set_channels(struct net_device *dev,
channel->tx_count != channel->max_tx)
return -EINVAL;
- err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count);
+ err = qlcnic_validate_max_rss(channel->max_rx, channel->rx_count);
if (err)
return err;
- err = qlcnic_set_max_rss(adapter, channel->rx_count);
+ err = qlcnic_set_max_rss(adapter, channel->rx_count, 0);
netdev_info(dev, "allocated 0x%x sds rings\n",
adapter->max_sds_rings);
return err;
@@ -547,6 +652,10 @@ qlcnic_get_pauseparam(struct net_device *netdev,
int port = adapter->ahw->physical_port;
__u32 val;
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_get_pauseparam(adapter, pause);
+ return;
+ }
if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return;
@@ -592,6 +701,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
int port = adapter->ahw->physical_port;
__u32 val;
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_set_pauseparam(adapter, pause);
+
/* read mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
@@ -606,6 +718,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
val);
+ QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
/* set autoneg */
val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
switch (port) {
@@ -668,6 +781,9 @@ static int qlcnic_reg_test(struct net_device *dev)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 data_read;
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_reg_test(adapter);
+
data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
@@ -675,16 +791,30 @@ static int qlcnic_reg_test(struct net_device *dev)
return 0;
}
+static int qlcnic_eeprom_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return 0;
+
+ return qlcnic_83xx_flash_test(adapter);
+}
+
static int qlcnic_get_sset_count(struct net_device *dev, int sset)
{
+ int len;
+
struct qlcnic_adapter *adapter = netdev_priv(dev);
switch (sset) {
case ETH_SS_TEST:
return QLCNIC_TEST_LEN;
case ETH_SS_STATS:
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
- return QLCNIC_TOTAL_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
- return QLCNIC_TOTAL_STATS_LEN;
+ len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_83xx_check(adapter))
+ return len;
+ return qlcnic_82xx_statistics();
default:
return -EOPNOTSUPP;
}
@@ -693,35 +823,36 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
static int qlcnic_irq_test(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_sds_rings = adapter->max_sds_rings;
- int ret;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
+ int ret, max_sds_rings = adapter->max_sds_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_interrupt_test(netdev);
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EIO;
ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
if (ret)
- goto clear_it;
+ goto clear_diag_irq;
- adapter->ahw->diag_cnt = 0;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_INTRPT_TEST;
- cmd.req.arg1 = adapter->ahw->pci_func;
- qlcnic_issue_cmd(adapter, &cmd);
- ret = cmd.rsp.cmd;
+ ahw->diag_cnt = 0;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+ cmd.req.arg[1] = ahw->pci_func;
+ ret = qlcnic_issue_cmd(adapter, &cmd);
if (ret)
goto done;
- msleep(10);
-
- ret = !adapter->ahw->diag_cnt;
+ usleep_range(1000, 12000);
+ ret = !ahw->diag_cnt;
done:
+ qlcnic_free_mbx_args(&cmd);
qlcnic_diag_free_res(netdev, max_sds_rings);
-clear_it:
+clear_diag_irq:
adapter->max_sds_rings = max_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
@@ -750,7 +881,7 @@ int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
}
-static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
@@ -761,11 +892,10 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
-
adapter->ahw->diag_cnt = 0;
qlcnic_xmit_frame(skb, adapter->netdev);
-
loop = 0;
+
do {
msleep(1);
qlcnic_process_rcv_ring_diag(sds_ring);
@@ -776,42 +906,46 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
dev_kfree_skb_any(skb);
if (!adapter->ahw->diag_cnt)
- QLCDB(adapter, DRV,
- "LB Test: packet #%d was not received\n", i + 1);
+ dev_warn(&adapter->pdev->dev,
+ "LB Test: packet #%d was not received\n",
+ i + 1);
else
cnt++;
}
if (cnt != i) {
- dev_warn(&adapter->pdev->dev, "LB Test failed\n");
- if (mode != QLCNIC_ILB_MODE) {
+ dev_err(&adapter->pdev->dev,
+ "LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
+ if (mode != QLCNIC_ILB_MODE)
dev_warn(&adapter->pdev->dev,
- "WARNING: Please make sure external"
- "loopback connector is plugged in\n");
- }
+ "WARNING: Please check loopback cable\n");
return -1;
}
return 0;
}
-static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int max_sds_rings = adapter->max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int loop = 0;
int ret;
- if (!(adapter->ahw->capabilities &
- QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
- netdev_info(netdev, "Firmware is not loopback test capable\n");
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_loopback_test(netdev, mode);
+
+ if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
+ dev_info(&adapter->pdev->dev,
+			 "Firmware does not support loopback test\n");
return -EOPNOTSUPP;
}
- QLCDB(adapter, DRV, "%s loopback test in progress\n",
- mode == QLCNIC_ILB_MODE ? "internal" : "external");
- if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
- netdev_warn(netdev, "Loopback test not supported for non "
- "privilege function\n");
+ dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(&adapter->pdev->dev,
+ "Loopback test not supported in nonprivileged mode\n");
return 0;
}
@@ -823,12 +957,11 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
goto clear_it;
sds_ring = &adapter->recv_ctx->sds_rings[0];
-
ret = qlcnic_set_lb_mode(adapter, mode);
if (ret)
goto free_res;
- adapter->ahw->diag_cnt = 0;
+ ahw->diag_cnt = 0;
do {
msleep(500);
qlcnic_process_rcv_ring_diag(sds_ring);
@@ -841,11 +974,11 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
ret = adapter->ahw->diag_cnt;
goto free_res;
}
- } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
+ } while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state));
ret = qlcnic_do_lb_test(adapter, mode);
- qlcnic_clear_lb_mode(adapter);
+ qlcnic_clear_lb_mode(adapter, mode);
free_res:
qlcnic_diag_free_res(netdev, max_sds_rings);
@@ -878,20 +1011,18 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
if (data[3])
eth_test->flags |= ETH_TEST_FL_FAILED;
- if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
- data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
- if (data[4])
- eth_test->flags |= ETH_TEST_FL_FAILED;
- eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
- }
+
+ data[4] = qlcnic_eeprom_test(dev);
+ if (data[4])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
static void
-qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int index, i, j;
+ int index, i, num_stats;
switch (stringset) {
case ETH_SS_TEST:
@@ -904,14 +1035,34 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
qlcnic_gstrings_stats[index].stat_string,
ETH_GSTRING_LEN);
}
- for (j = 0; j < QLCNIC_MAC_STATS_LEN; index++, j++) {
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_mac_stats_strings[j],
- ETH_GSTRING_LEN);
+ if (qlcnic_83xx_check(adapter)) {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_tx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_rx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ return;
+ } else {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
}
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
- for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
+ num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ for (i = 0; i < num_stats; index++, i++) {
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_device_gstrings_stats[i],
ETH_GSTRING_LEN);
@@ -920,89 +1071,84 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
}
static void
-qlcnic_fill_stats(int *index, u64 *data, void *stats, int type)
+qlcnic_fill_stats(u64 *data, void *stats, int type)
{
- int ind = *index;
-
if (type == QLCNIC_MAC_STATS) {
struct qlcnic_mac_statistics *mac_stats =
(struct qlcnic_mac_statistics *)stats;
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
} else if (type == QLCNIC_ESW_STATS) {
struct __qlcnic_esw_statistics *esw_stats =
(struct __qlcnic_esw_statistics *)stats;
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->errors);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->local_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->numbytes);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->errors);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
}
-
- *index = ind;
}
-static void
-qlcnic_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 * data)
+static void qlcnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret;
-
- for (index = 0; index < QLCNIC_STATS_LEN; index++) {
- char *p =
- (char *)adapter +
- qlcnic_gstrings_stats[index].stat_offset;
- data[index] =
- (qlcnic_gstrings_stats[index].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
+ int index, ret, length, size;
+ char *p;
+
+ memset(data, 0, stats->n_stats * sizeof(u64));
+ length = QLCNIC_STATS_LEN;
+ for (index = 0; index < length; index++) {
+ p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
+ size = qlcnic_gstrings_stats[index].sizeof_stat;
+ *data++ = (size == sizeof(u64)) ? (*(u64 *)p) : ((*(u32 *)p));
}
- /* Retrieve MAC statistics from firmware */
- memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
- qlcnic_get_mac_stats(adapter, &mac_stats);
- qlcnic_fill_stats(&index, data, &mac_stats, QLCNIC_MAC_STATS);
+ if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->linkup)
+ qlcnic_83xx_get_stats(adapter, data);
+ return;
+ } else {
+ /* Retrieve MAC statistics from firmware */
+ memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
+ qlcnic_get_mac_stats(adapter, &mac_stats);
+ qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
+ }
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
@@ -1013,14 +1159,13 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
if (ret)
return;
- qlcnic_fill_stats(&index, data, &port_stats.rx, QLCNIC_ESW_STATS);
-
+ qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
if (ret)
return;
- qlcnic_fill_stats(&index, data, &port_stats.tx, QLCNIC_ESW_STATS);
+ qlcnic_fill_stats(data, &port_stats.tx, QLCNIC_ESW_STATS);
}
static int qlcnic_set_led(struct net_device *dev,
@@ -1030,6 +1175,8 @@ static int qlcnic_set_led(struct net_device *dev,
int max_sds_rings = adapter->max_sds_rings;
int err = -EIO, active = 1;
+ if (qlcnic_83xx_check(adapter))
+ return -EOPNOTSUPP;
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(dev, "LED test not supported for non "
"privilege function\n");
@@ -1096,6 +1243,8 @@ qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
+ if (qlcnic_83xx_check(adapter))
+ return;
wol->supported = 0;
wol->wolopts = 0;
@@ -1114,8 +1263,10 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (qlcnic_83xx_check(adapter))
return -EOPNOTSUPP;
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
if (!(wol_cfg & (1 << adapter->portnum)))
@@ -1307,7 +1458,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
return 0;
}
netdev_info(netdev, "Forcing a FW dump\n");
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, val->flag);
break;
case QLCNIC_DISABLE_FW_DUMP:
if (fw_dump->enable && fw_dump->tmpl_hdr) {
@@ -1327,7 +1478,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
return 0;
case QLCNIC_FORCE_FW_RESET:
netdev_info(netdev, "Forcing a FW reset\n");
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, val->flag);
adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
return 0;
case QLCNIC_SET_QUIESCENT:
@@ -1341,8 +1492,8 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
netdev_err(netdev, "FW dump not supported\n");
return -ENOTSUPP;
}
- for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
- if (val->flag == FW_DUMP_LEVELS[i]) {
+ for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
+ if (val->flag == qlcnic_fw_dump_level[i]) {
fw_dump->tmpl_hdr->drv_cap_mask =
val->flag;
netdev_info(netdev, "Driver mask changed to: 0x%x\n",
@@ -1386,10 +1537,3 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_dump_data = qlcnic_get_dump_data,
.set_dump = qlcnic_set_dump,
};
-
-const struct ethtool_ops qlcnic_ethtool_failed_ops = {
- .get_settings = qlcnic_get_settings,
- .get_drvinfo = qlcnic_get_drvinfo,
- .set_msglevel = qlcnic_set_msglevel,
- .get_msglevel = qlcnic_get_msglevel,
-};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 49cc1ac4f057..44197ca1456c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -11,6 +11,8 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include "qlcnic_hw.h"
+
/*
* The basic unit of access when reading/writing control registers.
*/
@@ -387,9 +389,6 @@ enum {
#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
-/* Lock IDs for ROM lock */
-#define ROM_LOCK_DRIVER 0x0d417340
-
/******************************************************************************
*
* Definitions specific to M25P flash
@@ -449,13 +448,10 @@ enum {
#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
-#define QLCNIC_PCI_MN_2M (0)
-#define QLCNIC_PCI_MS_2M (0x80000)
#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
#define QLCNIC_PCI_CAMQM (0x04800000UL)
#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
-#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
@@ -491,7 +487,7 @@ enum {
#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
(QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
-
+#define MAX_CTL_CHECK 1000
#define TEST_AGT_CTRL (0x00)
#define TA_CTL_START BIT_0
@@ -499,44 +495,6 @@ enum {
#define TA_CTL_WRITE BIT_2
#define TA_CTL_BUSY BIT_3
-/*
- * Register offsets for MN
- */
-#define MIU_TEST_AGT_BASE (0x90)
-
-#define MIU_TEST_AGT_ADDR_LO (0x04)
-#define MIU_TEST_AGT_ADDR_HI (0x08)
-#define MIU_TEST_AGT_WRDATA_LO (0x10)
-#define MIU_TEST_AGT_WRDATA_HI (0x14)
-#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
-#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
-#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
-#define MIU_TEST_AGT_RDDATA_LO (0x18)
-#define MIU_TEST_AGT_RDDATA_HI (0x1c)
-#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
-#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
-#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
-
-#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
-#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
-
-/*
- * Register offsets for MS
- */
-#define SIU_TEST_AGT_BASE (0x60)
-
-#define SIU_TEST_AGT_ADDR_LO (0x04)
-#define SIU_TEST_AGT_ADDR_HI (0x18)
-#define SIU_TEST_AGT_WRDATA_LO (0x08)
-#define SIU_TEST_AGT_WRDATA_HI (0x0c)
-#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
-#define SIU_TEST_AGT_RDDATA_LO (0x10)
-#define SIU_TEST_AGT_RDDATA_HI (0x14)
-#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
-
-#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
-#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
-
/* XG Link status */
#define XG_LINK_UP 0x10
#define XG_LINK_DOWN 0x20
@@ -556,9 +514,6 @@ enum {
#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
-#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
-#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
-#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
@@ -568,28 +523,17 @@ enum {
#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+#define QLCNIC_CDRP_MAX_ARGS 4
+#define QLCNIC_CDRP_ARG(i) (QLCNIC_REG(0x18 + ((i) * 4)))
+
#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
-#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
-#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
-#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
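
Note on the indexed macro added above (illustrative only, the CRB base is left symbolic): QLCNIC_CDRP_ARG(i) generates the same offsets that the removed ARG1/ARG2/ARG3 defines named individually, with ARG(0) landing on the CDRP offset itself:

#include <stdio.h>

#define NIC_CRB_BASE		0	/* symbolic here; the real base is in qlcnic_hdr.h */
#define QLCNIC_REG(X)		(NIC_CRB_BASE + (X))
#define QLCNIC_CDRP_ARG(i)	(QLCNIC_REG(0x18 + ((i) * 4)))

int main(void)
{
	int i;

	/* Prints 0x18 0x1c 0x20 0x24: the CDRP offset plus the three
	 * argument registers formerly named ARG1/ARG2/ARG3. */
	for (i = 0; i < 4; i++)
		printf("CDRP_ARG(%d) = 0x%x\n", i, (unsigned)QLCNIC_CDRP_ARG(i));
	return 0;
}
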
-#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
-#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
-
#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98))
#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
-#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
-
-#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
-
-#define CRB_V2P_0 (QLCNIC_REG(0x290))
-#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
-#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
-#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
/*
* CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
@@ -616,11 +560,6 @@ enum {
/* Lock IDs for PHY lock */
#define PHY_LOCK_DRIVER 0x44524956
-/* Used for PS PCI Memory access */
-#define PCIX_PS_OP_ADDR_LO (0x10000)
-/* via CRB (PS side only) */
-#define PCIX_PS_OP_ADDR_HI (0x10004)
-
#define PCIX_INT_VECTOR (0x10100)
#define PCIX_INT_MASK (0x10104)
@@ -682,17 +621,6 @@ enum {
#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
-#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
-#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
-#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
-#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138))
-#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
-
-#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
-#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
-#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
-#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
-#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c))
#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
@@ -711,7 +639,6 @@ enum {
#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
-#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
@@ -744,6 +671,9 @@ enum {
#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
+#define QLCNIC_MAX_MC_COUNT 38
+#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5
+
#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
@@ -766,26 +696,13 @@ struct qlcnic_legacy_intr_set {
u32 pci_int_reg;
};
-#define QLCNIC_FW_API 0x1b216c
-#define QLCNIC_DRV_OP_MODE 0x1b2170
#define QLCNIC_MSIX_BASE 0x132110
#define QLCNIC_MAX_PCI_FUNC 8
#define QLCNIC_MAX_VLAN_FILTERS 64
-/* FW dump defines */
-#define MIU_TEST_CTR 0x41000090
-#define MIU_TEST_ADDR_LO 0x41000094
-#define MIU_TEST_ADDR_HI 0x41000098
#define FLASH_ROM_WINDOW 0x42110030
#define FLASH_ROM_DATA 0x42150000
-
-static const u32 FW_DUMP_LEVELS[] = {
- 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
-
-static const u32 MIU_TEST_READ_DATA[] = {
- 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, };
-
#define QLCNIC_FW_DUMP_REG1 0x00130060
#define QLCNIC_FW_DUMP_REG2 0x001e0000
#define QLCNIC_FLASH_SEM2_LK 0x0013C010
@@ -796,7 +713,8 @@ static const u32 MIU_TEST_READ_DATA[] = {
enum {
QLCNIC_MGMT_FUNC = 0,
QLCNIC_PRIV_FUNC = 1,
- QLCNIC_NON_PRIV_FUNC = 2
+ QLCNIC_NON_PRIV_FUNC = 2,
+ QLCNIC_UNKNOWN_FUNC_MODE = 3
};
enum {
@@ -1013,6 +931,8 @@ enum {
#define QLCNIC_NIU_PROMISC_MODE 1
#define QLCNIC_NIU_ALLMULTI_MODE 2
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
struct crb_128M_2M_sub_block_map {
unsigned valid;
unsigned start_128M;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 7a6d5ebe4e0f..325e11e1ce0f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -344,21 +344,26 @@ qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
-static int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
+int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
{
u32 data;
if (qlcnic_82xx_check(adapter))
qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
- else
- return -EIO;
+ else {
+ data = qlcnic_83xx_rd_reg_indirect(adapter, addr);
+ if (data == -EIO)
+ return -EIO;
+ }
return data;
}
-static void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
{
if (qlcnic_82xx_check(adapter))
qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
+ else
+ qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
}
static int
@@ -417,9 +422,8 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
return 0;
}
-static int
-qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
- __le16 vlan_id, unsigned op)
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ __le16 vlan_id, u8 op)
{
struct qlcnic_nic_req req;
struct qlcnic_mac_req *mac_req;
@@ -442,7 +446,29 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
-static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+{
+ struct list_head *head;
+ struct qlcnic_mac_list_s *cur;
+ int err = -EINVAL;
+
+ /* Delete MAC from the existing list */
+ list_for_each(head, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_list_s, list);
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ 0, QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ list_del(&cur->list);
+ kfree(cur);
+ return err;
+ }
+ }
+ return err;
+}
+
+int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
struct list_head *head;
struct qlcnic_mac_list_s *cur;
@@ -455,11 +481,9 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
}
cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
- if (cur == NULL) {
- dev_err(&adapter->netdev->dev,
- "failed to add mac address filter\n");
+ if (cur == NULL)
return -ENOMEM;
- }
+
memcpy(cur->mac_addr, addr, ETH_ALEN);
if (qlcnic_sre_macaddr_change(adapter,
@@ -506,17 +530,17 @@ void qlcnic_set_multi(struct net_device *netdev)
}
send_fw_cmd:
- if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) {
qlcnic_alloc_lb_filters_mem(adapter);
- adapter->mac_learn = 1;
+ adapter->drv_mac_learn = true;
} else {
- adapter->mac_learn = 0;
+ adapter->drv_mac_learn = false;
}
qlcnic_nic_set_promisc(adapter, mode);
}
-int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
struct qlcnic_nic_req req;
u64 word;
@@ -555,18 +579,20 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
struct hlist_node *tmp_hnode, *n;
struct hlist_head *head;
int i;
+ unsigned long time;
+ u8 cmd;
- for (i = 0; i < adapter->fhash.fmax; i++) {
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
-
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
- {
- if (jiffies >
- (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ time = tmp_fil->ftime;
+ if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
qlcnic_sre_macaddr_change(adapter,
- tmp_fil->faddr, tmp_fil->vlan_id,
- tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
- QLCNIC_MAC_DEL);
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
spin_lock_bh(&adapter->mac_learn_lock);
adapter->fhash.fnum--;
hlist_del(&tmp_fil->fnode);
@@ -575,6 +601,21 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
}
}
}
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
+ head = &(adapter->rx_fhash.fhead[i]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
+ {
+ time = tmp_fil->ftime;
+ if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+ spin_lock_bh(&adapter->rx_mac_learn_lock);
+ adapter->rx_fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->rx_mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+ }
}
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
@@ -583,14 +624,17 @@ void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
struct hlist_node *tmp_hnode, *n;
struct hlist_head *head;
int i;
+ u8 cmd;
- for (i = 0; i < adapter->fhash.fmax; i++) {
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
-
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
- qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
- tmp_fil->vlan_id, tmp_fil->vlan_id ?
- QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ qlcnic_sre_macaddr_change(adapter,
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
spin_lock_bh(&adapter->mac_learn_lock);
adapter->fhash.fnum--;
hlist_del(&tmp_fil->fnode);
@@ -620,12 +664,13 @@ static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
return rv;
}
-int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
if (qlcnic_set_fw_loopback(adapter, mode))
return -EIO;
- if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
+ if (qlcnic_nic_set_promisc(adapter,
+ VPORT_MISS_MODE_ACCEPT_ALL)) {
qlcnic_set_fw_loopback(adapter, 0);
return -EIO;
}
@@ -634,11 +679,11 @@ int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return 0;
}
-void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
- int mode = VPORT_MISS_MODE_DROP;
struct net_device *netdev = adapter->netdev;
+ mode = VPORT_MISS_MODE_DROP;
qlcnic_set_fw_loopback(adapter, 0);
if (netdev->flags & IFF_PROMISC)
@@ -648,12 +693,13 @@ void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
qlcnic_nic_set_promisc(adapter, mode);
msleep(1000);
+ return 0;
}
/*
* Send the interrupt coalescing parameter set by ethtool to the card.
*/
-int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
int rv;
@@ -675,10 +721,14 @@ int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
if (rv != 0)
dev_err(&adapter->netdev->dev,
"Could not send interrupt coalescing parameters\n");
- return rv;
}
-int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+#define QLCNIC_ENABLE_IPV4_LRO 1
+#define QLCNIC_ENABLE_IPV6_LRO 2
+#define QLCNIC_NO_DEST_IPV4_CHECK (1 << 8)
+#define QLCNIC_NO_DEST_IPV6_CHECK (2 << 8)
+
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
@@ -694,7 +744,15 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
- req.words[0] = cpu_to_le64(enable);
+ word = 0;
+ if (enable) {
+ word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK;
+ if (adapter->ahw->capabilities2 & QLCNIC_FW_CAP2_HW_LRO_IPV6)
+ word |= QLCNIC_ENABLE_IPV6_LRO |
+ QLCNIC_NO_DEST_IPV6_CHECK;
+ }
+
+ req.words[0] = cpu_to_le64(word);
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0)
@@ -734,9 +792,12 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
}
-#define RSS_HASHTYPE_IP_TCP 0x3
+#define QLCNIC_RSS_HASHTYPE_IP_TCP 0x3
+#define QLCNIC_ENABLE_TYPE_C_RSS BIT_10
+#define QLCNIC_RSS_FEATURE_FLAG (1ULL << 63)
+#define QLCNIC_RSS_IND_TABLE_MASK 0x7ULL
-int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
@@ -761,13 +822,19 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
* 7-6: hash_type_ipv6
* 8: enable
* 9: use indirection table
- * 47-10: reserved
- * 63-48: indirection table mask
+ * 10: type-c rss
+ * 11: udp rss
+ * 47-12: reserved
+ * 62-48: indirection table mask
+ * 63: feature flag
*/
- word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
- ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ word = ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
((u64)(enable & 0x1) << 8) |
- ((0x7ULL) << 48);
+ ((u64)QLCNIC_RSS_IND_TABLE_MASK << 48) |
+ (u64)QLCNIC_ENABLE_TYPE_C_RSS |
+ (u64)QLCNIC_RSS_FEATURE_FLAG;
+
req.words[0] = cpu_to_le64(word);
for (i = 0; i < 5; i++)
req.words[i+1] = cpu_to_le64(key[i]);
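
To make the bit layout documented in the comment above concrete, a self-contained sketch that assembles the same control word from the constants added in this hunk (user-space C, assuming enable = 1; not driver code):

#include <stdio.h>
#include <stdint.h>

#define QLCNIC_RSS_HASHTYPE_IP_TCP	0x3
#define QLCNIC_ENABLE_TYPE_C_RSS	(1ULL << 10)
#define QLCNIC_RSS_FEATURE_FLAG		(1ULL << 63)
#define QLCNIC_RSS_IND_TABLE_MASK	0x7ULL

int main(void)
{
	int enable = 1;
	uint64_t word;

	word = ((uint64_t)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 4) |	/* bits 5-4: ipv4 hash type */
	       ((uint64_t)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 6) |	/* bits 7-6: ipv6 hash type */
	       ((uint64_t)(enable & 0x1) << 8) |			/* bit 8: enable */
	       (QLCNIC_RSS_IND_TABLE_MASK << 48) |			/* bits 62-48: indirection mask */
	       QLCNIC_ENABLE_TYPE_C_RSS |				/* bit 10 */
	       QLCNIC_RSS_FEATURE_FLAG;					/* bit 63 */

	/* Prints 0x80070000000005f0 */
	printf("RSS control word = 0x%016llx\n", (unsigned long long)word);
	return 0;
}
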
@@ -779,7 +846,8 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
return rv;
}
-int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
{
struct qlcnic_nic_req req;
struct qlcnic_ipaddr *ipa;
@@ -801,23 +869,19 @@ int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
dev_err(&adapter->netdev->dev,
"could not notify %s IP 0x%x reuqest\n",
(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
-
- return rv;
}
-int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
int rv;
-
memset(&req, 0, sizeof(struct qlcnic_nic_req));
req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
req.words[0] = cpu_to_le64(enable | (enable << 8));
-
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0)
dev_err(&adapter->netdev->dev,
@@ -882,7 +946,8 @@ netdev_features_t qlcnic_fix_features(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+ qlcnic_82xx_check(adapter)) {
netdev_features_t changed = features ^ netdev->features;
features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
}
@@ -903,13 +968,15 @@ int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
if (!(changed & NETIF_F_LRO))
return 0;
- netdev->features = features ^ NETIF_F_LRO;
+ netdev->features ^= NETIF_F_LRO;
if (qlcnic_config_hw_lro(adapter, hw_lro))
return -EIO;
- if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
- return -EIO;
+ if (!hw_lro && qlcnic_82xx_check(adapter)) {
+ if (qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+ }
return 0;
}
@@ -981,8 +1048,8 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
return 0;
}
-int
-qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+ u32 data)
{
unsigned long flags;
int rv;
@@ -1013,7 +1080,7 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
return -EIO;
}
-int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
unsigned long flags;
int rv;
@@ -1042,7 +1109,6 @@ int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
return -1;
}
-
void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw,
u32 offset)
{
@@ -1268,7 +1334,7 @@ int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
return ret;
}
-int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
{
int offset, board_type, magic;
struct pci_dev *pdev = adapter->pdev;
@@ -1341,7 +1407,7 @@ qlcnic_wol_supported(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
{
struct qlcnic_nic_req req;
int rv;
@@ -1353,7 +1419,7 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
- req.words[0] = cpu_to_le64((u64)rate << 32);
+ req.words[0] = cpu_to_le64(((u64)rate << 32) | adapter->portnum);
req.words[1] = cpu_to_le64(state);
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
@@ -1362,3 +1428,56 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
return rv;
}
+
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ void __iomem *msix_base_addr;
+ u32 func;
+ u32 msix_base;
+
+ pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
+ msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
+ msix_base = readl(msix_base_addr);
+ func = (func - msix_base) / QLCNIC_MSIX_TBL_PGSIZE;
+ adapter->ahw->pci_func = func;
+}
+
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
+}
+
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
+}
+
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *adapter)
+{
+ return qlcnic_pcie_sem_lock(adapter, 5, 0);
+}
+
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter)
+{
+ qlcnic_pcie_sem_unlock(adapter, 5);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
new file mode 100644
index 000000000000..5b8749eda11f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -0,0 +1,194 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_HW_H
+#define __QLCNIC_HW_H
+
+/* Common registers in 83xx and 82xx */
+enum qlcnic_regs {
+ QLCNIC_PEG_HALT_STATUS1 = 0,
+ QLCNIC_PEG_HALT_STATUS2,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_FLASH_LOCK_OWNER,
+ QLCNIC_FW_CAPABILITIES,
+ QLCNIC_CRB_DRV_ACTIVE,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_FW_VERSION_MAJOR,
+ QLCNIC_FW_VERSION_MINOR,
+ QLCNIC_FW_VERSION_SUB,
+ QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_FW_IMG_VALID,
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_ASIC_TEMP,
+ QLCNIC_FW_API,
+ QLCNIC_DRV_OP_MODE,
+ QLCNIC_FLASH_LOCK,
+ QLCNIC_FLASH_UNLOCK,
+};
+
+/* Read from an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_RD32(a, addr) \
+ readl(((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Write to an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_WR32(a, addr, value) \
+ writel(value, ((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Read from a direct address offset from BAR0, additional registers */
+#define QLCRDX(ahw, addr) \
+ readl(((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr]))
+
+/* Write to a direct address offset from BAR0, additional registers */
+#define QLCWRX(ahw, addr, value) \
+ writel(value, (((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr])))
+
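
The reg_tbl indirection behind QLC_SHARED_REG_RD32/WR32 lets 82xx and 83xx share callers while each chip installs its own BAR0 offsets. A minimal user-space model of that lookup; the enum names, offsets and the fake BAR below are fabricated for illustration:

#include <stdio.h>
#include <stdint.h>

enum nic_regs { REG_CMDPEG_STATE = 0, REG_RCVPEG_STATE, REG_MAX };

/* Fabricated per-chip offset table; the 83xx code would install a different
 * table behind the same enum indices. */
static const uint32_t reg_tbl_82xx[REG_MAX] = { 0x50, 0x13c };

struct hw_ctx {
	uint32_t *bar0;			/* stand-in for the ioremapped pci_base0 */
	const uint32_t *reg_tbl;
};

/* Same shape as QLC_SHARED_REG_RD32: enum index -> offset -> read. */
static uint32_t shared_reg_rd32(struct hw_ctx *hw, enum nic_regs reg)
{
	return hw->bar0[hw->reg_tbl[reg] / 4];
}

int main(void)
{
	uint32_t fake_bar[0x100] = { [0x50 / 4] = 0xff01 };
	struct hw_ctx hw = { .bar0 = fake_bar, .reg_tbl = reg_tbl_82xx };

	/* Prints 0xff01 */
	printf("CMDPEG_STATE = 0x%x\n",
	       (unsigned)shared_reg_rd32(&hw, REG_CMDPEG_STATE));
	return 0;
}
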
+#define QLCNIC_CMD_CONFIGURE_IP_ADDR 0x1
+#define QLCNIC_CMD_CONFIG_INTRPT 0x2
+#define QLCNIC_CMD_CREATE_RX_CTX 0x7
+#define QLCNIC_CMD_DESTROY_RX_CTX 0x8
+#define QLCNIC_CMD_CREATE_TX_CTX 0x9
+#define QLCNIC_CMD_DESTROY_TX_CTX 0xa
+#define QLCNIC_CMD_CONFIGURE_LRO 0xC
+#define QLCNIC_CMD_CONFIGURE_MAC_LEARNING 0xD
+#define QLCNIC_CMD_GET_STATISTICS 0xF
+#define QLCNIC_CMD_INTRPT_TEST 0x11
+#define QLCNIC_CMD_SET_MTU 0x12
+#define QLCNIC_CMD_READ_PHY 0x13
+#define QLCNIC_CMD_WRITE_PHY 0x14
+#define QLCNIC_CMD_READ_HW_REG 0x15
+#define QLCNIC_CMD_GET_FLOW_CTL 0x16
+#define QLCNIC_CMD_SET_FLOW_CTL 0x17
+#define QLCNIC_CMD_READ_MAX_MTU 0x18
+#define QLCNIC_CMD_READ_MAX_LRO 0x19
+#define QLCNIC_CMD_MAC_ADDRESS 0x1f
+#define QLCNIC_CMD_GET_PCI_INFO 0x20
+#define QLCNIC_CMD_GET_NIC_INFO 0x21
+#define QLCNIC_CMD_SET_NIC_INFO 0x22
+#define QLCNIC_CMD_GET_ESWITCH_CAPABILITY 0x24
+#define QLCNIC_CMD_TOGGLE_ESWITCH 0x25
+#define QLCNIC_CMD_GET_ESWITCH_STATUS 0x26
+#define QLCNIC_CMD_SET_PORTMIRRORING 0x27
+#define QLCNIC_CMD_CONFIGURE_ESWITCH 0x28
+#define QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG 0x29
+#define QLCNIC_CMD_GET_ESWITCH_STATS 0x2a
+#define QLCNIC_CMD_CONFIG_PORT 0x2e
+#define QLCNIC_CMD_TEMP_SIZE 0x2f
+#define QLCNIC_CMD_GET_TEMP_HDR 0x30
+#define QLCNIC_CMD_GET_MAC_STATS 0x37
+#define QLCNIC_CMD_SET_DRV_VER 0x38
+#define QLCNIC_CMD_CONFIGURE_RSS 0x41
+#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
+#define QLCNIC_CMD_CONFIGURE_LED 0x44
+#define QLCNIC_CMD_CONFIG_MAC_VLAN 0x45
+#define QLCNIC_CMD_GET_LINK_EVENT 0x48
+#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49
+#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A
+#define QLCNIC_CMD_INIT_NIC_FUNC 0x60
+#define QLCNIC_CMD_STOP_NIC_FUNC 0x61
+#define QLCNIC_CMD_IDC_ACK 0x63
+#define QLCNIC_CMD_SET_PORT_CONFIG 0x66
+#define QLCNIC_CMD_GET_PORT_CONFIG 0x67
+#define QLCNIC_CMD_GET_LINK_STATUS 0x68
+#define QLCNIC_CMD_SET_LED_CONFIG 0x69
+#define QLCNIC_CMD_GET_LED_CONFIG 0x6A
+#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B
+
+#define QLCNIC_INTRPT_INTX 1
+#define QLCNIC_INTRPT_MSIX 3
+#define QLCNIC_INTRPT_ADD 1
+#define QLCNIC_INTRPT_DEL 2
+
+#define QLCNIC_GET_CURRENT_MAC 1
+#define QLCNIC_SET_STATION_MAC 2
+#define QLCNIC_GET_DEFAULT_MAC 3
+#define QLCNIC_GET_FAC_DEF_MAC 4
+#define QLCNIC_SET_FAC_DEF_MAC 5
+
+#define QLCNIC_MBX_LINK_EVENT 0x8001
+#define QLCNIC_MBX_COMP_EVENT 0x8100
+#define QLCNIC_MBX_REQUEST_EVENT 0x8101
+#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
+#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130
+#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131
+
+struct qlcnic_mailbox_metadata {
+ u32 cmd;
+ u32 in_args;
+ u32 out_args;
+};
+
+/* Mailbox ownership */
+#define QLCNIC_GET_OWNER(val) ((val) & (BIT_0 | BIT_1))
+
+#define QLCNIC_SET_OWNER 1
+#define QLCNIC_CLR_OWNER 0
+#define QLCNIC_MBX_TIMEOUT 10000
+
+#define QLCNIC_MBX_RSP_OK 1
+#define QLCNIC_MBX_PORT_RSP_OK 0x1a
+#define QLCNIC_MBX_ASYNC_EVENT BIT_15
+
+struct qlcnic_pci_info;
+struct qlcnic_info;
+struct qlcnic_cmd_args;
+struct ethtool_stats;
+struct pci_device_id;
+struct qlcnic_host_sds_ring;
+struct qlcnic_host_tx_ring;
+struct qlcnic_hardware_context;
+struct qlcnic_adapter;
+
+int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong);
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev);
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
+ u64 *uaddr, __le16 vlan_id);
+void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32, int);
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8);
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8);
+irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *);
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *tx_ring, int);
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *);
+int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *);
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *);
+#endif /* __QLCNIC_HW_H */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index de79cde233de..d28336fc65ab 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1,15 +1,12 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/if_vlan.h>
#include "qlcnic.h"
+#include "qlcnic_hw.h"
struct crb_addr_pair {
u32 addr;
@@ -166,13 +163,12 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
{
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
- struct qlcnic_host_tx_ring *tx_ring;
int ring;
recv_ctx = adapter->recv_ctx;
if (recv_ctx->rds_rings == NULL)
- goto skip_rds;
+ return;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
@@ -180,16 +176,6 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
rds_ring->rx_buf_arr = NULL;
}
kfree(recv_ctx->rds_rings);
-
-skip_rds:
- if (adapter->tx_ring == NULL)
- return;
-
- tx_ring = adapter->tx_ring;
- vfree(tx_ring->cmd_buf_arr);
- tx_ring->cmd_buf_arr = NULL;
- kfree(adapter->tx_ring);
- adapter->tx_ring = NULL;
}
int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
@@ -197,39 +183,16 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_rx_buffer *rx_buf;
- int ring, i, size;
-
- struct qlcnic_cmd_buffer *cmd_buf_arr;
- struct net_device *netdev = adapter->netdev;
-
- size = sizeof(struct qlcnic_host_tx_ring);
- tx_ring = kzalloc(size, GFP_KERNEL);
- if (tx_ring == NULL) {
- dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
- return -ENOMEM;
- }
- adapter->tx_ring = tx_ring;
-
- tx_ring->num_desc = adapter->num_txd;
- tx_ring->txq = netdev_get_tx_queue(netdev, 0);
-
- cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
- if (cmd_buf_arr == NULL) {
- dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
- goto err_out;
- }
- tx_ring->cmd_buf_arr = cmd_buf_arr;
+ int ring, i;
recv_ctx = adapter->recv_ctx;
- size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
- rds_ring = kzalloc(size, GFP_KERNEL);
- if (rds_ring == NULL) {
- dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct qlcnic_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
goto err_out;
- }
+
recv_ctx->rds_rings = rds_ring;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -255,11 +218,9 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
break;
}
rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
- if (rds_ring->rx_buf_arr == NULL) {
- dev_err(&netdev->dev, "Failed to allocate "
- "rx buffer ring %d\n", ring);
+ if (rds_ring->rx_buf_arr == NULL)
goto err_out;
- }
+
INIT_LIST_HEAD(&rds_ring->free_list);
/*
* Now go through all of them, set reference handles
@@ -327,7 +288,6 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
long done = 0;
cond_resched();
-
while (done == 0) {
done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
done &= 2;
@@ -416,8 +376,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
u32 off;
struct pci_dev *pdev = adapter->pdev;
- QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
- QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_RCVPEG_STATE, 0);
 	/* Halt all the individual PEGs and other blocks */
/* disable all I2Q */
@@ -482,10 +442,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
}
buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
- if (buf == NULL) {
- dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
+ if (buf == NULL)
return -ENOMEM;
- }
for (i = 0; i < n; i++) {
if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
@@ -564,8 +522,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
msleep(1);
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
return 0;
}
@@ -576,7 +534,7 @@ static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
do {
- val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE);
switch (val) {
case PHAN_INITIALIZE_COMPLETE:
@@ -592,7 +550,8 @@ static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
} while (--retries);
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
out_err:
dev_err(&adapter->pdev->dev, "Command Peg initialization not "
@@ -607,7 +566,7 @@ qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
do {
- val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_RCVPEG_STATE);
if (val == PHAN_PEG_RCV_INITIALIZED)
return 0;
@@ -638,7 +597,7 @@ qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
if (err)
return err;
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
return err;
}
@@ -649,7 +608,7 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
int timeo;
u32 val;
- val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
val = QLC_DEV_GET_DRV(val, adapter->portnum);
if ((val & 0x3) != QLCNIC_TYPE_NIC) {
dev_err(&adapter->pdev->dev,
@@ -689,11 +648,9 @@ static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
}
entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
- flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
- if (flt_entry == NULL) {
- dev_warn(&adapter->pdev->dev, "error allocating memory\n");
+ flt_entry = vzalloc(entry_size);
+ if (flt_entry == NULL)
return -EIO;
- }
ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
sizeof(struct qlcnic_flt_header),
@@ -1096,11 +1053,13 @@ qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
u32 heartbeat, ret = -EIO;
int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
- adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ adapter->heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
do {
msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
- heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
if (heartbeat != adapter->heartbeat) {
ret = QLCNIC_RCODE_SUCCESS;
break;
@@ -1270,7 +1229,7 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
return -EINVAL;
}
- QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLCNIC_BDINFO_MAGIC);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6f82812d0fab..6387e0cc3ea9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
@@ -5,9 +12,6 @@
#include "qlcnic.h"
-#define QLCNIC_MAC_HASH(MAC)\
- ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
-
#define TX_ETHER_PKT 0x01
#define TX_TCP_PKT 0x02
#define TX_UDP_PKT 0x03
@@ -84,6 +88,8 @@
#define qlcnic_get_lro_sts_mss(sts_data1) \
((sts_data1 >> 32) & 0x0FFFF)
+#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)
+
/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD 0x03
#define QLCNIC_RXPKT_DESC 0x04
@@ -91,18 +97,152 @@
#define QLCNIC_RESPONSE_DESC 0x05
#define QLCNIC_LRO_DESC 0x12
+#define QLCNIC_TX_POLL_BUDGET 128
+#define QLCNIC_TCP_HDR_SIZE 20
+#define QLCNIC_TCP_TS_OPTION_SIZE 12
+#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
+#define QLCNIC_DESC_OWNER_FW cpu_to_le64(STATUS_OWNER_PHANTOM)
+
+#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
+
/* for status field in status_desc */
#define STATUS_CKSUM_LOOP 0
#define STATUS_CKSUM_OK 2
-static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 uaddr, __le16 vlan_id,
- struct qlcnic_host_tx_ring *tx_ring)
+#define qlcnic_83xx_pktln(sts) ((sts >> 32) & 0x3FFF)
+#define qlcnic_83xx_hndl(sts) ((sts >> 48) & 0x7FFF)
+#define qlcnic_83xx_csum_status(sts) ((sts >> 39) & 7)
+#define qlcnic_83xx_opcode(sts) ((sts >> 42) & 0xF)
+#define qlcnic_83xx_vlan_tag(sts) (((sts) >> 48) & 0xFFFF)
+#define qlcnic_83xx_lro_pktln(sts) (((sts) >> 32) & 0x3FFF)
+#define qlcnic_83xx_l2_hdr_off(sts) (((sts) >> 16) & 0xFF)
+#define qlcnic_83xx_l4_hdr_off(sts) (((sts) >> 24) & 0xFF)
+#define qlcnic_83xx_pkt_cnt(sts) (((sts) >> 16) & 0x7)
+#define qlcnic_83xx_is_tstamp(sts) (((sts) >> 40) & 1)
+#define qlcnic_83xx_is_psh_bit(sts) (((sts) >> 41) & 1)
+#define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1)
+#define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1)
+
+struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
+ struct qlcnic_host_rds_ring *, u16, u16);
+
+inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(0, tx_ring->crb_intr_mask);
+}
+
+inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+static inline u8 qlcnic_mac_hash(u64 mac)
+{
+ return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
+}
+
+static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
+ u16 handle, u8 ring_id)
+{
+ if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X)
+ return handle | (ring_id << 15);
+ else
+ return handle;
+}
+
+static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
+{
+ return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
+}
+
+void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
+ int loopback_pkt, __le16 vlan_id)
+{
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ unsigned long time;
+ u64 src_addr = 0;
+ u8 hindex, found = 0, op;
+ int ret;
+
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+
+ if (loopback_pkt) {
+ if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
+ return;
+
+ hindex = qlcnic_mac_hash(src_addr) &
+ (adapter->fhash.fbucket_size - 1);
+ head = &(adapter->rx_fhash.fhead[hindex]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id) {
+ time = tmp_fil->ftime;
+ if (jiffies > (QLCNIC_READD_AGE * HZ + time))
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ fil->ftime = jiffies;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+ fil->vlan_id = vlan_id;
+ spin_lock(&adapter->rx_mac_learn_lock);
+ hlist_add_head(&(fil->fnode), head);
+ adapter->rx_fhash.fnum++;
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ } else {
+ hindex = qlcnic_mac_hash(src_addr) &
+ (adapter->fhash.fbucket_size - 1);
+ head = &(adapter->rx_fhash.fhead[hindex]);
+ spin_lock(&adapter->rx_mac_learn_lock);
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ return;
+ }
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
+ vlan_id, op);
+ if (!ret) {
+ op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+ ret = qlcnic_sre_macaddr_change(adapter,
+ (u8 *)&src_addr,
+ vlan_id, op);
+ if (!ret) {
+ hlist_del(&(tmp_fil->fnode));
+ adapter->rx_fhash.fnum--;
+ }
+ }
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ }
+}
+
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+ __le16 vlan_id)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
@@ -128,14 +268,14 @@ static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
}
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring,
struct cmd_desc_type0 *first_desc,
struct sk_buff *skb)
{
- struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
struct hlist_node *tmp_hnode, *n;
struct hlist_head *head;
+ struct net_device *netdev = adapter->netdev;
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
u64 src_addr = 0;
__le16 vlan_id = 0;
u8 hindex;
@@ -143,23 +283,23 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
return;
- if (adapter->fhash.fnum >= adapter->fhash.fmax)
+ if (adapter->fhash.fnum >= adapter->fhash.fmax) {
+ adapter->stats.mac_filter_limit_overrun++;
+ netdev_info(netdev, "Can not add more than %d mac addresses\n",
+ adapter->fhash.fmax);
return;
+ }
- /* Only NPAR capable devices support vlan based learning*/
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
- vlan_id = first_desc->vlan_TCI;
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
- hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
+ hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
head = &(adapter->fhash.fhead[hindex]);
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
- tmp_fil->vlan_id == vlan_id) {
-
+ tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
- qlcnic_change_filter(adapter, src_addr, vlan_id,
- tx_ring);
+ qlcnic_change_filter(adapter, &src_addr,
+ vlan_id);
tmp_fil->ftime = jiffies;
return;
}
@@ -169,17 +309,13 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (!fil)
return;
- qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
-
+ qlcnic_change_filter(adapter, &src_addr, vlan_id);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
-
spin_lock(&adapter->mac_learn_lock);
-
hlist_add_head(&(fil->fnode), head);
adapter->fhash.fnum++;
-
spin_unlock(&adapter->mac_learn_lock);
}
@@ -474,8 +610,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
goto unwind_buff;
- if (adapter->mac_learn)
- qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
+ if (adapter->drv_mac_learn)
+ qlcnic_send_filter(adapter, first_desc, skb);
adapter->stats.txbytes += skb->len;
adapter->stats.xmitcalled++;
@@ -528,8 +664,8 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
}
skb_reserve(skb, NET_IP_ALIGN);
- dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma)) {
adapter->stats.rx_dma_map_error++;
@@ -544,12 +680,13 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
}
static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring)
+ struct qlcnic_host_rds_ring *rds_ring,
+ u8 ring_id)
{
struct rcv_desc *pdesc;
struct qlcnic_rx_buffer *buffer;
int count = 0;
- uint32_t producer;
+ uint32_t producer, handle;
struct list_head *head;
if (!spin_trylock(&rds_ring->lock))
@@ -557,7 +694,6 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
producer = rds_ring->producer;
head = &rds_ring->free_list;
-
while (!list_empty(head)) {
buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
@@ -565,28 +701,29 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
break;
}
-
count++;
list_del(&buffer->list);
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
- pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ handle = qlcnic_get_ref_handle(adapter,
+ buffer->ref_handle, ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
producer = get_next_index(producer, rds_ring->num_desc);
}
-
if (count) {
rds_ring->producer = producer;
writel((producer - 1) & (rds_ring->num_desc - 1),
rds_ring->crb_rcv_producer);
}
-
spin_unlock(&rds_ring->lock);
}
-static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int budget)
{
u32 sw_consumer, hw_consumer;
int i, done, count = 0;
@@ -594,7 +731,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
if (!spin_trylock(&adapter->tx_clean_lock))
return 1;
@@ -615,22 +751,19 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
PCI_DMA_TODEVICE);
frag->dma = 0ULL;
}
-
adapter->stats.xmitfinished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
- if (++count >= MAX_STATUS_HANDLE)
+ if (++count >= budget)
break;
}
if (count && netif_running(netdev)) {
tx_ring->sw_consumer = sw_consumer;
-
smp_mb();
-
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_wake_queue(netdev);
@@ -654,7 +787,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
-
spin_unlock(&adapter->tx_clean_lock);
return done;
@@ -662,16 +794,15 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
static int qlcnic_poll(struct napi_struct *napi, int budget)
{
+ int tx_complete, work_done;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter;
- int tx_complete, work_done;
sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
adapter = sds_ring->adapter;
-
- tx_complete = qlcnic_process_cmd_ring(adapter);
+ tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
+ budget);
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
@@ -804,26 +935,23 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
}
}
-static struct sk_buff *
-qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring, u16 index,
- u16 cksum)
+struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *ring,
+ u16 index, u16 cksum)
{
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
- buffer = &rds_ring->rx_buf_arr[index];
-
+ buffer = &ring->rx_buf_arr[index];
if (unlikely(buffer->skb == NULL)) {
WARN_ON(1);
return NULL;
}
- pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
PCI_DMA_FROMDEVICE);
skb = buffer->skb;
-
if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
(cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
adapter->stats.csummed++;
@@ -832,6 +960,7 @@ qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb_checksum_none_assert(skb);
}
+
buffer->skb = NULL;
return skb;
@@ -871,8 +1000,8 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
- int index, length, cksum, pkt_offset;
- u16 vid = 0xffff;
+ int index, length, cksum, pkt_offset, is_lb_pkt;
+ u16 vid = 0xffff, t_vid;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
@@ -892,6 +1021,14 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+
if (length > rds_ring->skb_size)
skb_put(skb, rds_ring->skb_size);
else
@@ -933,10 +1070,11 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
struct tcphdr *th;
bool push, timestamp;
- int index, l2_hdr_offset, l4_hdr_offset;
- u16 lro_length, length, data_offset, vid = 0xffff;
+ int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
+ u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
u32 seq_number;
if (unlikely(ring > adapter->max_rds_rings))
@@ -961,6 +1099,14 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+
if (timestamp)
data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
else
@@ -976,18 +1122,32 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
}
skb->protocol = eth_type_trans(skb, netdev);
- iph = (struct iphdr *)skb->data;
- th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
- length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
- iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+
th->psh = push;
th->seq = htonl(seq_number);
length = skb->len;
- if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, vid);
@@ -1006,9 +1166,9 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
struct list_head *cur;
struct status_desc *desc;
struct qlcnic_rx_buffer *rxbuf;
+ int opcode, desc_cnt, count = 0;
u64 sts_data0, sts_data1;
- __le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM);
- int opcode, ring, desc_cnt, count = 0;
+ u8 ring;
u32 consumer = sds_ring->consumer;
while (count < max) {
@@ -1020,7 +1180,6 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
opcode = qlcnic_get_sts_opcode(sts_data0);
-
switch (opcode) {
case QLCNIC_RXPKT_DESC:
case QLCNIC_OLD_RXPKT_DESC:
@@ -1040,18 +1199,16 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
default:
goto skip;
}
-
WARN_ON(desc_cnt > 1);
if (likely(rxbuf))
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
else
adapter->stats.null_rxbuf++;
-
skip:
for (; desc_cnt > 0; desc_cnt--) {
desc = &sds_ring->desc_head[consumer];
- desc->status_desc_data[0] = owner_phantom;
+ desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
consumer = get_next_index(consumer, sds_ring->num_desc);
}
count++;
@@ -1059,7 +1216,6 @@ skip:
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
-
if (!list_empty(&sds_ring->free_list[ring])) {
list_for_each(cur, &sds_ring->free_list[ring]) {
rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
@@ -1072,7 +1228,7 @@ skip:
spin_unlock(&rds_ring->lock);
}
- qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
}
if (count) {
@@ -1084,12 +1240,12 @@ skip:
}
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring)
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
struct rcv_desc *pdesc;
struct qlcnic_rx_buffer *buffer;
int count = 0;
- u32 producer;
+ u32 producer, handle;
struct list_head *head;
producer = rds_ring->producer;
@@ -1110,7 +1266,9 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
- pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
+ ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
producer = get_next_index(producer, rds_ring->num_desc);
}
@@ -1180,7 +1338,7 @@ static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
return;
}
-void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
struct qlcnic_adapter *adapter = sds_ring->adapter;
struct status_desc *desc;
@@ -1217,26 +1375,8 @@ void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
writel(consumer, sds_ring->crb_sts_consumer);
}
-void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
-{
- u32 mac_low, mac_high;
- int i;
-
- mac_low = off1;
- mac_high = off2;
-
- if (alt_mac) {
- mac_low |= (mac_low >> 16) | (mac_high << 16);
- mac_high >>= 16;
- }
-
- for (i = 0; i < 2; i++)
- mac[i] = (u8)(mac_high >> ((1 - i) * 8));
- for (i = 2; i < 6; i++)
- mac[i] = (u8)(mac_low >> ((5 - i) * 8));
-}
-
-int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
{
int ring, max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1249,8 +1389,7 @@ int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
-
- if (ring == max_sds_rings - 1)
+ if (ring == adapter->max_sds_rings - 1)
netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
QLCNIC_NETDEV_WEIGHT / max_sds_rings);
else
@@ -1258,10 +1397,15 @@ int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
QLCNIC_NETDEV_WEIGHT*2);
}
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
+ }
+
return 0;
}
-void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1273,9 +1417,10 @@ void qlcnic_napi_del(struct qlcnic_adapter *adapter)
}
qlcnic_free_sds_rings(adapter->recv_ctx);
+ qlcnic_free_tx_rings(adapter);
}
-void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1291,7 +1436,7 @@ void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
}
}
-void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1307,3 +1452,481 @@ void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
napi_disable(&sds_ring->napi);
}
}
+
+#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
+#define QLC_83XX_LRO_LB_PKT (1ULL << 46)
+
+static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
+{
+ if (lro_pkt)
+ return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
+ else
+ return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, is_lb_pkt;
+ u16 vid = 0xffff, t_vid;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+ length = qlcnic_83xx_pktln(sts_data[0]);
+ cksum = qlcnic_83xx_csum_status(sts_data[1]);
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ bool push;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index, is_lb_pkt;
+ u16 lro_length, length, data_offset, gso_size;
+ u16 vid = 0xffff, t_vid;
+
+ if (unlikely(ring > adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index > rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
+ l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
+ l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
+ push = qlcnic_83xx_is_psh_bit(sts_data[1]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+ if (qlcnic_83xx_is_tstamp(sts_data[1]))
+ data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+ skb_pull(skb, l2_hdr_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+
+ th->psh = push;
+ length = skb->len;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
+ gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
+ skb_shinfo(skb)->gso_size = gso_size;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.lrobytes += length;
+ return buffer;
+}
+
+static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max)
+{
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf = NULL;
+ u8 ring;
+ u64 sts_data[2];
+ int count = 0, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ break;
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
+
+ switch (opcode) {
+ case QLC_83XX_REG_DESC:
+ rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
+ ring, sts_data);
+ break;
+ case QLC_83XX_LRO_DESC:
+ rxbuf = qlcnic_83xx_process_lro(adapter, ring,
+ sts_data);
+ break;
+ default:
+ dev_info(&adapter->pdev->dev,
+			 "Unknown opcode: 0x%x\n", opcode);
+ goto skip;
+ }
+
+ if (likely(rxbuf))
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
+skip:
+ desc = &sds_ring->desc_head[consumer];
+ /* Reset the descriptor */
+ desc->status_desc_data[1] = 0;
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ count++;
+ }
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+ list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
+ }
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+ return count;
+}
+
+static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete;
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ /* tx ring count = 1 */
+ tx_ring = adapter->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+
+ budget = QLCNIC_TX_POLL_BUDGET;
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+}
+
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_83xx_disable_intr(adapter, sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
+}
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring, max_sds_rings;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ max_sds_rings = adapter->max_sds_rings;
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_rx_poll,
+ QLCNIC_NETDEV_WEIGHT * 2);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_poll,
+ QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+ }
+
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add(netdev, &tx_ring->napi,
+ qlcnic_83xx_msix_tx_poll,
+ QLCNIC_NETDEV_WEIGHT);
+ }
+ }
+
+ return 0;
+}
+
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
+ qlcnic_free_tx_rings(adapter);
+}
+
+void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data[])
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return;
+
+ length = qlcnic_83xx_pktln(sts_data[0]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+ adapter->ahw->diag_cnt++;
+ else
+ dump_skb(skb, adapter);
+
+ dev_kfree_skb_any(skb);
+ return;
+}
+
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ u64 sts_data[2];
+ int ring, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ return;
+
+ ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+ qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index d833f5927891..28a6d4838364 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1,24 +1,25 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
+#include "qlcnic_hw.h"
#include <linux/swab.h>
#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
-#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>
+#include <linux/pci.h>
MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -29,28 +30,28 @@ char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
-static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
-MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
+MODULE_PARM_DESC(qlcnic_mac_learn,
+ "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
-static int qlcnic_use_msi = 1;
+int qlcnic_use_msi = 1;
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
module_param_named(use_msi, qlcnic_use_msi, int, 0444);
-static int qlcnic_use_msi_x = 1;
+int qlcnic_use_msi_x = 1;
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
-static int qlcnic_auto_fw_reset = 1;
+int qlcnic_auto_fw_reset = 1;
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
-static int qlcnic_load_fw_file;
+int qlcnic_load_fw_file;
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
-static int qlcnic_config_npars;
+int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
@@ -62,9 +63,6 @@ static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
-static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
- work_func_t func, int delay);
-static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif
@@ -77,9 +75,9 @@ static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data);
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
-static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
@@ -93,15 +91,24 @@ static int qlcnic_vlan_rx_del(struct net_device *, u16);
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X)
+ return ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX;
+ else
+ return 1;
+}
+
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
-#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
-
static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
{0,}
};
@@ -120,6 +127,32 @@ static const u32 msi_tgt_status[8] = {
ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};
+static const u32 qlcnic_reg_tbl[] = {
+ 0x1B20A8, /* PEG_HALT_STAT1 */
+ 0x1B20AC, /* PEG_HALT_STAT2 */
+ 0x1B20B0, /* FW_HEARTBEAT */
+ 0x1B2100, /* LOCK ID */
+ 0x1B2128, /* FW_CAPABILITIES */
+ 0x1B2138, /* drv active */
+ 0x1B2140, /* dev state */
+ 0x1B2144, /* drv state */
+ 0x1B2148, /* drv scratch */
+ 0x1B214C, /* dev partition info */
+ 0x1B2174, /* drv idc ver */
+ 0x1B2150, /* fw version major */
+ 0x1B2154, /* fw version minor */
+ 0x1B2158, /* fw version sub */
+ 0x1B219C, /* npar state */
+ 0x1B21FC, /* FW_IMG_VALID */
+ 0x1B2250, /* CMD_PEG_STATE */
+ 0x1B233C, /* RCV_PEG_STATE */
+ 0x1B23B4, /* ASIC TEMP */
+ 0x1B216C, /* FW api */
+ 0x1B2170, /* drv op mode */
+ 0x13C010, /* flash lock */
+ 0x13C014, /* flash unlock */
+};
+
static const struct qlcnic_board_info qlcnic_boards[] = {
{0x1077, 0x8020, 0x1077, 0x203,
"8200 Series Single Port 10GbE Converged Network Adapter"
@@ -143,6 +176,7 @@ static const struct qlcnic_board_info qlcnic_boards[] = {
};
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+#define QLC_MAX_SDS_RINGS 8
static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
@@ -164,35 +198,6 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
recv_ctx->sds_rings = NULL;
}
-static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
-{
- memset(&adapter->stats, 0, sizeof(adapter->stats));
-}
-
-static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
-{
- u32 control;
- int pos;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_dword(pdev, pos, &control);
- if (enable)
- control |= PCI_MSIX_FLAGS_ENABLE;
- else
- control = 0;
- pci_write_config_dword(pdev, pos, control);
- }
-}
-
-static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
-{
- int i;
-
- for (i = 0; i < count; i++)
- adapter->msix_entries[i].entry = i;
-}
-
static int
qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
@@ -204,12 +209,11 @@ qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
return -EIO;
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
netdev->dev_addr);
@@ -225,7 +229,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return -EOPNOTSUPP;
if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
+ return -EINVAL;
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_device_detach(netdev);
@@ -243,6 +247,85 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
+static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev, const unsigned char *addr)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EOPNOTSUPP;
+
+ if (!adapter->fdb_mac_learn) {
+ pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
+ __func__);
+ return err;
+ }
+
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if (is_unicast_ether_addr(addr))
+ err = qlcnic_nic_del_mac(adapter, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(netdev, addr);
+ else
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev,
+ const unsigned char *addr, u16 flags)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (!adapter->fdb_mac_learn) {
+ pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ pr_info("%s: FDB e-switch is not enabled\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (ether_addr_equal(addr, adapter->mac_addr))
+ return err;
+
+ if (is_unicast_ether_addr(addr))
+ err = qlcnic_nic_add_mac(adapter, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(netdev, addr);
+ else
+ err = -EINVAL;
+
+ return err;
+}
+
+static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
+ struct net_device *netdev, int idx)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!adapter->fdb_mac_learn) {
+ pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
+
+ return idx;
+}
+
+static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
@@ -257,6 +340,9 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_tx_timeout = qlcnic_tx_timeout,
.ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
.ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
+ .ndo_fdb_add = qlcnic_fdb_add,
+ .ndo_fdb_del = qlcnic_fdb_del,
+ .ndo_fdb_dump = qlcnic_fdb_dump,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -267,50 +353,125 @@ static const struct net_device_ops qlcnic_netdev_failed_ops = {
};
static struct qlcnic_nic_template qlcnic_ops = {
- .config_bridged_mode = qlcnic_config_bridged_mode,
- .config_led = qlcnic_config_led,
- .start_firmware = qlcnic_start_firmware
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_82xx_config_led,
+ .start_firmware = qlcnic_82xx_start_firmware,
+ .request_reset = qlcnic_82xx_dev_request_reset,
+ .cancel_idc_work = qlcnic_82xx_cancel_idc_work,
+ .napi_add = qlcnic_82xx_napi_add,
+ .napi_del = qlcnic_82xx_napi_del,
+ .config_ipaddr = qlcnic_82xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr,
};
-static struct qlcnic_nic_template qlcnic_vf_ops = {
- .config_bridged_mode = qlcnicvf_config_bridged_mode,
- .config_led = qlcnicvf_config_led,
- .start_firmware = qlcnicvf_start_firmware
+struct qlcnic_nic_template qlcnic_vf_ops = {
+ .config_bridged_mode = qlcnicvf_config_bridged_mode,
+ .config_led = qlcnicvf_config_led,
+ .start_firmware = qlcnicvf_start_firmware
};
-static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+static struct qlcnic_hardware_ops qlcnic_hw_ops = {
+ .read_crb = qlcnic_82xx_read_crb,
+ .write_crb = qlcnic_82xx_write_crb,
+ .read_reg = qlcnic_82xx_hw_read_wx_2M,
+ .write_reg = qlcnic_82xx_hw_write_wx_2M,
+ .get_mac_address = qlcnic_82xx_get_mac_address,
+ .setup_intr = qlcnic_82xx_setup_intr,
+ .alloc_mbx_args = qlcnic_82xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_82xx_issue_cmd,
+ .get_func_no = qlcnic_82xx_get_func_no,
+ .api_lock = qlcnic_82xx_api_lock,
+ .api_unlock = qlcnic_82xx_api_unlock,
+ .add_sysfs = qlcnic_82xx_add_sysfs,
+ .remove_sysfs = qlcnic_82xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx,
+ .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx,
+ .setup_link_event = qlcnic_82xx_linkevent_request,
+ .get_nic_info = qlcnic_82xx_get_nic_info,
+ .get_pci_info = qlcnic_82xx_get_pci_info,
+ .set_nic_info = qlcnic_82xx_set_nic_info,
+ .change_macvlan = qlcnic_82xx_sre_macaddr_change,
+ .napi_enable = qlcnic_82xx_napi_enable,
+ .napi_disable = qlcnic_82xx_napi_disable,
+ .config_intr_coal = qlcnic_82xx_config_intr_coalesce,
+ .config_rss = qlcnic_82xx_config_rss,
+ .config_hw_lro = qlcnic_82xx_config_hw_lro,
+ .config_loopback = qlcnic_82xx_set_lb_mode,
+ .clear_loopback = qlcnic_82xx_clear_lb_mode,
+ .config_promisc_mode = qlcnic_82xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_82xx_change_filter,
+ .get_board_info = qlcnic_82xx_get_board_info,
+};
+
+int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int err = -1;
+ int err = -1, i;
+ int max_tx_rings;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
adapter->max_sds_rings = 1;
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
- qlcnic_set_msix_bit(pdev, 0);
if (adapter->ahw->msix_supported) {
enable_msix:
- qlcnic_init_msix_entries(adapter, num_msix);
+ for (i = 0; i < num_msix; i++)
+ adapter->msix_entries[i].entry = i;
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
if (err == 0) {
adapter->flags |= QLCNIC_MSIX_ENABLED;
- qlcnic_set_msix_bit(pdev, 1);
-
- adapter->max_sds_rings = num_msix;
-
+ if (qlcnic_83xx_check(adapter)) {
+ adapter->ahw->num_msix = num_msix;
+ /* subtract mail box and tx ring vectors */
+ max_tx_rings = adapter->max_drv_tx_rings;
+ adapter->max_sds_rings = num_msix -
+ max_tx_rings - 1;
+ } else {
+ adapter->max_sds_rings = num_msix;
+ }
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
- }
- if (err > 0) {
- num_msix = rounddown_pow_of_two(err);
- if (num_msix)
+ } else if (err > 0) {
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
+ if (qlcnic_83xx_check(adapter)) {
+ if (err < QLC_83XX_MINIMUM_VECTOR)
+ return err;
+ err -= (adapter->max_drv_tx_rings + 1);
+ num_msix = rounddown_pow_of_two(err);
+ num_msix += (adapter->max_drv_tx_rings + 1);
+ } else {
+ num_msix = rounddown_pow_of_two(err);
+ }
+
+ if (num_msix) {
+ dev_info(&pdev->dev,
+ "Trying to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
goto enable_msix;
+ }
+ } else {
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
}
}
+
return err;
}
-static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
+static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
{
+ int err = 0;
u32 offset, mask_reg;
const struct qlcnic_legacy_intr_set *legacy_intrp;
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -323,8 +484,10 @@ static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
offset);
dev_info(&pdev->dev, "using msi interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
- return;
+ return err;
}
+ if (qlcnic_use_msi || qlcnic_use_msi_x)
+ return -EOPNOTSUPP;
legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
@@ -336,32 +499,47 @@ static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
dev_info(&pdev->dev, "using legacy interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
+ return err;
}
-static void
-qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
{
- int num_msix;
+ int num_msix, err = 0;
- if (adapter->ahw->msix_supported) {
+ if (!num_intr)
+ num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
+
+ if (adapter->ahw->msix_supported)
num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- QLCNIC_DEF_NUM_STS_DESC_RINGS));
- } else
+ num_intr));
+ else
num_msix = 1;
- if (!qlcnic_enable_msix(adapter, num_msix))
- return;
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM || !err)
+ return err;
- qlcnic_enable_msi_legacy(adapter);
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (!err)
+ return err;
+
+ return -EIO;
}
-static void
-qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
{
if (adapter->flags & QLCNIC_MSIX_ENABLED)
pci_disable_msix(adapter->pdev);
if (adapter->flags & QLCNIC_MSI_ENABLED)
pci_disable_msi(adapter->pdev);
+
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ if (adapter->ahw->intr_tbl) {
+ vfree(adapter->ahw->intr_tbl);
+ adapter->ahw->intr_tbl = NULL;
+ }
}
static void
@@ -371,7 +549,36 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
iounmap(adapter->ahw->pci_base0);
}
-static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_pci_info *pci_info;
+ int ret;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ switch (adapter->ahw->port_type) {
+ case QLCNIC_GBE:
+ adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
+ break;
+ case QLCNIC_XGBE:
+ adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
+ break;
+ }
+ return 0;
+ }
+
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ return 0;
+
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ kfree(pci_info);
+ return ret;
+}
+
+int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
struct qlcnic_pci_info *pci_info;
int i, ret = 0, j = 0;
@@ -423,8 +630,11 @@ static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
j++;
}
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_enable_eswitch(adapter, i, 1);
+ }
kfree(pci_info);
return 0;
@@ -462,40 +672,31 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
QLC_DEV_SET_DRV(0xf, id));
}
} else {
- data = QLCRD32(adapter, QLCNIC_DRV_OP_MODE);
+ data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) |
(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
ahw->pci_func));
}
- QLCWR32(adapter, QLCNIC_DRV_OP_MODE, data);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data);
qlcnic_api_unlock(adapter);
err_lock:
return ret;
}
-static void
-qlcnic_check_vf(struct qlcnic_adapter *adapter)
+static void qlcnic_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
{
- void __iomem *msix_base_addr;
- void __iomem *priv_op;
- u32 func;
- u32 msix_base;
u32 op_mode, priv_level;
/* Determine FW API version */
- adapter->ahw->fw_hal_version = readl(adapter->ahw->pci_base0 +
- QLCNIC_FW_API);
+ adapter->ahw->fw_hal_version = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FW_API);
/* Find PCI function number */
- pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
- msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
- msix_base = readl(msix_base_addr);
- func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
- adapter->ahw->pci_func = func;
+ qlcnic_get_func_no(adapter);
/* Determine function privilege level */
- priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
- op_mode = readl(priv_op);
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
if (op_mode == QLC_DEV_DRV_DEFAULT)
priv_level = QLCNIC_MGMT_FUNC;
else
@@ -512,12 +713,16 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter)
}
#define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
{
switch (dev_id) {
case PCI_DEVICE_ID_QLOGIC_QLE824X:
*bar = QLCNIC_82XX_BAR0_LENGTH;
break;
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ *bar = QLCNIC_83XX_BAR0_LENGTH;
+ break;
default:
*bar = 0;
}
@@ -547,6 +752,7 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
}
dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
ahw->pci_base0 = mem_ptr0;
ahw->pci_len0 = pci_len0;
offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
@@ -581,19 +787,26 @@ static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
+ int err;
u32 fw_major, fw_minor, fw_build, prev_fw_version;
struct pci_dev *pdev = adapter->pdev;
- struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
prev_fw_version = adapter->fw_version;
- fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
- fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
- fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
- if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
+ err = qlcnic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ return;
+ }
+ if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
if (fw_dump->tmpl_hdr == NULL ||
adapter->fw_version > prev_fw_version) {
if (fw_dump->tmpl_hdr)
@@ -604,8 +817,9 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
}
}
- dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
- fw_major, fw_minor, fw_build);
+ dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
if (adapter->ahw->port_type == QLCNIC_XGBE) {
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
@@ -648,9 +862,19 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
adapter->ahw->capabilities = nic_info.capabilities;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ u32 temp;
+ temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ adapter->ahw->capabilities2 = temp;
+ }
adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
adapter->ahw->max_mtu = nic_info.max_mtu;
+ /* Disable NPAR for 83XX */
+ if (qlcnic_83xx_check(adapter))
+ return err;
+
if (adapter->ahw->capabilities & BIT_6)
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
else
@@ -709,7 +933,7 @@ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
qlcnic_set_netdev_features(adapter, esw_cfg);
}
-static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
{
struct qlcnic_esw_func_cfg esw_cfg;
@@ -730,14 +954,17 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
{
struct net_device *netdev = adapter->netdev;
- netdev_features_t features, vlan_features;
+ unsigned long features, vlan_features;
+
+ if (qlcnic_83xx_check(adapter))
+ return;
features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_GRO);
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO);
vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
+ NETIF_F_IPV6_CSUM);
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
features |= (NETIF_F_TSO | NETIF_F_TSO6);
vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
}
@@ -747,12 +974,19 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
if (esw_cfg->offload_flags & BIT_0) {
netdev->features |= features;
- if (!(esw_cfg->offload_flags & BIT_1))
+ adapter->rx_csum = 1;
+ if (!(esw_cfg->offload_flags & BIT_1)) {
netdev->features &= ~NETIF_F_TSO;
- if (!(esw_cfg->offload_flags & BIT_2))
+ features &= ~NETIF_F_TSO;
+ }
+ if (!(esw_cfg->offload_flags & BIT_2)) {
netdev->features &= ~NETIF_F_TSO6;
+ features &= ~NETIF_F_TSO6;
+ }
} else {
netdev->features &= ~features;
+ features &= ~features;
+ adapter->rx_csum = 0;
}
netdev->vlan_features = (features & vlan_features);
@@ -761,7 +995,6 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
static int
qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
{
- void __iomem *priv_op;
u32 op_mode, priv_level;
int err = 0;
@@ -772,8 +1005,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
return 0;
- priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
- op_mode = readl(priv_op);
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
if (op_mode == QLC_DEV_DRV_DEFAULT)
@@ -805,7 +1037,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
return err;
}
-static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
{
struct qlcnic_esw_func_cfg esw_cfg;
struct qlcnic_npar_info *npar;
@@ -838,6 +1070,7 @@ static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
return 0;
}
+
static int
qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
struct qlcnic_npar_info *npar, int pci_func)
@@ -861,7 +1094,7 @@ qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
return 0;
}
-static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
+int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
int i, err;
struct qlcnic_npar_info *npar;
@@ -877,8 +1110,7 @@ static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
npar = &adapter->npars[i];
pci_func = npar->pci_func;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
- err = qlcnic_get_nic_info(adapter,
- &nic_info, pci_func);
+ err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (err)
return err;
nic_info.min_tx_bw = npar->min_bw;
@@ -909,14 +1141,16 @@ static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
return 0;
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
msleep(1000);
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
}
if (!npar_opt_timeo) {
dev_err(&adapter->pdev->dev,
- "Waiting for NPAR state to opertional timeout\n");
+			"Waiting for NPAR state to become operational timed out\n");
return -EIO;
}
return 0;
@@ -944,8 +1178,7 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
return err;
}
-static int
-qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
{
int err;
@@ -985,9 +1218,8 @@ check_fw_status:
if (err)
goto err_out;
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
qlcnic_idc_debug_info(adapter, 1);
-
err = qlcnic_check_eswitch_mode(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
@@ -1005,7 +1237,7 @@ check_fw_status:
return 0;
err_out:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
dev_err(&adapter->pdev->dev, "Device state set to failed\n");
qlcnic_release_firmware(adapter);
@@ -1017,6 +1249,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
{
irq_handler_t handler;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
int err, ring;
unsigned long flags = 0;
@@ -1024,7 +1257,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- handler = qlcnic_tmp_intr;
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_tmp_intr;
if (!QLCNIC_IS_MSI_FAMILY(adapter))
flags |= IRQF_SHARED;
@@ -1035,20 +1269,44 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
handler = qlcnic_msi_intr;
else {
flags |= IRQF_SHARED;
- handler = qlcnic_intr;
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_intr;
+ else
+ handler = qlcnic_83xx_intr;
}
}
adapter->irq = netdev->irq;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
- err = request_irq(sds_ring->irq, handler,
- flags, sds_ring->name, sds_ring);
- if (err)
- return err;
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ snprintf(sds_ring->name, sizeof(int) + IFNAMSIZ,
+ "%s[%d]", netdev->name, ring);
+ err = request_irq(sds_ring->irq, handler, flags,
+ sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+ }
+ if (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ handler = qlcnic_msix_tx_intr;
+ for (ring = 0; ring < adapter->max_drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ snprintf(tx_ring->name, sizeof(int) + IFNAMSIZ,
+ "%s[%d]", netdev->name,
+ adapter->max_sds_rings + ring);
+ err = request_irq(tx_ring->irq, handler, flags,
+ tx_ring->name, tx_ring);
+ if (err)
+ return err;
+ }
+ }
}
-
return 0;
}
@@ -1057,21 +1315,48 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- free_irq(sds_ring->irq, sds_ring);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+ }
+ if (qlcnic_83xx_check(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->irq)
+ free_irq(tx_ring->irq, tx_ring);
+ }
+ }
}
}
-static int
-__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
{
- int ring;
- u32 capab2;
+ u32 capab = 0;
+
+ if (qlcnic_82xx_check(adapter)) {
+ if (adapter->ahw->capabilities2 &
+ QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ } else {
+ capab = adapter->ahw->capabilities;
+ if (QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(capab))
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ }
+}
+int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
struct qlcnic_host_rds_ring *rds_ring;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1081,19 +1366,14 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
return 0;
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
-
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
- capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
- if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
- adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
- }
+ qlcnic_get_lro_mss_capability(adapter);
if (qlcnic_fw_create_ctx(adapter))
return -EIO;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, rds_ring);
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
}
qlcnic_set_multi(netdev);
@@ -1118,10 +1398,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
return 0;
}
-/* Usage: During resume and firmware recovery module.*/
-
-static int
-qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int err = 0;
@@ -1133,8 +1410,7 @@ qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
return err;
}
-static void
-__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -1166,8 +1442,7 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
/* Usage: During suspend and firmware recovery module */
-static void
-qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+void qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
rtnl_lock();
if (netif_running(netdev))
@@ -1176,7 +1451,7 @@ qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
}
-static int
+int
qlcnic_attach(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1222,8 +1497,7 @@ err_out_napi_del:
return err;
}
-static void
-qlcnic_detach(struct qlcnic_adapter *adapter)
+void qlcnic_detach(struct qlcnic_adapter *adapter)
{
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -1272,21 +1546,9 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
int err = 0;
- adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
- GFP_KERNEL);
- if (!adapter->ahw) {
- dev_err(&adapter->pdev->dev,
- "Failed to allocate recv ctx resources for adapter\n");
- err = -ENOMEM;
- goto err_out;
- }
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
GFP_KERNEL);
if (!adapter->recv_ctx) {
- dev_err(&adapter->pdev->dev,
- "Failed to allocate recv ctx resources for adapter\n");
- kfree(adapter->ahw);
- adapter->ahw = NULL;
err = -ENOMEM;
goto err_out;
}
@@ -1294,6 +1556,8 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ /* clear stats */
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
return err;
}
@@ -1307,8 +1571,9 @@ static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
vfree(adapter->ahw->fw_dump.tmpl_hdr);
adapter->ahw->fw_dump.tmpl_hdr = NULL;
}
- kfree(adapter->ahw);
- adapter->ahw = NULL;
+
+ kfree(adapter->ahw->reset.buff);
+ adapter->ahw->fw_dump.tmpl_hdr = NULL;
}
int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
@@ -1328,6 +1593,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
adapter->max_sds_rings = 1;
adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
ret = qlcnic_attach(adapter);
if (ret) {
@@ -1344,7 +1610,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, rds_ring);
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
@@ -1382,6 +1648,7 @@ qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ dev_err(&adapter->pdev->dev, "%s:\n", __func__);
return 0;
}
@@ -1425,34 +1692,40 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
int err;
struct pci_dev *pdev = adapter->pdev;
+ adapter->rx_csum = 1;
adapter->ahw->mc_enabled = 0;
- adapter->ahw->max_mc_count = 38;
+ adapter->ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
netdev->netdev_ops = &qlcnic_netdev_ops;
- netdev->watchdog_timeo = 5*HZ;
+ netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ;
qlcnic_change_mtu(netdev, netdev->mtu);
SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
- netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_RX);
+ netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+ netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ }
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
- netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- if (pci_using_dac == 1)
- netdev->hw_features |= NETIF_F_HIGHDMA;
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
- netdev->vlan_features = netdev->hw_features;
+ if (qlcnic_vlan_tx_check(adapter))
+ netdev->features |= (NETIF_F_HW_VLAN_TX);
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
- netdev->hw_features |= NETIF_F_HW_VLAN_TX;
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
- netdev->hw_features |= NETIF_F_LRO;
-
- netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ netdev->features |= NETIF_F_LRO;
+ netdev->hw_features = netdev->features;
netdev->irq = adapter->msix_entries[0].vector;
err = register_netdev(netdev);
@@ -1480,17 +1753,61 @@ static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
return 0;
}
-static int
-qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
+void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
{
- adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
- GFP_KERNEL);
+ int ring;
+ struct qlcnic_host_tx_ring *tx_ring;
- if (adapter->msix_entries)
- return 0;
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
+ vfree(tx_ring->cmd_buf_arr);
+ tx_ring->cmd_buf_arr = NULL;
+ }
+ }
+ if (adapter->tx_ring != NULL)
+ kfree(adapter->tx_ring);
+}
+
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring, vector, index;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+
+ tx_ring = kcalloc(adapter->max_drv_tx_rings,
+ sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
+ return -ENOMEM;
- dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
- return -ENOMEM;
+ adapter->tx_ring = tx_ring;
+
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, ring);
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ qlcnic_free_tx_rings(adapter);
+ return -ENOMEM;
+ }
+ memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->adapter = adapter;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ index = adapter->max_sds_rings + ring;
+ vector = adapter->msix_entries[index].vector;
+ tx_ring->irq = vector;
+ }
+ }
+ }
+ return 0;
}
static int
@@ -1498,9 +1815,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
struct qlcnic_adapter *adapter = NULL;
+ struct qlcnic_hardware_context *ahw;
int err, pci_using_dac = -1;
- uint8_t revision_id;
- char board_name[QLCNIC_MAX_BOARD_NAME_LEN];
+ u32 capab2;
+ char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
err = pci_enable_device(pdev);
if (err)
@@ -1522,10 +1840,27 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_enable_pcie_error_reporting(pdev);
+ ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
+ if (!ahw)
+ goto err_out_free_res;
+
+ if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE824X) {
+ ahw->hw_ops = &qlcnic_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_reg_tbl;
+ } else if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
+ qlcnic_83xx_register_map(ahw);
+ } else {
+ goto err_out_free_hw_res;
+ }
+
+ err = qlcnic_setup_pci_map(pdev, ahw);
+ if (err)
+ goto err_out_free_hw_res;
+
netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
if (!netdev) {
err = -ENOMEM;
- goto err_out_free_res;
+ goto err_out_iounmap;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -1533,15 +1868,25 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
+ adapter->ahw = ahw;
+
+ adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
+ if (adapter->qlcnic_wq == NULL) {
+ dev_err(&pdev->dev, "Failed to create workqueue\n");
+ goto err_out_free_netdev;
+ }
err = qlcnic_alloc_adapter_resources(adapter);
if (err)
goto err_out_free_netdev;
adapter->dev_rst_time = jiffies;
- revision_id = pdev->revision;
- adapter->ahw->revision_id = revision_id;
- adapter->mac_learn = qlcnic_mac_learn;
+ adapter->ahw->revision_id = pdev->revision;
+ if (qlcnic_mac_learn == FDB_MAC_LEARN)
+ adapter->fdb_mac_learn = true;
+ else if (qlcnic_mac_learn == DRV_MAC_LEARN)
+ adapter->drv_mac_learn = true;
+ adapter->max_drv_tx_rings = 1;
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
@@ -1549,31 +1894,32 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
- err = qlcnic_setup_pci_map(pdev, adapter->ahw);
- if (err)
- goto err_out_free_hw;
- qlcnic_check_vf(adapter);
-
- /* This will be reset for mezz cards */
- adapter->portnum = adapter->ahw->pci_func;
-
- err = qlcnic_get_board_info(adapter);
- if (err) {
- dev_err(&pdev->dev, "Error getting board config info.\n");
- goto err_out_iounmap;
- }
-
- err = qlcnic_setup_idc_param(adapter);
- if (err)
- goto err_out_iounmap;
+ if (qlcnic_82xx_check(adapter)) {
+ qlcnic_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+			dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n");
+ goto err_out_free_hw;
+ }
- adapter->flags |= QLCNIC_NEED_FLR;
+ err = qlcnic_setup_idc_param(adapter);
+ if (err)
+ goto err_out_free_hw;
- err = adapter->nic_ops->start_firmware(adapter);
- if (err) {
- dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n"
- "\t\tIf reboot doesn't help, try flashing the card\n");
- goto err_out_maintenance_mode;
+ adapter->flags |= QLCNIC_NEED_FLR;
+ } else if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ err = qlcnic_83xx_init(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "%s: failed\n", __func__);
+ goto err_out_free_hw;
+ }
+ } else {
+ dev_err(&pdev->dev,
+ "%s: failed. Please Reboot\n", __func__);
+ goto err_out_free_hw;
}
if (qlcnic_read_mac_addr(adapter))
@@ -1581,22 +1927,34 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adapter->portnum == 0) {
qlcnic_get_board_name(adapter, board_name);
+
pr_info("%s: %s Board Chip rev 0x%x\n",
module_name(THIS_MODULE),
board_name, adapter->ahw->revision_id);
}
+ err = qlcnic_setup_intr(adapter, 0);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup interrupt\n");
+ goto err_out_disable_msi;
+ }
- qlcnic_clear_stats(adapter);
-
- err = qlcnic_alloc_msix_entries(adapter, adapter->ahw->max_rx_ques);
- if (err)
- goto err_out_decr_ref;
-
- qlcnic_setup_intr(adapter);
+ if (qlcnic_83xx_check(adapter)) {
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto err_out_disable_msi;
+ }
err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
if (err)
- goto err_out_disable_msi;
+ goto err_out_disable_mbx_intr;
+
+ if (qlcnic_82xx_check(adapter)) {
+ if (ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ if (capab2 & QLCNIC_FW_CAPABILITY_2_OCBB)
+ qlcnic_fw_cmd_set_drv_version(adapter);
+ }
+ }
pci_set_drvdata(pdev, adapter);
@@ -1615,29 +1973,37 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- if (adapter->mac_learn)
+ if (qlcnic_get_act_pci_func(adapter))
+ goto err_out_disable_mbx_intr;
+
+ if (adapter->drv_mac_learn)
qlcnic_alloc_lb_filters_mem(adapter);
- qlcnic_create_diag_entries(adapter);
+ qlcnic_add_sysfs(adapter);
return 0;
+err_out_disable_mbx_intr:
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_free_mbx_intr(adapter);
+
err_out_disable_msi:
qlcnic_teardown_intr(adapter);
- kfree(adapter->msix_entries);
-
-err_out_decr_ref:
+ qlcnic_cancel_idc_work(adapter);
qlcnic_clr_all_drv_state(adapter, 0);
-err_out_iounmap:
- qlcnic_cleanup_pci_map(adapter);
-
err_out_free_hw:
qlcnic_free_adapter_resources(adapter);
err_out_free_netdev:
free_netdev(netdev);
+err_out_iounmap:
+ qlcnic_cleanup_pci_map(adapter);
+
+err_out_free_hw_res:
+ kfree(ahw);
+
err_out_free_res:
pci_release_regions(pdev);
@@ -1645,24 +2011,13 @@ err_out_disable_pdev:
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
-
-err_out_maintenance_mode:
- netdev->netdev_ops = &qlcnic_netdev_failed_ops;
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
- err = register_netdev(netdev);
- if (err) {
- dev_err(&pdev->dev, "failed to register net device\n");
- goto err_out_decr_ref;
- }
- pci_set_drvdata(pdev, adapter);
- qlcnic_create_diag_entries(adapter);
- return 0;
}
static void qlcnic_remove(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter;
struct net_device *netdev;
+ struct qlcnic_hardware_context *ahw;
adapter = pci_get_drvdata(pdev);
if (adapter == NULL)
@@ -1670,10 +2025,17 @@ static void qlcnic_remove(struct pci_dev *pdev)
netdev = adapter->netdev;
- qlcnic_cancel_fw_work(adapter);
+ qlcnic_cancel_idc_work(adapter);
+ ahw = adapter->ahw;
unregister_netdev(netdev);
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+ }
+
qlcnic_detach(adapter);
if (adapter->npars != NULL)
@@ -1689,9 +2051,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_free_lb_filters_mem(adapter);
qlcnic_teardown_intr(adapter);
- kfree(adapter->msix_entries);
- qlcnic_remove_diag_entries(adapter);
+ qlcnic_remove_sysfs(adapter);
qlcnic_cleanup_pci_map(adapter);
@@ -1702,7 +2063,12 @@ static void qlcnic_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
+ if (adapter->qlcnic_wq) {
+ destroy_workqueue(adapter->qlcnic_wq);
+ adapter->qlcnic_wq = NULL;
+ }
qlcnic_free_adapter_resources(adapter);
+ kfree(ahw);
free_netdev(netdev);
}
static int __qlcnic_shutdown(struct pci_dev *pdev)
@@ -1713,7 +2079,7 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
netif_device_detach(netdev);
- qlcnic_cancel_fw_work(adapter);
+ qlcnic_cancel_idc_work(adapter);
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
@@ -1726,7 +2092,6 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
retval = pci_save_state(pdev);
if (retval)
return retval;
-
if (qlcnic_82xx_check(adapter)) {
if (qlcnic_wol_supported(adapter)) {
pci_enable_wake(pdev, PCI_D3cold, 1);
@@ -1774,7 +2139,7 @@ qlcnic_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- err = adapter->nic_ops->start_firmware(adapter);
+ err = qlcnic_start_firmware(adapter);
if (err) {
dev_err(&pdev->dev, "failed to start firmware\n");
return err;
@@ -1797,14 +2162,8 @@ done:
static int qlcnic_open(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
int err;
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
- netdev_err(netdev, "Device in FAILED state\n");
- return -EIO;
- }
-
netif_carrier_off(netdev);
err = qlcnic_attach(adapter);
@@ -1832,6 +2191,7 @@ static int qlcnic_close(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
__qlcnic_down(adapter, netdev);
+
return 0;
}
@@ -1839,22 +2199,53 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
{
void *head;
int i;
+ struct net_device *netdev = adapter->netdev;
+ u32 filter_size = 0;
+ u16 act_pci_func = 0;
if (adapter->fhash.fmax && adapter->fhash.fhead)
return;
+ act_pci_func = adapter->ahw->act_pci_func;
spin_lock_init(&adapter->mac_learn_lock);
+ spin_lock_init(&adapter->rx_mac_learn_lock);
+
+ if (qlcnic_82xx_check(adapter)) {
+ filter_size = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
+ } else {
+ filter_size = QLC_83XX_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLC_83XX_LB_BUCKET_SIZE;
+ }
+
+ head = kcalloc(adapter->fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
- head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
- GFP_KERNEL);
if (!head)
return;
- adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fmax = (filter_size / act_pci_func);
adapter->fhash.fhead = head;
- for (i = 0; i < adapter->fhash.fmax; i++)
+ netdev_info(netdev, "active nic func = %d, mac filter size=%d\n",
+ act_pci_func, adapter->fhash.fmax);
+
+ for (i = 0; i < adapter->fhash.fbucket_size; i++)
INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
+
+ adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size;
+
+ head = kcalloc(adapter->rx_fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
+
+ if (!head)
+ return;
+
+ adapter->rx_fhash.fmax = (filter_size / act_pci_func);
+ adapter->rx_fhash.fhead = head;
+
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++)
+ INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]);
}
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
@@ -1864,16 +2255,25 @@ static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
adapter->fhash.fhead = NULL;
adapter->fhash.fmax = 0;
+
+ if (adapter->rx_fhash.fmax && adapter->rx_fhash.fhead)
+ kfree(adapter->rx_fhash.fhead);
+
+ adapter->rx_fhash.fmax = 0;
+ adapter->rx_fhash.fhead = NULL;
}
-static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+int qlcnic_check_temp(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u32 temp_state, temp_val, temp = 0;
int rv = 0;
+ if (qlcnic_83xx_check(adapter))
+ temp = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+
if (qlcnic_82xx_check(adapter))
- temp = QLCRD32(adapter, CRB_TEMP_STATE);
+ temp = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
temp_state = qlcnic_get_temp_state(temp);
temp_val = qlcnic_get_temp_val(temp);
@@ -1933,7 +2333,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
return stats;
}
-static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
{
u32 status;
@@ -2009,6 +2409,14 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
+{
+ struct qlcnic_host_tx_ring *tx_ring = data;
+
+ napi_schedule(&tx_ring->napi);
+ return IRQ_HANDLED;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
@@ -2035,7 +2443,7 @@ qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
val |= encoding << 7;
val |= (jiffies - adapter->dev_rst_time) << 8;
- QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
adapter->dev_rst_time = jiffies;
}
@@ -2050,14 +2458,14 @@ qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
if (qlcnic_api_lock(adapter))
return -EIO;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
if (state == QLCNIC_DEV_NEED_RESET)
QLC_DEV_SET_RST_RDY(val, adapter->portnum);
else if (state == QLCNIC_DEV_NEED_QUISCENT)
QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2072,9 +2480,9 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -EBUSY;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2089,20 +2497,22 @@ qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
if (qlcnic_api_lock(adapter))
goto err;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
if (failed) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_FAILED);
dev_info(&adapter->pdev->dev,
"Device state set to Failed. Please Reboot\n");
} else if (!(val & 0x11111111))
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_COLD);
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
err:
@@ -2117,12 +2527,13 @@ static int
qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
{
int act, state, active_mask;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
- state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ act = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
- active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
+ active_mask = (~(1 << (ahw->pci_func * 4)));
act = act & active_mask;
}
@@ -2135,7 +2546,7 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
{
- u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
+ u32 val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
if (val != QLCNIC_DRV_IDC_VER) {
dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
@@ -2159,19 +2570,21 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -1;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (!(val & (1 << (portnum * 4)))) {
QLC_DEV_SET_REF_CNT(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
}
- prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
QLCDB(adapter, HW, "Device state = %u\n", prev_state);
switch (prev_state) {
case QLCNIC_DEV_COLD:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
- QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_DRV_IDC_VER);
qlcnic_idc_debug_info(adapter, 0);
qlcnic_api_unlock(adapter);
return 1;
@@ -2182,15 +2595,15 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
return ret;
case QLCNIC_DEV_NEED_RESET:
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_RST_RDY(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_NEED_QUISCENT:
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_QSCNT_RDY(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_FAILED:
@@ -2207,7 +2620,7 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
do {
msleep(1000);
- prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (prev_state == QLCNIC_DEV_QUISCENT)
continue;
@@ -2222,9 +2635,9 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -1;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
@@ -2243,7 +2656,7 @@ qlcnic_fwinit_work(struct work_struct *work)
if (qlcnic_api_lock(adapter))
goto err_ret;
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (dev_state == QLCNIC_DEV_QUISCENT ||
dev_state == QLCNIC_DEV_NEED_QUISCENT) {
qlcnic_api_unlock(adapter);
@@ -2272,17 +2685,19 @@ qlcnic_fwinit_work(struct work_struct *work)
if (!qlcnic_check_drv_state(adapter)) {
skip_ack_check:
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (dev_state == QLCNIC_DEV_NEED_RESET) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
- QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
set_bit(__QLCNIC_START_FW, &adapter->state);
QLCDB(adapter, DRV, "Restarting fw\n");
qlcnic_idc_debug_info(adapter, 0);
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_RST_RDY(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter,
+ QLCNIC_CRB_DRV_STATE, val);
}
qlcnic_api_unlock(adapter);
@@ -2308,12 +2723,12 @@ skip_ack_check:
qlcnic_api_unlock(adapter);
wait_npar:
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
switch (dev_state) {
case QLCNIC_DEV_READY:
- if (!adapter->nic_ops->start_firmware(adapter)) {
+ if (!qlcnic_start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
adapter->fw_wait_cnt = 0;
return;
@@ -2350,7 +2765,7 @@ qlcnic_detach_work(struct work_struct *work)
} else
qlcnic_down(adapter, netdev);
- status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
if (status & QLCNIC_RCODE_FATAL_ERROR) {
dev_err(&adapter->pdev->dev,
@@ -2401,19 +2816,18 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
{
u32 state;
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
if (state == QLCNIC_DEV_NPAR_NON_OPER)
return;
if (qlcnic_api_lock(adapter))
return;
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
}
-/*Transit to RESET state from READY state only */
-void
-qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
{
u32 state, xg_val = 0, gb_val = 0;
@@ -2428,25 +2842,22 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
dev_info(&adapter->pdev->dev, "Pause control frames disabled"
" on all ports\n");
adapter->need_fw_reset = 1;
+
if (qlcnic_api_lock(adapter))
return;
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
- netdev_err(adapter->netdev,
- "Device is in FAILED state, Please Reboot\n");
- qlcnic_api_unlock(adapter);
- return;
- }
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_READY) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_NEED_RESET);
adapter->flags |= QLCNIC_FW_RESET_OWNER;
QLCDB(adapter, DRV, "NEED_RESET state set\n");
qlcnic_idc_debug_info(adapter, 0);
}
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
}
@@ -2457,34 +2868,22 @@ qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return;
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_OPER);
QLCDB(adapter, DRV, "NPAR operational state set\n");
qlcnic_api_unlock(adapter);
}
-static void
-qlcnic_schedule_work(struct qlcnic_adapter *adapter,
- work_func_t func, int delay)
+void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay)
{
if (test_bit(__QLCNIC_AER, &adapter->state))
return;
INIT_DELAYED_WORK(&adapter->fw_work, func);
- queue_delayed_work(qlcnic_wq, &adapter->fw_work,
- round_jiffies_relative(delay));
-}
-
-static void
-qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
-{
- while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
- msleep(10);
-
- if (!adapter->fw_work.work.func)
- return;
-
- cancel_delayed_work_sync(&adapter->fw_work);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->fw_work,
+ round_jiffies_relative(delay));
}
static void
@@ -2496,7 +2895,8 @@ qlcnic_attach_work(struct work_struct *work)
u32 npar_state;
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
qlcnic_clr_all_drv_state(adapter, 0);
else if (npar_state != QLCNIC_DEV_NPAR_OPER)
@@ -2536,16 +2936,16 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
goto detach;
if (adapter->need_fw_reset)
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, 0);
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_NEED_RESET) {
qlcnic_set_npar_non_operational(adapter);
adapter->need_fw_reset = 1;
} else if (state == QLCNIC_DEV_NEED_QUISCENT)
goto detach;
- heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
if (heartbeat != adapter->heartbeat) {
adapter->heartbeat = heartbeat;
adapter->fw_fail_cnt = 0;
@@ -2565,25 +2965,25 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
adapter->flags |= QLCNIC_FW_HANG;
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, 0);
if (qlcnic_auto_fw_reset)
clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
dev_err(&adapter->pdev->dev, "firmware hang detected\n");
+ peg_status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
"PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
"PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
"PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
"PEG_NET_4_PC: 0x%x\n",
- QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1),
- QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2),
+ peg_status,
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
- peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
dev_err(&adapter->pdev->dev,
"Firmware aborted with error code 0x00006700. "
@@ -2667,17 +3067,39 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
adapter->need_fw_reset = 1;
set_bit(__QLCNIC_START_FW, &adapter->state);
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
QLCDB(adapter, DRV, "Restarting fw\n");
}
qlcnic_api_unlock(adapter);
- err = adapter->nic_ops->start_firmware(adapter);
+ err = qlcnic_start_firmware(adapter);
if (err)
return err;
qlcnic_clr_drv_state(adapter);
- qlcnic_setup_intr(adapter);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ err = qlcnic_setup_intr(adapter, 0);
+
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to setup mbx interrupt\n");
+ qlcnic_clr_all_drv_state(adapter, 1);
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ goto done;
+ }
+ }
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
@@ -2719,6 +3141,12 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+ }
+
qlcnic_detach(adapter);
qlcnic_teardown_intr(adapter);
@@ -2738,12 +3166,13 @@ static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
static void qlcnic_io_resume(struct pci_dev *pdev)
{
+ u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
pci_cleanup_aer_uncorrect_error_status(pdev);
-
- if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
- test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
+ &adapter->state))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
FW_POLL_DELAY);
}
@@ -2776,39 +3205,59 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
+int qlcnic_validate_max_rss(u8 max_hw, u8 val)
{
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
- netdev_info(netdev, "no msix or msi support, hence no rss\n");
- return -EINVAL;
+ u32 max_allowed;
+
+ if (max_hw > QLC_MAX_SDS_RINGS) {
+ max_hw = QLC_MAX_SDS_RINGS;
+ pr_info("max rss reset to %d\n", QLC_MAX_SDS_RINGS);
}
- if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
- netdev_info(netdev, "rss_ring valid range [2 - %x] in "
- " powers of 2\n", max_hw);
+ max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
+ num_online_cpus()));
+ if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) {
+ pr_info("rss_ring valid range [2 - %x] in powers of 2\n",
+ max_allowed);
return -EINVAL;
}
return 0;
-
}
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
{
+ int err;
struct net_device *netdev = adapter->netdev;
- int err = 0;
- if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
+
qlcnic_detach(adapter);
+
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_free_mbx_intr(adapter);
+
qlcnic_teardown_intr(adapter);
+ err = qlcnic_setup_intr(adapter, data);
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
- if (qlcnic_enable_msix(adapter, data)) {
- netdev_info(netdev, "failed setting max_rss; rss disabled\n");
- qlcnic_enable_msi_legacy(adapter);
+ if (qlcnic_83xx_check(adapter)) {
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to setup mbx interrupt\n");
+ goto done;
+ }
}
if (netif_running(netdev)) {
@@ -2820,6 +3269,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
goto done;
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
+ err = len;
done:
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -2858,8 +3308,7 @@ qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
in_dev_put(indev);
}
-static void
-qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
+void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device *dev;
@@ -2867,12 +3316,14 @@ qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
qlcnic_config_indev_addr(adapter, netdev, event);
+ rcu_read_lock();
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
dev = __vlan_find_dev_deep(netdev, vid);
if (!dev)
continue;
qlcnic_config_indev_addr(adapter, dev, event);
}
+ rcu_read_unlock();
}
static int qlcnic_netdev_event(struct notifier_block *this,
@@ -2940,9 +3391,11 @@ recheck:
switch (event) {
case NETDEV_UP:
qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+
break;
case NETDEV_DOWN:
qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+
break;
default:
break;
@@ -2960,11 +3413,10 @@ static struct notifier_block qlcnic_inetaddr_cb = {
.notifier_call = qlcnic_inetaddr_event,
};
#else
-static void
-qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
+void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
-static struct pci_error_handlers qlcnic_err_handler = {
+static const struct pci_error_handlers qlcnic_err_handler = {
.error_detected = qlcnic_io_error_detected,
.slot_reset = qlcnic_io_slot_reset,
.resume = qlcnic_io_resume,
@@ -2990,12 +3442,6 @@ static int __init qlcnic_init_module(void)
printk(KERN_INFO "%s\n", qlcnic_driver_string);
- qlcnic_wq = create_singlethread_workqueue("qlcnic");
- if (qlcnic_wq == NULL) {
- printk(KERN_ERR "qlcnic: cannot create workqueue\n");
- return -ENOMEM;
- }
-
#ifdef CONFIG_INET
register_netdevice_notifier(&qlcnic_netdev_cb);
register_inetaddr_notifier(&qlcnic_inetaddr_cb);
@@ -3007,7 +3453,6 @@ static int __init qlcnic_init_module(void)
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
- destroy_workqueue(qlcnic_wq);
}
return ret;
@@ -3017,14 +3462,12 @@ module_init(qlcnic_init_module);
static void __exit qlcnic_exit_module(void)
{
-
pci_unregister_driver(&qlcnic_driver);
#ifdef CONFIG_INET
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
- destroy_workqueue(qlcnic_wq);
}
module_exit(qlcnic_exit_module);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 0b8d8625834c..abbd22c814a6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1,8 +1,25 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include "qlcnic.h"
#include "qlcnic_hdr.h"
+#include "qlcnic_83xx_hw.h"
+#include "qlcnic_hw.h"
#include <net/ip.h>
+#define QLC_83XX_MINIDUMP_FLASH 0x520000
+#define QLC_83XX_OCM_INDEX 3
+#define QLC_83XX_PCI_INDEX 0
+
+static const u32 qlcnic_ms_read_data[] = {
+ 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
+};
+
#define QLCNIC_DUMP_WCRB BIT_0
#define QLCNIC_DUMP_RWCRB BIT_1
#define QLCNIC_DUMP_ANDCRB BIT_2
@@ -102,16 +119,55 @@ struct __queue {
u8 rsvd3[2];
} __packed;
+struct __pollrd {
+ u32 sel_addr;
+ u32 read_addr;
+ u32 sel_val;
+ u16 sel_val_stride;
+ u16 no_ops;
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 data_size;
+ u8 rsvd[4];
+} __packed;
+
+struct __mux2 {
+ u32 sel_addr1;
+ u32 sel_addr2;
+ u32 sel_val1;
+ u32 sel_val2;
+ u32 no_ops;
+ u32 sel_val_mask;
+ u32 read_addr;
+ u8 sel_val_stride;
+ u8 data_size;
+ u8 rsvd[2];
+} __packed;
+
+struct __pollrdmwr {
+ u32 addr1;
+ u32 addr2;
+ u32 val1;
+ u32 val2;
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 mod_mask;
+ u32 data_size;
+} __packed;
+
struct qlcnic_dump_entry {
struct qlcnic_common_entry_hdr hdr;
union {
- struct __crb crb;
- struct __cache cache;
- struct __ocm ocm;
- struct __mem mem;
- struct __mux mux;
- struct __queue que;
- struct __ctrl ctrl;
+ struct __crb crb;
+ struct __cache cache;
+ struct __ocm ocm;
+ struct __mem mem;
+ struct __mux mux;
+ struct __queue que;
+ struct __ctrl ctrl;
+ struct __pollrdmwr pollrdmwr;
+ struct __mux2 mux2;
+ struct __pollrd pollrd;
} region;
} __packed;
@@ -131,6 +187,9 @@ enum qlcnic_minidump_opcode {
QLCNIC_DUMP_L2_ITAG = 22,
QLCNIC_DUMP_L2_DATA = 23,
QLCNIC_DUMP_L2_INST = 24,
+ QLCNIC_DUMP_POLL_RD = 35,
+ QLCNIC_READ_MUX2 = 36,
+ QLCNIC_READ_POLLRDMWR = 37,
QLCNIC_DUMP_READ_ROM = 71,
QLCNIC_DUMP_READ_MEM = 72,
QLCNIC_DUMP_READ_CTRL = 98,
@@ -144,46 +203,17 @@ struct qlcnic_dump_operations {
__le32 *);
};
-static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
-{
- u32 dest;
- void __iomem *window_reg;
-
- dest = addr & 0xFFFF0000;
- window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
- writel(dest, window_reg);
- readl(window_reg);
- window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
- *data = readl(window_reg);
-}
-
-static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
-{
- u32 dest;
- void __iomem *window_reg;
-
- dest = addr & 0xFFFF0000;
- window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
- writel(dest, window_reg);
- readl(window_reg);
- window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
- writel(data, window_reg);
- readl(window_reg);
-}
-
-/* FW dump related functions */
static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
int i;
u32 addr, data;
struct __crb *crb = &entry->region.crb;
- void __iomem *base = adapter->ahw->pci_base0;
addr = crb->addr;
for (i = 0; i < crb->no_ops; i++) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(addr);
*buffer++ = cpu_to_le32(data);
addr += crb->stride;
@@ -195,7 +225,6 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
int i, k, timeout = 0;
- void __iomem *base = adapter->ahw->pci_base0;
u32 addr, data;
u8 no_ops;
struct __ctrl *ctr = &entry->region.ctrl;
@@ -211,28 +240,28 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
continue;
switch (1 << k) {
case QLCNIC_DUMP_WCRB:
- qlcnic_write_dump_reg(addr, base, ctr->val1);
+ qlcnic_ind_wr(adapter, addr, ctr->val1);
break;
case QLCNIC_DUMP_RWCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base, data);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_ANDCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base,
- data & ctr->val2);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data & ctr->val2));
break;
case QLCNIC_DUMP_ORCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base,
- data | ctr->val3);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data | ctr->val3));
break;
case QLCNIC_DUMP_POLLCRB:
while (timeout <= ctr->timeout) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
if ((data & ctr->val2) == ctr->val1)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout++;
}
if (timeout > ctr->timeout) {
@@ -244,7 +273,7 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
case QLCNIC_DUMP_RD_SAVE:
if (ctr->index_a)
addr = t_hdr->saved_state[ctr->index_a];
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
t_hdr->saved_state[ctr->index_v] = data;
break;
case QLCNIC_DUMP_WRT_SAVED:
@@ -254,7 +283,7 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
data = ctr->val1;
if (ctr->index_a)
addr = t_hdr->saved_state[ctr->index_a];
- qlcnic_write_dump_reg(addr, base, data);
+ qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_MOD_SAVE_ST:
data = t_hdr->saved_state[ctr->index_v];
@@ -283,12 +312,11 @@ static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
int loop;
u32 val, data = 0;
struct __mux *mux = &entry->region.mux;
- void __iomem *base = adapter->ahw->pci_base0;
val = mux->val;
for (loop = 0; loop < mux->no_ops; loop++) {
- qlcnic_write_dump_reg(mux->addr, base, val);
- qlcnic_read_dump_reg(mux->read_addr, base, &data);
+ qlcnic_ind_wr(adapter, mux->addr, val);
+ data = qlcnic_ind_rd(adapter, mux->read_addr);
*buffer++ = cpu_to_le32(val);
*buffer++ = cpu_to_le32(data);
val += mux->val_stride;
@@ -301,17 +329,16 @@ static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
{
int i, loop;
u32 cnt, addr, data, que_id = 0;
- void __iomem *base = adapter->ahw->pci_base0;
struct __queue *que = &entry->region.que;
addr = que->read_addr;
cnt = que->read_addr_cnt;
for (loop = 0; loop < que->no_ops; loop++) {
- qlcnic_write_dump_reg(que->sel_addr, base, que_id);
+ qlcnic_ind_wr(adapter, que->sel_addr, que_id);
addr = que->read_addr;
for (i = 0; i < cnt; i++) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += que->read_addr_stride;
}
@@ -343,27 +370,27 @@ static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
int i, count = 0;
u32 fl_addr, size, val, lck_val, addr;
struct __mem *rom = &entry->region.mem;
- void __iomem *base = adapter->ahw->pci_base0;
fl_addr = rom->addr;
- size = rom->size/4;
+ size = rom->size / 4;
lock_try:
- lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
+ lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
if (!lck_val && count < MAX_CTL_CHECK) {
- msleep(10);
+ usleep_range(10000, 11000);
count++;
goto lock_try;
}
- writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ adapter->ahw->pci_func);
for (i = 0; i < size; i++) {
addr = fl_addr & 0xFFFF0000;
- qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
+ qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
addr = LSW(fl_addr) + FLASH_ROM_DATA;
- qlcnic_read_dump_reg(addr, base, &val);
+ val = qlcnic_ind_rd(adapter, addr);
fl_addr += 4;
*buffer++ = cpu_to_le32(val);
}
- readl(base + QLCNIC_FLASH_SEM2_ULK);
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
return rom->size;
}
@@ -372,18 +399,17 @@ static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
{
int i;
u32 cnt, val, data, addr;
- void __iomem *base = adapter->ahw->pci_base0;
struct __cache *l1 = &entry->region.cache;
val = l1->init_tag_val;
for (i = 0; i < l1->no_ops; i++) {
- qlcnic_write_dump_reg(l1->addr, base, val);
- qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
+ qlcnic_ind_wr(adapter, l1->addr, val);
+ qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
addr = l1->read_addr;
cnt = l1->read_addr_num;
while (cnt) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += l1->read_addr_stride;
cnt--;
@@ -399,7 +425,6 @@ static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
int i;
u32 cnt, val, data, addr;
u8 poll_mask, poll_to, time_out = 0;
- void __iomem *base = adapter->ahw->pci_base0;
struct __cache *l2 = &entry->region.cache;
val = l2->init_tag_val;
@@ -407,17 +432,17 @@ static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
poll_to = MSB(MSW(l2->ctrl_val));
for (i = 0; i < l2->no_ops; i++) {
- qlcnic_write_dump_reg(l2->addr, base, val);
+ qlcnic_ind_wr(adapter, l2->addr, val);
if (LSW(l2->ctrl_val))
- qlcnic_write_dump_reg(l2->ctrl_addr, base,
- LSW(l2->ctrl_val));
+ qlcnic_ind_wr(adapter, l2->ctrl_addr,
+ LSW(l2->ctrl_val));
if (!poll_mask)
goto skip_poll;
do {
- qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
+ data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
if (!(data & poll_mask))
break;
- msleep(1);
+ usleep_range(1000, 2000);
time_out++;
} while (time_out <= poll_to);
@@ -431,7 +456,7 @@ skip_poll:
addr = l2->read_addr;
cnt = l2->read_addr_num;
while (cnt) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += l2->read_addr_stride;
cnt--;
@@ -447,7 +472,6 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
u32 addr, data, test, ret = 0;
int i, reg_read;
struct __mem *mem = &entry->region.mem;
- void __iomem *base = adapter->ahw->pci_base0;
reg_read = mem->size;
addr = mem->addr;
@@ -462,13 +486,12 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
mutex_lock(&adapter->ahw->mem_lock);
while (reg_read != 0) {
- qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
- qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
- qlcnic_write_dump_reg(MIU_TEST_CTR, base,
- TA_CTL_ENABLE | TA_CTL_START);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
for (i = 0; i < MAX_CTL_CHECK; i++) {
- qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
+ test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
if (!(test & TA_CTL_BUSY))
break;
}
@@ -481,8 +504,7 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
}
}
for (i = 0; i < 4; i++) {
- qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
- &data);
+ data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
*buffer++ = cpu_to_le32(data);
}
addr += 16;
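
/*
 * Note on the loop above (descriptive only): each pass of the test-agent
 * read programs a 16-byte aligned address into QLCNIC_MS_ADDR_LO/HI, kicks
 * the agent via QLCNIC_MS_CTRL with QLCNIC_TA_START_ENABLE, polls up to
 * MAX_CTL_CHECK times for TA_CTL_BUSY to clear, then drains four 32-bit
 * data registers (16 bytes) into the dump buffer before advancing addr.
 */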
@@ -501,48 +523,388 @@ static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
return 0;
}
-static const struct qlcnic_dump_operations fw_dump_ops[] = {
- { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
- { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
- { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
- { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
- { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
- { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
- { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
- { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
- { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
- { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
- { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
- { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
-};
-
-/* Walk the template and collect dump for each entry in the dump template */
-static int
-qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
- u32 size)
+static int qlcnic_valid_dump_entry(struct device *dev,
+ struct qlcnic_dump_entry *entry, u32 size)
{
int ret = 1;
if (size != entry->hdr.cap_size) {
- dev_info(dev,
- "Invalid dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
- entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
- dev_info(dev, "Aborting further dump capture\n");
+ dev_err(dev,
+ "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
+ entry->hdr.type, entry->hdr.mask, size,
+ entry->hdr.cap_size);
ret = 0;
}
return ret;
}
+static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry,
+ __le32 *buffer)
+{
+ struct __pollrdmwr *poll = &entry->region.pollrdmwr;
+ u32 data, wait_count, poll_wait, temp;
+
+ poll_wait = poll->poll_wait;
+
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((data & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
+ qlcnic_ind_wr(adapter, poll->addr2, data);
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ temp = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((temp & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ *buffer++ = cpu_to_le32(poll->addr2);
+ *buffer++ = cpu_to_le32(data);
+
+ return 2 * sizeof(u32);
+
+}
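
/*
 * Sketch of the bounded-poll idiom shared by the pollrd and pollrdmwr
 * handlers above: read a register repeatedly until a template-supplied
 * mask bit is set or the wait count runs out.  This helper is hypothetical
 * (the handlers open-code the loop); it reuses only qlcnic_ind_rd() from
 * this file.
 */
static int qlcnic_poll_mask(struct qlcnic_adapter *adapter, u32 addr,
			    u32 mask, u32 max_polls)
{
	u32 i, data;

	for (i = 0; i < max_polls; i++) {
		data = qlcnic_ind_rd(adapter, addr);
		if (data & mask)
			return 0;	/* condition met */
	}
	return -ETIMEDOUT;		/* wait count exhausted */
}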
+
+static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __pollrd *pollrd = &entry->region.pollrd;
+ u32 data, wait_count, poll_wait, sel_val;
+ int i;
+
+ poll_wait = pollrd->poll_wait;
+ sel_val = pollrd->sel_val;
+
+ for (i = 0; i < pollrd->no_ops; i++) {
+ qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
+ wait_count = 0;
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
+ if ((data & pollrd->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, pollrd->read_addr);
+ *buffer++ = cpu_to_le32(sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val += pollrd->sel_val_stride;
+ }
+ return pollrd->no_ops * (2 * sizeof(u32));
+}
+
+static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __mux2 *mux2 = &entry->region.mux2;
+ u32 data;
+ u32 t_sel_val, sel_val1, sel_val2;
+ int i;
+
+ sel_val1 = mux2->sel_val1;
+ sel_val2 = mux2->sel_val2;
+
+ for (i = 0; i < mux2->no_ops; i++) {
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
+ t_sel_val = sel_val1 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
+ t_sel_val = sel_val2 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val1 += mux2->sel_val_stride;
+ sel_val2 += mux2->sel_val_stride;
+ }
+
+ return mux2->no_ops * (4 * sizeof(u32));
+}
+
+static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ u32 fl_addr, size;
+ struct __mem *rom = &entry->region.mem;
+
+ fl_addr = rom->addr;
+ size = rom->size / 4;
+
+ if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
+ (u8 *)buffer, size))
+ return rom->size;
+
+ return 0;
+}
+
+static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
+static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
+ {QLCNIC_READ_MUX2, qlcnic_read_mux2},
+ {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
+static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
+{
+ uint64_t sum = 0;
+ int count = temp_size / sizeof(uint32_t);
+ while (count-- > 0)
+ sum += *temp_buffer++;
+ while (sum >> 32)
+ sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+ return ~sum;
+}
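
/*
 * Sketch of how a template producer could seed the checksum so that
 * qlcnic_temp_checksum() above returns 0 (the caller treats any non-zero
 * result as corruption).  Assumption: the header reserves one 32-bit
 * checksum word that is zero while the buffer is summed; the concrete
 * field layout lives in struct qlcnic_dump_template_hdr and is not shown
 * in this hunk.  Because the sum is folded with end-around carry, storing
 * the one's complement of the folded sum makes the full-buffer sum fold to
 * 0xFFFFFFFF, whose complement is 0.
 */
static u32 qlcnic_temp_make_checksum(u32 *temp_buffer, u32 temp_size)
{
	u64 sum = 0;
	int count = temp_size / sizeof(u32);

	while (count-- > 0)
		sum += *temp_buffer++;
	while (sum >> 32)
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);

	return ~(u32)sum;	/* value to store in the zeroed checksum word */
}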
+
+static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u8 *buffer, u32 size)
+{
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ buffer, size / sizeof(u32));
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return ret;
+}
+
+static int
+qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_dump_template_hdr tmp_hdr;
+ u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ (u8 *)&tmp_hdr, size);
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ cmd->rsp.arg[2] = tmp_hdr.size;
+ cmd->rsp.arg[3] = tmp_hdr.version;
+
+ return ret;
+}
+
+static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ u32 *version, u32 *temp_size,
+ u8 *use_flash_temp)
+{
+ int err = 0;
+ struct qlcnic_cmd_args cmd;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
+ return -ENOMEM;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
+ qlcnic_free_mbx_args(&cmd);
+ return -EIO;
+ }
+ *use_flash_temp = 1;
+ }
+
+ *temp_size = cmd.rsp.arg[2];
+ *version = cmd.rsp.arg[3];
+ qlcnic_free_mbx_args(&cmd);
+
+ if (!(*temp_size))
+ return -EIO;
+
+ return 0;
+}
+
+static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u32 *buffer, u32 temp_size)
+{
+ int err = 0, i;
+ void *tmp_addr;
+ __le32 *tmp_buf;
+ struct qlcnic_cmd_args cmd;
+ dma_addr_t tmp_addr_t = 0;
+
+ tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
+ &tmp_addr_t, GFP_KERNEL);
+ if (!tmp_addr) {
+ dev_err(&adapter->pdev->dev,
+ "Can't get memory for FW dump template\n");
+ return -ENOMEM;
+ }
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ cmd.req.arg[1] = LSD(tmp_addr_t);
+ cmd.req.arg[2] = MSD(tmp_addr_t);
+ cmd.req.arg[3] = temp_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ tmp_buf = tmp_addr;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ for (i = 0; i < temp_size / sizeof(u32); i++)
+ *buffer++ = __le32_to_cpu(*tmp_buf++);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+free_mem:
+ dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
+
+ return err;
+}
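
/*
 * The mailbox command above hands the firmware the DMA address of the
 * template buffer as two 32-bit words.  A small sketch of that split,
 * assuming LSD()/MSD() follow the usual qlcnic convention of lower/upper
 * 32 bits of a 64-bit value (the helper itself is hypothetical):
 */
static inline void qlcnic_fill_dma_args(struct qlcnic_cmd_args *cmd,
					dma_addr_t addr, u32 len)
{
	cmd->req.arg[1] = LSD(addr);	/* bits 31:0 of the DMA address */
	cmd->req.arg[2] = MSD(addr);	/* bits 63:32 of the DMA address */
	cmd->req.arg[3] = len;		/* buffer length in bytes */
}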
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 temp_size = 0;
+ u32 version, csum, *tmp_buf;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_dump_template_hdr *tmpl_hdr;
+ u8 use_flash_temp = 0;
+
+ ahw = adapter->ahw;
+
+ err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
+ &use_flash_temp);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Can't get template size %d\n", err);
+ return -EIO;
+ }
+
+ ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
+ if (!ahw->fw_dump.tmpl_hdr)
+ return -ENOMEM;
+
+ tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
+ if (use_flash_temp)
+ goto flash_temp;
+
+ err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
+
+ if (err) {
+flash_temp:
+ err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
+ temp_size);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get minidump template header %d\n",
+ err);
+ vfree(ahw->fw_dump.tmpl_hdr);
+ ahw->fw_dump.tmpl_hdr = NULL;
+ return -EIO;
+ }
+ }
+
+ csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
+
+ if (csum) {
+ dev_err(&adapter->pdev->dev,
+ "Template header checksum validation failed\n");
+ vfree(ahw->fw_dump.tmpl_hdr);
+ ahw->fw_dump.tmpl_hdr = NULL;
+ return -EIO;
+ }
+
+ tmpl_hdr = ahw->fw_dump.tmpl_hdr;
+ tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+ ahw->fw_dump.enable = 1;
+
+ return 0;
+}
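
/*
 * Flow summary for the routine above (descriptive only): query the
 * template size and version over the mailbox (falling back to flash for
 * the size), vzalloc() a buffer of that size, fetch the template via
 * mailbox or, on failure or when use_flash_temp is set, from flash,
 * verify the one's-complement checksum over the whole buffer, and only
 * then install the default capture mask and mark the dump facility
 * enabled.  Any failure frees tmpl_hdr and leaves dumping disabled.
 */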
+
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
__le32 *buffer;
+ u32 ocm_window;
char mesg[64];
char *msg[] = {mesg, NULL};
int i, k, ops_cnt, ops_index, dump_size = 0;
@@ -550,12 +912,23 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
struct qlcnic_dump_entry *entry;
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
+ static const struct qlcnic_dump_operations *fw_dump_ops;
+ struct qlcnic_hardware_context *ahw;
+
+ ahw = adapter->ahw;
+
+ if (!fw_dump->enable) {
+ dev_info(&adapter->pdev->dev, "Dump not enabled\n");
+ return -EIO;
+ }
if (fw_dump->clr) {
dev_info(&adapter->pdev->dev,
"Previous dump not cleared, not capturing dump\n");
return -EIO;
}
+
+ netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
/* Calculate the size for dump data area only */
for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
if (i & tmpl_hdr->drv_cap_mask)
@@ -564,20 +937,27 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
return -EIO;
fw_dump->data = vzalloc(dump_size);
- if (!fw_dump->data) {
- dev_info(&adapter->pdev->dev,
- "Unable to allocate (%d KB) for fw dump\n",
- dump_size / 1024);
+ if (!fw_dump->data)
return -ENOMEM;
- }
+
buffer = fw_dump->data;
fw_dump->size = dump_size;
no_entries = tmpl_hdr->num_entries;
- ops_cnt = ARRAY_SIZE(fw_dump_ops);
entry_offset = tmpl_hdr->offset;
tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
tmpl_hdr->sys_info[1] = adapter->fw_version;
+ if (qlcnic_82xx_check(adapter)) {
+ ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
+ fw_dump_ops = qlcnic_fw_dump_ops;
+ } else {
+ ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
+ fw_dump_ops = qlcnic_83xx_fw_dump_ops;
+ ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
+ tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+ tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+ }
+
for (i = 0; i < no_entries; i++) {
entry = (void *)tmpl_hdr + entry_offset;
if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
@@ -585,6 +965,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
entry_offset += entry->hdr.offset;
continue;
}
+
/* Find the handler for this entry */
ops_index = 0;
while (ops_index < ops_cnt) {
@@ -592,16 +973,17 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
break;
ops_index++;
}
+
if (ops_index == ops_cnt) {
dev_info(&adapter->pdev->dev,
"Invalid entry type %d, exiting dump\n",
entry->hdr.type);
goto error;
}
+
/* Collect dump for this entry */
dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
- if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
- dump))
+ if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
@@ -616,8 +998,8 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
fw_dump->clr = 1;
snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
adapter->netdev->name);
- dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
- fw_dump->size);
+ dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
+ adapter->netdev->name, fw_dump->size);
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
return 0;
@@ -626,3 +1008,21 @@ error:
vfree(fw_dump->data);
return -EINVAL;
}
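
/*
 * Sketch of the capability-mask walk performed at the top of
 * qlcnic_dump_fw(): drv_cap_mask is a bitmap starting at bit 1 and each
 * set bit contributes a per-capability size to the data area.  The
 * per-bit size array is assumed here to be hdr->cap_sizes[] (the line
 * adding the size falls outside this hunk), so treat the field name as
 * illustrative.
 */
static u32 qlcnic_dump_data_size(struct qlcnic_dump_template_hdr *hdr)
{
	u32 i, k, size = 0;

	for (i = 2, k = 1; i & QLCNIC_DUMP_MASK_MAX; i <<= 1, k++)
		if (i & hdr->drv_cap_mask)
			size += hdr->cap_sizes[k];	/* assumed field name */
	return size;
}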
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
+{
+ u32 prev_version, current_version;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+ struct pci_dev *pdev = adapter->pdev;
+
+ prev_version = adapter->fw_version;
+ current_version = qlcnic_83xx_get_fw_version(adapter);
+
+ if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
+ if (fw_dump->tmpl_hdr)
+ vfree(fw_dump->tmpl_hdr);
+ if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
+ dev_info(&pdev->dev, "Supports FW dump capability\n");
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 341d37c867ff..987fb6f8adc3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1,8 +1,16 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
+#include "qlcnic_hw.h"
#include <linux/swab.h>
#include <linux/dma-mapping.h>
@@ -13,6 +21,10 @@
#include <linux/aer.h>
#include <linux/log2.h>
+#include <linux/sysfs.h>
+
+#define QLC_STATUS_UNSUPPORTED_CMD -2
+
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
return -EOPNOTSUPP;
@@ -40,7 +52,7 @@ static ssize_t qlcnic_store_bridged_mode(struct device *dev,
if (strict_strtoul(buf, 2, &new))
goto err_out;
- if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
+ if (!qlcnic_config_bridged_mode(adapter, !!new))
ret = len;
err_out:
@@ -80,9 +92,7 @@ static ssize_t qlcnic_show_diag_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n",
- !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+ return sprintf(buf, "%d\n", !!(adapter->flags & QLCNIC_DIAG_ENABLED));
}
static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
@@ -111,10 +121,11 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
const char *buf, size_t len)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- int max_sds_rings = adapter->max_sds_rings;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, max_sds_rings = adapter->max_sds_rings;
u16 beacon;
u8 b_state, b_rate;
- int err;
+ unsigned long h_beacon;
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
dev_warn(dev,
@@ -122,6 +133,41 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
return -EOPNOTSUPP;
}
+ if (qlcnic_83xx_check(adapter) &&
+ !test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ if (kstrtoul(buf, 2, &h_beacon))
+ return -EINVAL;
+
+ if (ahw->beacon_state == h_beacon)
+ return len;
+
+ rtnl_lock();
+ if (!ahw->beacon_state) {
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE,
+ &adapter->state)) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+ }
+ if (h_beacon) {
+ err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
+ if (err)
+ goto beacon_err;
+ } else {
+ err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
+ if (err)
+ goto beacon_err;
+ }
+ /* set the current beacon state */
+ ahw->beacon_state = h_beacon;
+beacon_err:
+ if (!ahw->beacon_state)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ rtnl_unlock();
+ return len;
+ }
+
if (len != sizeof(u16))
return QL_STATUS_INVALID_PARAM;
@@ -154,11 +200,10 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
}
err = qlcnic_config_led(adapter, b_state, b_rate);
-
- if (!err) {
+ if (!err)
err = len;
- adapter->ahw->beacon_state = b_state;
- }
+ else
+ ahw->beacon_state = b_state;
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
@@ -207,21 +252,13 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
+ qlcnic_read_crb(adapter, buf, offset, size);
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
- memcpy(buf, &qmdata, size);
- } else {
- data = QLCRD32(adapter, offset);
- memcpy(buf, &data, size);
- }
return size;
}
@@ -231,21 +268,13 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- memcpy(&qmdata, buf, size);
- qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
- } else {
- memcpy(&data, buf, size);
- QLCWR32(adapter, offset, data);
- }
+ qlcnic_write_crb(adapter, buf, offset, size);
return size;
}
@@ -303,33 +332,44 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
+static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ int i;
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ if (adapter->npars[i].pci_func == pci_func)
+ return i;
+ }
+
+ return -1;
+}
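
/*
 * Sketch of how the helper above is meant to be used by the sysfs
 * writers: a user-supplied PCI function number is mapped to an index into
 * adapter->npars[] and rejected when it does not name an active NIC
 * function, so npars[] is never indexed directly by pci_func any more.
 * The wrapper below is hypothetical; only the field names come from this
 * file.
 */
static int qlcnic_get_phy_port(struct qlcnic_adapter *adapter, u8 pci_func,
			       u8 *phy_port)
{
	int index = qlcnic_is_valid_nic_func(adapter, pci_func);

	if (index < 0)
		return QL_STATUS_INVALID_PARAM;	/* not an active NIC func */
	*phy_port = adapter->npars[index].phy_port;
	return 0;
}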
+
static int validate_pm_config(struct qlcnic_adapter *adapter,
struct qlcnic_pm_func_cfg *pm_cfg, int count)
{
- u8 src_pci_func, s_esw_id, d_esw_id, dest_pci_func;
- int i;
+ u8 src_pci_func, s_esw_id, d_esw_id;
+ u8 dest_pci_func;
+ int i, src_index, dest_index;
for (i = 0; i < count; i++) {
src_pci_func = pm_cfg[i].pci_func;
dest_pci_func = pm_cfg[i].dest_npar;
- if (src_pci_func >= QLCNIC_MAX_PCI_FUNC ||
- dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
+ src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
- if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
+ if (src_index < 0)
return QL_STATUS_INVALID_PARAM;
- if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
+ dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
+ if (dest_index < 0)
return QL_STATUS_INVALID_PARAM;
- s_esw_id = adapter->npars[src_pci_func].phy_port;
- d_esw_id = adapter->npars[dest_pci_func].phy_port;
+ s_esw_id = adapter->npars[src_index].phy_port;
+ d_esw_id = adapter->npars[dest_index].phy_port;
if (s_esw_id != d_esw_id)
return QL_STATUS_INVALID_PARAM;
}
- return 0;
+ return 0;
}
static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
@@ -342,7 +382,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg *pm_cfg;
u32 id, action, pci_func;
- int count, rem, i, ret;
+ int count, rem, i, ret, index;
count = size / sizeof(struct qlcnic_pm_func_cfg);
rem = size % sizeof(struct qlcnic_pm_func_cfg);
@@ -350,26 +390,32 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
return QL_STATUS_INVALID_PARAM;
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
-
ret = validate_pm_config(adapter, pm_cfg, count);
+
if (ret)
return ret;
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
action = !!pm_cfg[i].action;
- id = adapter->npars[pci_func].phy_port;
- ret = qlcnic_config_port_mirroring(adapter, id, action,
- pci_func);
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
+
+ id = adapter->npars[index].phy_port;
+ ret = qlcnic_config_port_mirroring(adapter, id,
+ action, pci_func);
if (ret)
return ret;
}
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
- id = adapter->npars[pci_func].phy_port;
- adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
- adapter->npars[pci_func].dest_npar = id;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ id = adapter->npars[index].phy_port;
+ adapter->npars[index].enable_pm = !!pm_cfg[i].action;
+ adapter->npars[index].dest_npar = id;
}
+
return size;
}
@@ -383,16 +429,19 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
int i;
+ u8 pci_func;
if (size != sizeof(pm_cfg))
return QL_STATUS_INVALID_PARAM;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- continue;
- pm_cfg[i].action = adapter->npars[i].enable_pm;
- pm_cfg[i].dest_npar = 0;
- pm_cfg[i].pci_func = i;
+ memset(&pm_cfg, 0,
+ sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
+ pm_cfg[pci_func].dest_npar = 0;
+ pm_cfg[pci_func].pci_func = i;
}
memcpy(buf, &pm_cfg, size);
@@ -404,24 +453,33 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
{
u32 op_mode;
u8 pci_func;
- int i;
+ int i, ret;
- op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ if (qlcnic_82xx_check(adapter))
+ op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ else
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
- if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
return QL_STATUS_INVALID_PARAM;
- }
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
- if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
- QLCNIC_NON_PRIV_FUNC) {
+ if (qlcnic_82xx_check(adapter)) {
+ ret = QLC_DEV_GET_DRV(op_mode, pci_func);
+ } else {
+ ret = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ pci_func);
+ esw_cfg[i].offload_flags = 0;
+ }
+
+ if (ret != QLCNIC_NON_PRIV_FUNC) {
if (esw_cfg[i].mac_anti_spoof != 0)
return QL_STATUS_INVALID_PARAM;
if (esw_cfg[i].mac_override != 1)
@@ -444,6 +502,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
return QL_STATUS_INVALID_PARAM;
}
}
+
return 0;
}
@@ -458,7 +517,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
struct qlcnic_esw_func_cfg *esw_cfg;
struct qlcnic_npar_info *npar;
int count, rem, i, ret;
- u8 pci_func, op_mode = 0;
+ int index;
+ u8 op_mode = 0, pci_func;
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
@@ -471,10 +531,9 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
return ret;
for (i = 0; i < count; i++) {
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
return QL_STATUS_INVALID_PARAM;
- }
if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
@@ -503,7 +562,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- npar = &adapter->npars[pci_func];
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
npar->promisc_mode = esw_cfg[i].promisc_mode;
@@ -533,18 +593,21 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
- u8 i;
+ u8 i, pci_func;
if (size != sizeof(esw_cfg))
return QL_STATUS_INVALID_PARAM;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- continue;
- esw_cfg[i].pci_func = i;
- if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
+ memset(&esw_cfg, 0,
+ sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ esw_cfg[pci_func].pci_func = pci_func;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
}
+
memcpy(buf, &esw_cfg, size);
return size;
@@ -558,10 +621,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
- if (pci_func >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
-
- if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
return QL_STATUS_INVALID_PARAM;
if (!IS_VALID_BW(np_cfg[i].min_bw) ||
@@ -581,7 +641,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_info nic_info;
struct qlcnic_npar_func_cfg *np_cfg;
- int i, count, rem, ret;
+ int i, count, rem, ret, index;
u8 pci_func;
count = size / sizeof(struct qlcnic_npar_func_cfg);
@@ -594,8 +654,10 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
if (ret)
return ret;
- for (i = 0; i < count ; i++) {
+ for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (ret)
return ret;
@@ -605,12 +667,12 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
ret = qlcnic_set_nic_info(adapter, &nic_info);
if (ret)
return ret;
- adapter->npars[i].min_bw = nic_info.min_tx_bw;
- adapter->npars[i].max_bw = nic_info.max_tx_bw;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ adapter->npars[index].min_bw = nic_info.min_tx_bw;
+ adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
return size;
-
}
static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
@@ -628,8 +690,12 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
if (size != sizeof(np_cfg))
return QL_STATUS_INVALID_PARAM;
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ memset(&np_cfg, 0,
+ sizeof(struct qlcnic_npar_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ if (qlcnic_is_valid_nic_func(adapter, i) < 0)
continue;
ret = qlcnic_get_nic_info(adapter, &nic_info, i);
if (ret)
@@ -644,6 +710,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
}
+
memcpy(buf, &np_cfg, size);
return size;
}
@@ -659,6 +726,9 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
struct qlcnic_esw_statistics port_stats;
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
@@ -691,6 +761,9 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
struct qlcnic_esw_statistics esw_stats;
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
@@ -722,6 +795,9 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
return QL_STATUS_INVALID_PARAM;
@@ -744,10 +820,14 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
char *buf, loff_t offset,
size_t size)
{
+
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (offset >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
@@ -789,7 +869,10 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
return ret;
}
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
+ memset(&pci_cfg, 0,
+ sizeof(struct qlcnic_pci_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
pci_cfg[i].port_num = pci_info[i].default_port;
@@ -797,6 +880,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
+
memcpy(buf, &pci_cfg, size);
kfree(pci_info);
return size;
@@ -897,7 +981,6 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (device_create_bin_file(dev, &bin_attr_port_stats))
dev_info(dev, "failed to create port stats sysfs entry");
@@ -911,9 +994,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- return;
-
if (device_create_bin_file(dev, &bin_attr_pci_config))
dev_info(dev, "failed to create pci config sysfs entry");
if (device_create_file(dev, &dev_attr_beacon))
@@ -936,7 +1016,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
device_remove_bin_file(dev, &bin_attr_port_stats);
@@ -945,8 +1024,6 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- return;
device_remove_bin_file(dev, &bin_attr_pci_config);
device_remove_file(dev, &dev_attr_beacon);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
@@ -958,3 +1035,23 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_bin_file(dev, &bin_attr_pm_config);
device_remove_bin_file(dev, &bin_attr_esw_stats);
}
+
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_create_diag_entries(adapter);
+}
+
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_remove_diag_entries(adapter);
+}
+
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_create_diag_entries(adapter);
+}
+
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_remove_diag_entries(adapter);
+}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index f80cd975daed..b13ab544a7eb 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2920,14 +2920,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
/*
* Allocate small buffer queue control blocks.
*/
- rx_ring->sbq =
- kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->sbq == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Small buffer queue control block allocation failed.\n");
+ rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->sbq == NULL)
goto err_mem;
- }
ql_init_sbq_ring(qdev, rx_ring);
}
@@ -2948,14 +2945,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
/*
* Allocate large buffer queue control blocks.
*/
- rx_ring->lbq =
- kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->lbq == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Large buffer queue control block allocation failed.\n");
+ rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->lbq == NULL)
goto err_mem;
- }
ql_init_lbq_ring(qdev, rx_ring);
}
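
/*
 * The two allocations above switch from kmalloc(n * size, ...) to
 * kmalloc_array(n, size, ...).  kmalloc_array() returns NULL instead of
 * silently wrapping when n * size would overflow, and the redundant OOM
 * messages can go because the allocator already warns.  Sketch of the
 * overflow check kmalloc_array() performs (illustrative, not the driver's
 * own code):
 */
static inline void *checked_alloc(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return kmalloc(n * size, flags);
}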
@@ -4572,7 +4566,6 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
qdev->mpi_coredump =
vmalloc(sizeof(struct ql_mpi_coredump));
if (qdev->mpi_coredump == NULL) {
- dev_err(&pdev->dev, "Coredump alloc failed.\n");
err = -ENOMEM;
goto err_out2;
}
@@ -4586,7 +4579,6 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
goto err_out2;
}
- memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
/* Keep local copy of current mac address. */
memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
@@ -4678,7 +4670,7 @@ static int qlge_probe(struct pci_dev *pdev,
qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+ NETIF_F_TSO | NETIF_F_TSO_ECN |
NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
ndev->features = ndev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
diff --git a/drivers/net/ethernet/racal/Kconfig b/drivers/net/ethernet/racal/Kconfig
deleted file mode 100644
index 01969e0a9c68..000000000000
--- a/drivers/net/ethernet/racal/Kconfig
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Racal-Interlan device configuration
-#
-
-config NET_VENDOR_RACAL
- bool "Racal-Interlan (Micom) NI devices"
- default y
- depends on ISA
- ---help---
- If you have a network (Ethernet) card belonging to this class, such
- as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about NI cards. If you say Y, you will be asked for
- your specific card in the following questions.
-
-if NET_VENDOR_RACAL
-
-config NI5010
- tristate "NI5010 support (EXPERIMENTAL)"
- depends on ISA && EXPERIMENTAL && BROKEN_ON_SMP
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that this is still
- experimental code.
-
- To compile this driver as a module, choose M here. The module
- will be called ni5010.
-
-endif # NET_VENDOR_RACAL
diff --git a/drivers/net/ethernet/racal/Makefile b/drivers/net/ethernet/racal/Makefile
deleted file mode 100644
index 1e210ca1d78b..000000000000
--- a/drivers/net/ethernet/racal/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the Racal-Interlan network device drivers.
-#
-
-obj-$(CONFIG_NI5010) += ni5010.o
diff --git a/drivers/net/ethernet/racal/ni5010.c b/drivers/net/ethernet/racal/ni5010.c
deleted file mode 100644
index 807982220050..000000000000
--- a/drivers/net/ethernet/racal/ni5010.c
+++ /dev/null
@@ -1,771 +0,0 @@
-/* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard.
- *
- * Copyright 1996,1997,2006 Jan-Pascal van Best and Andreas Mohr.
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * The authors may be reached as:
- * janpascal@vanbest.org andi@lisas.de
- *
- * Sources:
- * Donald Becker's "skeleton.c"
- * Crynwr ni5010 packet driver
- *
- * Changes:
- * v0.0: First test version
- * v0.1: First working version
- * v0.2:
- * v0.3->v0.90: Now demand setting io and irq when loading as module
- * 970430 v0.91: modified for Linux 2.1.14
- * v0.92: Implemented Andreas' (better) NI5010 probe
- * 970503 v0.93: Fixed auto-irq failure on warm reboot (JB)
- * 970623 v1.00: First kernel version (AM)
- * 970814 v1.01: Added detection of onboard receive buffer size (AM)
- * 060611 v1.02: slight cleanup: email addresses, driver modernization.
- * Bugs:
- * - not SMP-safe (no locking of I/O accesses)
- * - Note that you have to patch ifconfig for the new /proc/net/dev
- * format. It gives incorrect stats otherwise.
- *
- * To do:
- * Fix all bugs :-)
- * Move some stuff to chipset_init()
- * Handle xmt errors other than collisions
- * Complete merge with Andreas' driver
- * Implement ring buffers (Is this useful? You can't squeeze
- * too many packet in a 2k buffer!)
- * Implement DMA (Again, is this useful? Some docs say DMA is
- * slower than programmed I/O)
- *
- * Compile with:
- * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ \
- * -DMODULE -c ni5010.c
- *
- * Insert with e.g.:
- * insmod ni5010.ko io=0x300 irq=5
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include "ni5010.h"
-
-static const char boardname[] = "NI5010";
-static char version[] __initdata =
- "ni5010.c: v1.02 20060611 Jan-Pascal van Best and Andreas Mohr\n";
-
-/* bufsize_rcv == 0 means autoprobing */
-static unsigned int bufsize_rcv;
-
-#define JUMPERED_INTERRUPTS /* IRQ line jumpered on board */
-#undef JUMPERED_DMA /* No DMA used */
-#undef FULL_IODETECT /* Only detect in portlist */
-
-#ifndef FULL_IODETECT
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int ports[] __initdata =
- { 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0 };
-#endif
-
-/* Use 0 for production, 1 for verification, >2 for debug */
-#ifndef NI5010_DEBUG
-#define NI5010_DEBUG 0
-#endif
-
-/* Information that needs to be kept for each board. */
-struct ni5010_local {
- int o_pkt_size;
- spinlock_t lock;
-};
-
-/* Index to functions, as function prototypes. */
-
-static int ni5010_probe1(struct net_device *dev, int ioaddr);
-static int ni5010_open(struct net_device *dev);
-static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t ni5010_interrupt(int irq, void *dev_id);
-static void ni5010_rx(struct net_device *dev);
-static void ni5010_timeout(struct net_device *dev);
-static int ni5010_close(struct net_device *dev);
-static void ni5010_set_multicast_list(struct net_device *dev);
-static void reset_receiver(struct net_device *dev);
-
-static int process_xmt_interrupt(struct net_device *dev);
-#define tx_done(dev) 1
-static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad);
-static void chipset_init(struct net_device *dev, int startp);
-static void dump_packet(void *buf, int len);
-static void ni5010_show_registers(struct net_device *dev);
-
-static int io;
-static int irq;
-
-struct net_device * __init ni5010_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct ni5010_local));
- int *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- }
-
- PRINTK2((KERN_DEBUG "%s: Entering ni5010_probe\n", dev->name));
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = ni5010_probe1(dev, io);
- } else if (io != 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
-#ifdef FULL_IODETECT
- for (io=0x200; io<0x400 && ni5010_probe1(dev, io) ; io+=0x20)
- ;
- if (io == 0x400)
- err = -ENODEV;
-
-#else
- for (port = ports; *port && ni5010_probe1(dev, *port); port++)
- ;
- if (!*port)
- err = -ENODEV;
-#endif /* FULL_IODETECT */
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- release_region(dev->base_addr, NI5010_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static inline int rd_port(int ioaddr)
-{
- inb(IE_RBUF);
- return inb(IE_SAPROM);
-}
-
-static void __init trigger_irq(int ioaddr)
-{
- outb(0x00, EDLC_RESET); /* Clear EDLC hold RESET state */
- outb(0x00, IE_RESET); /* Board reset */
- outb(0x00, EDLC_XMASK); /* Disable all Xmt interrupts */
- outb(0x00, EDLC_RMASK); /* Disable all Rcv interrupt */
- outb(0xff, EDLC_XCLR); /* Clear all pending Xmt interrupts */
- outb(0xff, EDLC_RCLR); /* Clear all pending Rcv interrupts */
- /*
- * Transmit packet mode: Ignore parity, Power xcvr,
- * Enable loopback
- */
- outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
- outb(RMD_BROADCAST, EDLC_RMODE); /* Receive normal&broadcast */
- outb(XM_ALL, EDLC_XMASK); /* Enable all Xmt interrupts */
- udelay(50); /* FIXME: Necessary? */
- outb(MM_EN_XMT|MM_MUX, IE_MMODE); /* Start transmission */
-}
-
-static const struct net_device_ops ni5010_netdev_ops = {
- .ndo_open = ni5010_open,
- .ndo_stop = ni5010_close,
- .ndo_start_xmit = ni5010_send_packet,
- .ndo_set_rx_mode = ni5010_set_multicast_list,
- .ndo_tx_timeout = ni5010_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-};
-
-/*
- * This is the real probe routine. Linux has a history of friendly device
- * probes on the ISA bus. A good device probes avoids doing writes, and
- * verifies that the correct device exists and functions.
- */
-
-static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
-{
- static unsigned version_printed;
- struct ni5010_local *lp;
- int i;
- unsigned int data = 0;
- int boguscount = 40;
- int err = -ENODEV;
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
-
- if (!request_region(ioaddr, NI5010_IO_EXTENT, boardname))
- return -EBUSY;
-
- /*
- * This is no "official" probe method, I've rather tested which
- * probe works best with my seven NI5010 cards
- * (they have very different serial numbers)
- * Suggestions or failure reports are very, very welcome !
- * But I think it is a relatively good probe method
- * since it doesn't use any "outb"
- * It should be nearly 100% reliable !
- * well-known WARNING: this probe method (like many others)
- * will hang the system if a NE2000 card region is probed !
- *
- * - Andreas
- */
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_probe1(%#3x)\n",
- dev->name, ioaddr));
-
- if (inb(ioaddr+0) == 0xff)
- goto out;
-
- while ( (rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr) &
- rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr)) != 0xff)
- {
- if (boguscount-- == 0)
- goto out;
- }
-
- PRINTK2((KERN_DEBUG "%s: I/O #1 passed!\n", dev->name));
-
- for (i=0; i<32; i++)
- if ( (data = rd_port(ioaddr)) != 0xff) break;
- if (data==0xff)
- goto out;
-
- PRINTK2((KERN_DEBUG "%s: I/O #2 passed!\n", dev->name));
-
- if ((data != SA_ADDR0) || (rd_port(ioaddr) != SA_ADDR1) ||
- (rd_port(ioaddr) != SA_ADDR2))
- goto out;
-
- for (i=0; i<4; i++)
- rd_port(ioaddr);
-
- if ( (rd_port(ioaddr) != NI5010_MAGICVAL1) ||
- (rd_port(ioaddr) != NI5010_MAGICVAL2) )
- goto out;
-
- PRINTK2((KERN_DEBUG "%s: I/O #3 passed!\n", dev->name));
-
- if (NI5010_DEBUG && version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
- printk("NI5010 ethercard probe at 0x%x: ", ioaddr);
-
- dev->base_addr = ioaddr;
-
- for (i=0; i<6; i++) {
- outw(i, IE_GP);
- dev->dev_addr[i] = inb(IE_SAPROM);
- }
- printk("%pM ", dev->dev_addr);
-
- PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name));
-
-#ifdef JUMPERED_INTERRUPTS
- if (dev->irq == 0xff)
- ;
- else if (dev->irq < 2) {
- unsigned long irq_mask;
-
- PRINTK2((KERN_DEBUG "%s: I/O #5 passed!\n", dev->name));
-
- irq_mask = probe_irq_on();
- trigger_irq(ioaddr);
- mdelay(20);
- dev->irq = probe_irq_off(irq_mask);
-
- PRINTK2((KERN_DEBUG "%s: I/O #6 passed!\n", dev->name));
-
- if (dev->irq == 0) {
- err = -EAGAIN;
- printk(KERN_WARNING "%s: no IRQ found!\n", dev->name);
- goto out;
- }
- PRINTK2((KERN_DEBUG "%s: I/O #7 passed!\n", dev->name));
- } else if (dev->irq == 2) {
- dev->irq = 9;
- }
-#endif /* JUMPERED_INTERRUPTS */
- PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name));
-
- /* DMA is not supported (yet?), so no use detecting it */
- lp = netdev_priv(dev);
-
- spin_lock_init(&lp->lock);
-
- PRINTK2((KERN_DEBUG "%s: I/O #10 passed!\n", dev->name));
-
-/* get the size of the onboard receive buffer
- * higher addresses than bufsize are wrapped into real buffer
- * i.e. data for offs. 0x801 is written to 0x1 with a 2K onboard buffer
- */
- if (!bufsize_rcv) {
- outb(1, IE_MMODE); /* Put Rcv buffer on system bus */
- outw(0, IE_GP); /* Point GP at start of packet */
- outb(0, IE_RBUF); /* set buffer byte 0 to 0 */
- for (i = 1; i < 0xff; i++) {
- outw(i << 8, IE_GP); /* Point GP at packet size to be tested */
- outb(i, IE_RBUF);
- outw(0x0, IE_GP); /* Point GP at start of packet */
- data = inb(IE_RBUF);
- if (data == i) break;
- }
- bufsize_rcv = i << 8;
- outw(0, IE_GP); /* Point GP at start of packet */
- outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */
- }
- printk("-> bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE);
-
- dev->netdev_ops = &ni5010_netdev_ops;
- dev->watchdog_timeo = HZ/20;
-
- dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
-
- /* Shut up the ni5010 */
- outb(0, EDLC_RMASK); /* Mask all receive interrupts */
- outb(0, EDLC_XMASK); /* Mask all xmit interrupts */
- outb(0xff, EDLC_RCLR); /* Kill all pending rcv interrupts */
- outb(0xff, EDLC_XCLR); /* Kill all pending xmt interrupts */
-
- printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq);
- if (dev->dma)
- printk(" & DMA %d", dev->dma);
- printk(".\n");
- return 0;
-out:
- release_region(dev->base_addr, NI5010_IO_EXTENT);
- return err;
-}
-
-/*
- * Open/initialize the board. This is called (in the current kernel)
- * sometime after booting when the 'ifconfig' program is run.
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is a non-reboot way to recover if something goes wrong.
- */
-
-static int ni5010_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int i;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name));
-
- if (request_irq(dev->irq, ni5010_interrupt, 0, boardname, dev)) {
- printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq);
- return -EAGAIN;
- }
- PRINTK3((KERN_DEBUG "%s: passed open() #1\n", dev->name));
- /*
- * Always allocate the DMA channel after the IRQ,
- * and clean up on failure.
- */
-#ifdef JUMPERED_DMA
- if (request_dma(dev->dma, cardname)) {
- printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma);
- free_irq(dev->irq, NULL);
- return -EAGAIN;
- }
-#endif /* JUMPERED_DMA */
-
- PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name));
- /* Reset the hardware here. Don't forget to set the station address. */
-
- outb(RS_RESET, EDLC_RESET); /* Hold up EDLC_RESET while configing board */
- outb(0, IE_RESET); /* Hardware reset of ni5010 board */
- outb(XMD_LBC, EDLC_XMODE); /* Only loopback xmits */
-
- PRINTK3((KERN_DEBUG "%s: passed open() #3\n", dev->name));
- /* Set the station address */
- for(i = 0;i < 6; i++) {
- outb(dev->dev_addr[i], EDLC_ADDR + i);
- }
-
- PRINTK3((KERN_DEBUG "%s: Initialising ni5010\n", dev->name));
- outb(0, EDLC_XMASK); /* No xmit interrupts for now */
- outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
- /* Normal packet xmit mode */
- outb(0xff, EDLC_XCLR); /* Clear all pending xmit interrupts */
- outb(RMD_BROADCAST, EDLC_RMODE);
- /* Receive broadcast and normal packets */
- reset_receiver(dev); /* Ready ni5010 for receiving packets */
-
- outb(0, EDLC_RESET); /* Un-reset the ni5010 */
-
- netif_start_queue(dev);
-
- if (NI5010_DEBUG) ni5010_show_registers(dev);
-
- PRINTK((KERN_DEBUG "%s: open successful\n", dev->name));
- return 0;
-}
-
-static void reset_receiver(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- PRINTK3((KERN_DEBUG "%s: resetting receiver\n", dev->name));
- outw(0, IE_GP); /* Receive packet at start of buffer */
- outb(0xff, EDLC_RCLR); /* Clear all pending rcv interrupts */
- outb(0, IE_MMODE); /* Put EDLC to rcv buffer */
- outb(MM_EN_RCV, IE_MMODE); /* Enable rcv */
- outb(0xff, EDLC_RMASK); /* Enable all rcv interrupts */
-}
-
-static void ni5010_timeout(struct net_device *dev)
-{
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
- tx_done(dev) ? "IRQ conflict" : "network cable problem");
- /* Try to restart the adaptor. */
- /* FIXME: Give it a real kick here */
- chipset_init(dev, 1);
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_send_packet\n", dev->name));
-
- /*
- * Block sending
- */
-
- netif_stop_queue(dev);
- hardware_send_packet(dev, (unsigned char *)skb->data, skb->len, length-skb->len);
- dev_kfree_skb (skb);
- return NETDEV_TX_OK;
-}
-
-/*
- * The typical workload of the driver:
- * Handle the network interface interrupts.
- */
-static irqreturn_t ni5010_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct ni5010_local *lp;
- int ioaddr, status;
- int xmit_was_error = 0;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_interrupt\n", dev->name));
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- spin_lock(&lp->lock);
- status = inb(IE_ISTAT);
- PRINTK3((KERN_DEBUG "%s: IE_ISTAT = %#02x\n", dev->name, status));
-
- if ((status & IS_R_INT) == 0) ni5010_rx(dev);
-
- if ((status & IS_X_INT) == 0) {
- xmit_was_error = process_xmt_interrupt(dev);
- }
-
- if ((status & IS_DMA_INT) == 0) {
- PRINTK((KERN_DEBUG "%s: DMA complete (?)\n", dev->name));
- outb(0, IE_DMA_RST); /* Reset DMA int */
- }
-
- if (!xmit_was_error)
- reset_receiver(dev);
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-
-static void dump_packet(void *buf, int len)
-{
- int i;
-
- printk(KERN_DEBUG "Packet length = %#4x\n", len);
- for (i = 0; i < len; i++){
- if (i % 16 == 0) printk(KERN_DEBUG "%#4.4x", i);
- if (i % 2 == 0) printk(" ");
- printk("%2.2x", ((unsigned char *)buf)[i]);
- if (i % 16 == 15) printk("\n");
- }
- printk("\n");
-}
-
-/* We have a good packet, get it out of the buffer. */
-static void ni5010_rx(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- unsigned char rcv_stat;
- struct sk_buff *skb;
- int i_pkt_size;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_rx()\n", dev->name));
-
- rcv_stat = inb(EDLC_RSTAT);
- PRINTK3((KERN_DEBUG "%s: EDLC_RSTAT = %#2x\n", dev->name, rcv_stat));
-
- if ( (rcv_stat & RS_VALID_BITS) != RS_PKT_OK) {
- PRINTK((KERN_INFO "%s: receive error.\n", dev->name));
- dev->stats.rx_errors++;
- if (rcv_stat & RS_RUNT) dev->stats.rx_length_errors++;
- if (rcv_stat & RS_ALIGN) dev->stats.rx_frame_errors++;
- if (rcv_stat & RS_CRC_ERR) dev->stats.rx_crc_errors++;
- if (rcv_stat & RS_OFLW) dev->stats.rx_fifo_errors++;
- outb(0xff, EDLC_RCLR); /* Clear the interrupt */
- return;
- }
-
- outb(0xff, EDLC_RCLR); /* Clear the interrupt */
-
- i_pkt_size = inw(IE_RCNT);
- if (i_pkt_size > ETH_FRAME_LEN || i_pkt_size < 10 ) {
- PRINTK((KERN_DEBUG "%s: Packet size error, packet size = %#4.4x\n",
- dev->name, i_pkt_size));
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
- return;
- }
-
- /* Malloc up new buffer. */
- skb = netdev_alloc_skb(dev, i_pkt_size + 3);
- if (skb == NULL) {
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- return;
- }
-
- skb_reserve(skb, 2);
-
- /* Read packet into buffer */
- outb(MM_MUX, IE_MMODE); /* Rcv buffer to system bus */
- outw(0, IE_GP); /* Seek to beginning of packet */
- insb(IE_RBUF, skb_put(skb, i_pkt_size), i_pkt_size);
-
- if (NI5010_DEBUG >= 4)
- dump_packet(skb->data, skb->len);
-
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += i_pkt_size;
-
- PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n",
- dev->name, i_pkt_size));
-}
-
-static int process_xmt_interrupt(struct net_device *dev)
-{
- struct ni5010_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int xmit_stat;
-
- PRINTK2((KERN_DEBUG "%s: entering process_xmt_interrupt\n", dev->name));
-
- xmit_stat = inb(EDLC_XSTAT);
- PRINTK3((KERN_DEBUG "%s: EDLC_XSTAT = %2.2x\n", dev->name, xmit_stat));
-
- outb(0, EDLC_XMASK); /* Disable xmit IRQ's */
- outb(0xff, EDLC_XCLR); /* Clear all pending xmit IRQ's */
-
- if (xmit_stat & XS_COLL){
- PRINTK((KERN_DEBUG "%s: collision detected, retransmitting\n",
- dev->name));
- outw(NI5010_BUFSIZE - lp->o_pkt_size, IE_GP);
- /* outb(0, IE_MMODE); */ /* xmt buf on sysbus FIXME: needed ? */
- outb(MM_EN_XMT | MM_MUX, IE_MMODE);
- outb(XM_ALL, EDLC_XMASK); /* Enable xmt IRQ's */
- dev->stats.collisions++;
- return 1;
- }
-
- /* FIXME: handle other xmt error conditions */
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += lp->o_pkt_size;
- netif_wake_queue(dev);
-
- PRINTK2((KERN_DEBUG "%s: sent packet, size=%#4.4x\n",
- dev->name, lp->o_pkt_size));
-
- return 0;
-}
-
-/* The inverse routine to ni5010_open(). */
-static int ni5010_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name));
-#ifdef JUMPERED_INTERRUPTS
- free_irq(dev->irq, NULL);
-#endif
- /* Put card in held-RESET state */
- outb(0, IE_MMODE);
- outb(RS_RESET, EDLC_RESET);
-
- netif_stop_queue(dev);
-
- PRINTK((KERN_DEBUG "%s: %s closed down\n", dev->name, boardname));
- return 0;
-
-}
-
-/* Set or clear the multicast filter for this adaptor.
- num_addrs == -1 Promiscuous mode, receive all packets
- num_addrs == 0 Normal mode, clear multicast list
- num_addrs > 0 Multicast mode, receive normal and MC packets, and do
- best-effort filtering.
-*/
-static void ni5010_set_multicast_list(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
-
- PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
-
- if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
- !netdev_mc_empty(dev)) {
- outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
- PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
- } else {
- PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name));
- outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */
- }
-}
-
-static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad)
-{
- struct ni5010_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
- unsigned int buf_offs;
-
- PRINTK2((KERN_DEBUG "%s: entering hardware_send_packet\n", dev->name));
-
- if (length > ETH_FRAME_LEN) {
- PRINTK((KERN_WARNING "%s: packet too large, not possible\n",
- dev->name));
- return;
- }
-
- if (NI5010_DEBUG) ni5010_show_registers(dev);
-
- if (inb(IE_ISTAT) & IS_EN_XMT) {
- PRINTK((KERN_WARNING "%s: sending packet while already transmitting, not possible\n",
- dev->name));
- return;
- }
-
- if (NI5010_DEBUG > 3) dump_packet(buf, length);
-
- buf_offs = NI5010_BUFSIZE - length - pad;
-
- spin_lock_irqsave(&lp->lock, flags);
- lp->o_pkt_size = length + pad;
-
- outb(0, EDLC_RMASK); /* Mask all receive interrupts */
- outb(0, IE_MMODE); /* Put Xmit buffer on system bus */
- outb(0xff, EDLC_RCLR); /* Clear out pending rcv interrupts */
-
- outw(buf_offs, IE_GP); /* Point GP at start of packet */
- outsb(IE_XBUF, buf, length); /* Put data in buffer */
- while(pad--)
- outb(0, IE_XBUF);
-
- outw(buf_offs, IE_GP); /* Rewrite where packet starts */
-
- /* should work without that outb() (Crynwr used it) */
- /*outb(MM_MUX, IE_MMODE);*/ /* Xmt buffer to EDLC bus */
- outb(MM_EN_XMT | MM_MUX, IE_MMODE); /* Begin transmission */
- outb(XM_ALL, EDLC_XMASK); /* Cause interrupt after completion or fail */
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- netif_wake_queue(dev);
-
- if (NI5010_DEBUG) ni5010_show_registers(dev);
-}
-
-static void chipset_init(struct net_device *dev, int startp)
-{
- /* FIXME: Move some stuff here */
- PRINTK3((KERN_DEBUG "%s: doing NOTHING in chipset_init\n", dev->name));
-}
-
-static void ni5010_show_registers(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- PRINTK3((KERN_DEBUG "%s: XSTAT %#2.2x\n", dev->name, inb(EDLC_XSTAT)));
- PRINTK3((KERN_DEBUG "%s: XMASK %#2.2x\n", dev->name, inb(EDLC_XMASK)));
- PRINTK3((KERN_DEBUG "%s: RSTAT %#2.2x\n", dev->name, inb(EDLC_RSTAT)));
- PRINTK3((KERN_DEBUG "%s: RMASK %#2.2x\n", dev->name, inb(EDLC_RMASK)));
- PRINTK3((KERN_DEBUG "%s: RMODE %#2.2x\n", dev->name, inb(EDLC_RMODE)));
- PRINTK3((KERN_DEBUG "%s: XMODE %#2.2x\n", dev->name, inb(EDLC_XMODE)));
- PRINTK3((KERN_DEBUG "%s: ISTAT %#2.2x\n", dev->name, inb(IE_ISTAT)));
-}
-
-#ifdef MODULE
-static struct net_device *dev_ni5010;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "ni5010 I/O base address");
-MODULE_PARM_DESC(irq, "ni5010 IRQ number");
-
-static int __init ni5010_init_module(void)
-{
- PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname));
- /*
- if(io <= 0 || irq == 0){
- printk(KERN_WARNING "%s: Autoprobing not allowed for modules.\n", boardname);
- printk(KERN_WARNING "%s: Set symbols 'io' and 'irq'\n", boardname);
- return -EINVAL;
- }
- */
- if (io <= 0){
- printk(KERN_WARNING "%s: Autoprobing for modules is hazardous, trying anyway..\n", boardname);
- }
-
- PRINTK2((KERN_DEBUG "%s: init_module irq=%#2x, io=%#3x\n", boardname, irq, io));
- dev_ni5010 = ni5010_probe(-1);
- if (IS_ERR(dev_ni5010))
- return PTR_ERR(dev_ni5010);
- return 0;
-}
-
-static void __exit ni5010_cleanup_module(void)
-{
- PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname));
- unregister_netdev(dev_ni5010);
- release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT);
- free_netdev(dev_ni5010);
-}
-module_init(ni5010_init_module);
-module_exit(ni5010_cleanup_module);
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
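(As a module, the ni5010 driver above took the io= and irq= parameters to name the card's jumpered resources instead of autoprobing; a hypothetical load line would be "modprobe ni5010 io=0x300 irq=5".)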
diff --git a/drivers/net/ethernet/racal/ni5010.h b/drivers/net/ethernet/racal/ni5010.h
deleted file mode 100644
index e10e717fcd76..000000000000
--- a/drivers/net/ethernet/racal/ni5010.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Racal-Interlan ni5010 Ethernet definitions
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers that work.
- *
- * copyrights (c) 1996 by Jan-Pascal van Best (jvbest@wi.leidenuniv.nl)
- *
- * I have taken a look at the following sources:
- * crynwr-packet-driver by Russ Nelson
- */
-
-#define NI5010_BUFSIZE 2048 /* number of bytes in a buffer */
-
-#define NI5010_MAGICVAL0 0x00 /* magic-values for ni5010 card */
-#define NI5010_MAGICVAL1 0x55
-#define NI5010_MAGICVAL2 0xAA
-
-#define SA_ADDR0 0x02
-#define SA_ADDR1 0x07
-#define SA_ADDR2 0x01
-
-/* The number of low I/O ports used by the ni5010 ethercard. */
-#define NI5010_IO_EXTENT 32
-
-#define PRINTK(x) if (NI5010_DEBUG) printk x
-#define PRINTK2(x) if (NI5010_DEBUG>=2) printk x
-#define PRINTK3(x) if (NI5010_DEBUG>=3) printk x
-
-/* The various IE command registers */
-#define EDLC_XSTAT (ioaddr + 0x00) /* EDLC transmit csr */
-#define EDLC_XCLR (ioaddr + 0x00) /* EDLC transmit "Clear IRQ" */
-#define EDLC_XMASK (ioaddr + 0x01) /* EDLC transmit "IRQ Masks" */
-#define EDLC_RSTAT (ioaddr + 0x02) /* EDLC receive csr */
-#define EDLC_RCLR (ioaddr + 0x02) /* EDLC receive "Clear IRQ" */
-#define EDLC_RMASK (ioaddr + 0x03) /* EDLC receive "IRQ Masks" */
-#define EDLC_XMODE (ioaddr + 0x04) /* EDLC transmit Mode */
-#define EDLC_RMODE (ioaddr + 0x05) /* EDLC receive Mode */
-#define EDLC_RESET (ioaddr + 0x06) /* EDLC RESET register */
-#define EDLC_TDR1 (ioaddr + 0x07) /* "Time Domain Reflectometry" reg1 */
-#define EDLC_ADDR (ioaddr + 0x08) /* EDLC station address, 6 bytes */
- /* 0x0E doesn't exist for r/w */
-#define EDLC_TDR2 (ioaddr + 0x0f) /* "Time Domain Reflectometry" reg2 */
-#define IE_GP (ioaddr + 0x10) /* GP pointer (word register) */
- /* 0x11 is 2nd byte of GP Pointer */
-#define IE_RCNT (ioaddr + 0x10) /* Count of bytes in rcv'd packet */
- /* 0x11 is 2nd byte of "Byte Count" */
-#define IE_MMODE (ioaddr + 0x12) /* Memory Mode register */
-#define IE_DMA_RST (ioaddr + 0x13) /* IE DMA Reset. write only */
-#define IE_ISTAT (ioaddr + 0x13) /* IE Interrupt Status. read only */
-#define IE_RBUF (ioaddr + 0x14) /* IE Receive Buffer port */
-#define IE_XBUF (ioaddr + 0x15) /* IE Transmit Buffer port */
-#define IE_SAPROM (ioaddr + 0x16) /* window on station addr prom */
-#define IE_RESET (ioaddr + 0x17) /* any write causes Board Reset */
-
-/* bits in EDLC_XSTAT, interrupt clear on write, status when read */
-#define XS_TPOK 0x80 /* transmit packet successful */
-#define XS_CS 0x40 /* carrier sense */
-#define XS_RCVD 0x20 /* transmitted packet received */
-#define XS_SHORT 0x10 /* transmission media is shorted */
-#define XS_UFLW 0x08 /* underflow. iff failed board */
-#define XS_COLL 0x04 /* collision occurred */
-#define XS_16COLL 0x02 /* 16th collision occurred */
-#define XS_PERR 0x01 /* parity error */
-
-#define XS_CLR_UFLW 0x08 /* clear underflow */
-#define XS_CLR_COLL 0x04 /* clear collision */
-#define XS_CLR_16COLL 0x02 /* clear 16th collision */
-#define XS_CLR_PERR 0x01 /* clear parity error */
-
-/* bits in EDLC_XMASK, mask/enable transmit interrupts. register is r/w */
-#define XM_TPOK 0x80 /* =1 to enable Xmt Pkt OK interrupts */
-#define XM_RCVD 0x20 /* =1 to enable Xmt Pkt Rcvd ints */
-#define XM_UFLW 0x08 /* =1 to enable Xmt Underflow ints */
-#define XM_COLL 0x04 /* =1 to enable Xmt Collision ints */
-#define XM_COLL16 0x02 /* =1 to enable Xmt 16th Coll ints */
-#define XM_PERR 0x01 /* =1 to enable Xmt Parity Error ints */
- /* note: always clear this bit */
-#define XM_ALL (XM_TPOK | XM_RCVD | XM_UFLW | XM_COLL | XM_COLL16)
-
-/* bits in EDLC_RSTAT, interrupt clear on write, status when read */
-#define RS_PKT_OK 0x80 /* received good packet */
-#define RS_RST_PKT 0x10 /* RESET packet received */
-#define RS_RUNT 0x08 /* Runt Pkt rcvd. Len < 64 Bytes */
-#define RS_ALIGN 0x04 /* Alignment error. not 8 bit aligned */
-#define RS_CRC_ERR 0x02 /* Bad CRC on rcvd pkt */
-#define RS_OFLW 0x01 /* overflow for rcv FIFO */
-#define RS_VALID_BITS ( RS_PKT_OK | RS_RST_PKT | RS_RUNT | RS_ALIGN | RS_CRC_ERR | RS_OFLW )
- /* all valid RSTAT bits */
-
-#define RS_CLR_PKT_OK 0x80 /* clear rcvd packet interrupt */
-#define RS_CLR_RST_PKT 0x10 /* clear RESET packet received */
-#define RS_CLR_RUNT 0x08 /* clear Runt Pckt received */
-#define RS_CLR_ALIGN 0x04 /* clear Alignment error */
-#define RS_CLR_CRC_ERR 0x02 /* clear CRC error */
-#define RS_CLR_OFLW 0x01 /* clear rcv FIFO Overflow */
-
-/* bits in EDLC_RMASK, mask/enable receive interrupts. register is r/w */
-#define RM_PKT_OK 0x80 /* =1 to enable rcvd good packet ints */
-#define RM_RST_PKT 0x10 /* =1 to enable RESET packet ints */
-#define RM_RUNT 0x08 /* =1 to enable Runt Pkt rcvd ints */
-#define RM_ALIGN 0x04 /* =1 to enable Alignment error ints */
-#define RM_CRC_ERR 0x02 /* =1 to enable Bad CRC error ints */
-#define RM_OFLW 0x01 /* =1 to enable overflow error ints */
-
-/* bits in EDLC_RMODE, set Receive Packet mode. register is r/w */
-#define RMD_TEST 0x80 /* =1 for Chip testing. normally 0 */
-#define RMD_ADD_SIZ 0x10 /* =1 5-byte addr match. normally 0 */
-#define RMD_EN_RUNT 0x08 /* =1 enable runt rcv. normally 0 */
-#define RMD_EN_RST 0x04 /* =1 to rcv RESET pkt. normally 0 */
-
-#define RMD_PROMISC 0x03 /* receive *all* packets. unusual */
-#define RMD_MULTICAST 0x02 /* receive multicasts too. unusual */
-#define RMD_BROADCAST 0x01 /* receive broadcasts & normal. usual */
-#define RMD_NO_PACKETS 0x00 /* don't receive any packets. unusual */
-
-/* bits in EDLC_XMODE, set Transmit Packet mode. register is r/w */
-#define XMD_COLL_CNT 0xf0 /* coll's since success. read-only */
-#define XMD_IG_PAR 0x08 /* =1 to ignore parity. ALWAYS set */
-#define XMD_T_MODE 0x04 /* =1 to power xcvr. ALWAYS set this */
-#define XMD_LBC 0x02 /* =1 for loopback. normally set */
-#define XMD_DIS_C 0x01 /* =1 disables contention. normally 0 */
-
-/* bits in EDLC_RESET, write only */
-#define RS_RESET 0x80 /* =1 to hold EDLC in reset state */
-
-/* bits in IE_MMODE, write only */
-#define MM_EN_DMA 0x80 /* =1 begin DMA xfer, Cplt clrs it */
-#define MM_EN_RCV 0x40 /* =1 allows Pkt rcv. clr'd by rcv */
-#define MM_EN_XMT 0x20 /* =1 begin Xmt pkt. Cplt clrs it */
-#define MM_BUS_PAGE 0x18 /* =00 ALWAYS. Used when MUX=1 */
-#define MM_NET_PAGE 0x06 /* =00 ALWAYS. Used when MUX=0 */
-#define MM_MUX 0x01 /* =1 means Rcv Buff on system bus */
- /* =0 means Xmt Buff on system bus */
-
-/* bits in IE_ISTAT, read only */
-#define IS_TDIAG 0x80 /* =1 if Diagnostic problem */
-#define IS_EN_RCV 0x20 /* =1 until frame is rcv'd cplt */
-#define IS_EN_XMT 0x10 /* =1 until frame is xmt'd cplt */
-#define IS_EN_DMA 0x08 /* =1 until DMA is cplt or aborted */
-#define IS_DMA_INT 0x04 /* =0 iff DMA done interrupt. */
-#define IS_R_INT 0x02 /* =0 iff unmasked Rcv interrupt */
-#define IS_X_INT 0x01 /* =0 iff unmasked Xmt interrupt */
-
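Note on the IE_ISTAT bits above: they are active-low, i.e. a bit reads 0 while that interrupt is pending, which is why ni5010_interrupt() earlier tests for == 0. A minimal sketch of the convention (illustrative only; handle_rx is a hypothetical helper):

	status = inb(IE_ISTAT);
	if ((status & IS_R_INT) == 0)	/* 0 means an unmasked receive interrupt is pending */
		handle_rx(dev);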
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 63c13125db6c..5b4103db70f5 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -755,9 +755,6 @@ static void r6040_mac_address(struct net_device *dev)
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
-
- /* Store MAC Address in perm_addr */
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
}
static int r6040_open(struct net_device *dev)
@@ -957,9 +954,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct r6040_private *rp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(rp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1045,7 +1042,7 @@ static int r6040_mii_probe(struct net_device *dev)
}
phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
dev_err(&lp->pdev->dev, "could not attach to PHY\n");
@@ -1195,9 +1192,8 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
lp->mii_bus->name = "r6040_eth_mii";
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
dev_name(&pdev->dev), card_idx);
- lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ lp->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
if (!lp->mii_bus->irq) {
- dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
err = -ENOMEM;
goto err_out_mdio;
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5ac93323a40c..b62a32484f6a 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1949,7 +1949,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < 3; i++)
((__le16 *) (dev->dev_addr))[i] =
cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
dev->netdev_ops = &cp_netdev_ops;
netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 5dc161630127..1276ac71353a 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -991,7 +991,6 @@ static int rtl8139_init_one(struct pci_dev *pdev,
for (i = 0; i < 3; i++)
((__le16 *) (dev->dev_addr))[i] =
cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* The Rtl8139-specific entries in the device structure. */
dev->netdev_ops = &rtl8139_netdev_ops;
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 5821966f9f28..783fa8b5cde7 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -34,8 +34,8 @@ config ATP
will be called atp.
config 8139CP
- tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support"
+ depends on PCI
select CRC32
select NET_CORE
select MII
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ed96f309bca8..8900398ba103 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -83,7 +83,7 @@ static const int multicast_filter_limit = 32;
#define R8169_REGS_SIZE 256
#define R8169_NAPI_WEIGHT 64
#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
-#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
+#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
@@ -450,7 +450,6 @@ enum rtl8168_registers {
#define PWM_EN (1 << 22)
#define RXDV_GATED_EN (1 << 19)
#define EARLY_TALLY_EN (1 << 16)
-#define FORCE_CLK (1 << 15) /* force clock request */
};
enum rtl_register_content {
@@ -514,7 +513,6 @@ enum rtl_register_content {
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
- ClkReqEn = (1 << 7), /* Clock Request Enable */
MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
@@ -535,7 +533,6 @@ enum rtl_register_content {
Spi_en = (1 << 3),
LanWake = (1 << 1), /* LanWake enable/disable */
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
- ASPM_en = (1 << 0), /* ASPM enable */
/* TBICSR p.28 */
TBIReset = 0x80000000,
@@ -684,7 +681,6 @@ enum features {
RTL_FEATURE_WOL = (1 << 0),
RTL_FEATURE_MSI = (1 << 1),
RTL_FEATURE_GMII = (1 << 2),
- RTL_FEATURE_FW_LOADED = (1 << 3),
};
struct rtl8169_counters {
@@ -727,7 +723,6 @@ struct rtl8169_private {
u16 mac_version;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
- u32 dirty_rx;
u32 dirty_tx;
struct rtl8169_stats rx_stats;
struct rtl8169_stats tx_stats;
@@ -1826,8 +1821,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
if (opts2 & RxVlanTag)
__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
-
- desc->opts2 = 0;
}
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2391,10 +2384,8 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
struct rtl_fw *rtl_fw = tp->rtl_fw;
/* TODO: release firmware once rtl_phy_write_fw signals failures. */
- if (!IS_ERR_OR_NULL(rtl_fw)) {
+ if (!IS_ERR_OR_NULL(rtl_fw))
rtl_phy_write_fw(tp, rtl_fw);
- tp->features |= RTL_FEATURE_FW_LOADED;
- }
}
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2405,31 +2396,6 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
rtl_apply_firmware(tp);
}
-static void r810x_aldps_disable(struct rtl8169_private *tp)
-{
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x0310);
- msleep(100);
-}
-
-static void r810x_aldps_enable(struct rtl8169_private *tp)
-{
- if (!(tp->features & RTL_FEATURE_FW_LOADED))
- return;
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x18, 0x8310);
-}
-
-static void r8168_aldps_enable_1(struct rtl8169_private *tp)
-{
- if (!(tp->features & RTL_FEATURE_FW_LOADED))
- return;
-
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
-}
-
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3220,8 +3186,6 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
rtl_writephy(tp, 0x1f, 0x0000);
- r8168_aldps_enable_1(tp);
-
/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
}
@@ -3296,8 +3260,6 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
-
- r8168_aldps_enable_1(tp);
}
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3305,8 +3267,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
rtl8168f_hw_phy_config(tp);
-
- r8168_aldps_enable_1(tp);
}
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
@@ -3404,8 +3364,6 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
rtl_writephy(tp, 0x1f, 0x0000);
-
- r8168_aldps_enable_1(tp);
}
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3491,19 +3449,21 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- r810x_aldps_disable(tp);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
rtl_apply_firmware(tp);
rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
-
- r810x_aldps_enable(tp);
}
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
/* Disable ALDPS before setting firmware */
- r810x_aldps_disable(tp);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(20);
rtl_apply_firmware(tp);
@@ -3513,8 +3473,6 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x10, 0x401f);
rtl_writephy(tp, 0x19, 0x7030);
rtl_writephy(tp, 0x1f, 0x0000);
-
- r810x_aldps_enable(tp);
}
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
@@ -3527,7 +3485,9 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
};
/* Disable ALDPS before ram code */
- r810x_aldps_disable(tp);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
rtl_apply_firmware(tp);
@@ -3535,8 +3495,6 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
-
- r810x_aldps_enable(tp);
}
static void rtl_hw_phy_config(struct net_device *dev)
@@ -4177,7 +4135,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
- tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+ tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
}
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -5053,6 +5011,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, EarlySize);
+ rtl_disable_clock_request(pdev);
+
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
@@ -5061,8 +5021,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
- RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -5087,12 +5046,13 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(MaxTxPacketSize, EarlySize);
+ rtl_disable_clock_request(pdev);
+
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
- RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5149,10 +5109,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
+ RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
RTL_W8(MaxTxPacketSize, EarlySize);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5368,9 +5326,6 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
- RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
@@ -5396,9 +5351,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
- RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -5420,10 +5372,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
- RTL_W32(MISC,
- (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
- RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
- RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+ RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
@@ -5920,7 +5869,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
/* The infamous DAC f*ckup only happens at boot time */
- if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
+ if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
void __iomem *ioaddr = tp->mmio_addr;
netif_info(tp, intr, dev, "disabling PCI DAC\n");
@@ -6035,10 +5984,8 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
unsigned int count;
cur_rx = tp->cur_rx;
- rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
- rx_left = min(rx_left, budget);
- for (; rx_left > 0; rx_left--, cur_rx++) {
+ for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
unsigned int entry = cur_rx % NUM_RX_DESC;
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
@@ -6064,8 +6011,6 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
!(status & (RxRWT | RxFOVF)) &&
(dev->features & NETIF_F_RXALL))
goto process_pkt;
-
- rtl8169_mark_to_asic(desc, rx_buf_sz);
} else {
struct sk_buff *skb;
dma_addr_t addr;
@@ -6086,16 +6031,14 @@ process_pkt:
if (unlikely(rtl8169_fragmented_frame(status))) {
dev->stats.rx_dropped++;
dev->stats.rx_length_errors++;
- rtl8169_mark_to_asic(desc, rx_buf_sz);
- continue;
+ goto release_descriptor;
}
skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
tp, pkt_size, addr);
- rtl8169_mark_to_asic(desc, rx_buf_sz);
if (!skb) {
dev->stats.rx_dropped++;
- continue;
+ goto release_descriptor;
}
rtl8169_rx_csum(skb, status);
@@ -6111,20 +6054,15 @@ process_pkt:
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);
}
-
- /* Workaround for AMD platform. */
- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
- desc->opts2 = 0;
- cur_rx++;
- }
+release_descriptor:
+ desc->opts2 = 0;
+ wmb();
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
}
count = cur_rx - tp->cur_rx;
tp->cur_rx = cur_rx;
- tp->dirty_rx += count;
-
return count;
}
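The release_descriptor path above clears the descriptor's opts2 field and issues wmb() before rtl8169_mark_to_asic() hands ownership back to the NIC, so the hardware never sees a half-updated descriptor. A rough sketch of that ordering (hypothetical helper; the real give-back is rtl8169_mark_to_asic()):

	static void rx_desc_give_to_hw(struct RxDesc *desc, unsigned int buf_sz)
	{
		desc->opts2 = 0;				/* scrub stale per-packet metadata */
		wmb();						/* order the clears before the ownership flip */
		desc->opts1 = cpu_to_le32(DescOwn | buf_sz);	/* NIC owns the descriptor again */
	}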
@@ -6948,7 +6886,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Get MAC address */
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 3d705862bd7d..33e96176e4d8 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -891,18 +891,16 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
- mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
- GFP_KERNEL);
+ mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
+ sizeof(*mdp->rx_skbuff), GFP_KERNEL);
if (!mdp->rx_skbuff) {
- dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
ret = -ENOMEM;
return ret;
}
- mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
- GFP_KERNEL);
+ mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
+ sizeof(*mdp->tx_skbuff), GFP_KERNEL);
if (!mdp->tx_skbuff) {
- dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
ret = -ENOMEM;
goto skb_ring_free;
}
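Both ring allocations now use kmalloc_array(), which fails with NULL when n * size would overflow instead of silently wrapping to a short allocation. Roughly the guard it provides (illustrative, not the kernel's exact code):

	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return kmalloc(n * size, flags);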
@@ -1422,7 +1420,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
/* Try connect to PHY */
phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
- 0, mdp->phy_interface);
+ mdp->phy_interface);
if (IS_ERR(phydev)) {
dev_err(&ndev->dev, "phy_connect failed\n");
return PTR_ERR(phydev);
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 72fc57dd084d..21683e2b1ff4 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -795,7 +795,7 @@ static inline int s6gmac_phy_start(struct net_device *dev)
struct phy_device *p = NULL;
while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
i++;
- p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
+ p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link,
PHY_INTERFACE_MODE_RGMII);
if (IS_ERR(p)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
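The phy_connect() updates in this series all reflect the same API change: the flags argument is gone, and the interface mode follows the link-adjust callback directly. A minimal call now looks like (sketch; ndev, bus_id and adjust_link_cb stand in for the driver's own):

	phydev = phy_connect(ndev, bus_id, &adjust_link_cb, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);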
diff --git a/drivers/net/ethernet/seeq/Kconfig b/drivers/net/ethernet/seeq/Kconfig
index 29f18533fdc7..11f168e46ebe 100644
--- a/drivers/net/ethernet/seeq/Kconfig
+++ b/drivers/net/ethernet/seeq/Kconfig
@@ -6,7 +6,6 @@ config NET_VENDOR_SEEQ
bool "SEEQ devices"
default y
depends on HAS_IOMEM
- depends on (ARM && ARCH_ACORN) || SGI_HAS_SEEQ || EXPERIMENTAL
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -26,17 +25,6 @@ config ARM_ETHER3
If you have an Acorn system with one of these network cards, you
should say Y to this option if you wish to use it with Linux.
-config SEEQ8005
- tristate "SEEQ8005 support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- ---help---
- This is a driver for the SEEQ 8005 network (Ethernet) card. If this
- is for you, read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called seeq8005.
-
config SGISEEQ
tristate "SGI Seeq ethernet controller support"
depends on SGI_HAS_SEEQ
diff --git a/drivers/net/ethernet/seeq/Makefile b/drivers/net/ethernet/seeq/Makefile
index 3e258a580c05..0488e99b831f 100644
--- a/drivers/net/ethernet/seeq/Makefile
+++ b/drivers/net/ethernet/seeq/Makefile
@@ -3,5 +3,4 @@
#
obj-$(CONFIG_ARM_ETHER3) += ether3.o
-obj-$(CONFIG_SEEQ8005) += seeq8005.o
obj-$(CONFIG_SGISEEQ) += sgiseeq.o
diff --git a/drivers/net/ethernet/seeq/seeq8005.c b/drivers/net/ethernet/seeq/seeq8005.c
deleted file mode 100644
index d6e50de71186..000000000000
--- a/drivers/net/ethernet/seeq/seeq8005.c
+++ /dev/null
@@ -1,749 +0,0 @@
-/* seeq8005.c: A network driver for linux. */
-/*
- Based on skeleton.c,
- Written 1993-94 by Donald Becker.
- See the skeleton.c file for further copyright information.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as hamish@zot.apana.org.au
-
- This file is a network device driver for the SEEQ 8005 chipset and
- the Linux operating system.
-
-*/
-
-static const char version[] =
- "seeq8005.c:v1.00 8/07/95 Hamish Coleman (hamish@zot.apana.org.au)\n";
-
-/*
- Sources:
- SEEQ 8005 databook
-
- Version history:
- 1.00 Public release. cosmetic changes (no warnings now)
- 0.68 Turning per-packet, interrupt debug messages off - testing for release.
- 0.67 timing problems/bad buffer reads seem to be fixed now
- 0.63 *!@$ protocol=eth_type_trans -- now packets flow
- 0.56 Send working
- 0.48 Receive working
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "seeq8005.h"
-
-/* First, a few definitions that the brave might change. */
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int seeq8005_portlist[] __initdata =
- { 0x300, 0x320, 0x340, 0x360, 0};
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 1
-#endif
-static unsigned int net_debug = NET_DEBUG;
-
-/* Information that needs to be kept for each board. */
-struct net_local {
- unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */
- long open_time; /* Useless example local info. */
-};
-
-/* The station (ethernet) address prefix, used for IDing the board. */
-#define SA_ADDR0 0x00
-#define SA_ADDR1 0x80
-#define SA_ADDR2 0x4b
-
-/* Index to functions, as function prototypes. */
-
-static int seeq8005_probe1(struct net_device *dev, int ioaddr);
-static int seeq8005_open(struct net_device *dev);
-static void seeq8005_timeout(struct net_device *dev);
-static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t seeq8005_interrupt(int irq, void *dev_id);
-static void seeq8005_rx(struct net_device *dev);
-static int seeq8005_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-
-/* Example routines you must write ;->. */
-#define tx_done(dev) (inw(SEEQ_STATUS) & SEEQSTAT_TX_ON)
-static void hardware_send_packet(struct net_device *dev, char *buf, int length);
-extern void seeq8005_init(struct net_device *dev, int startp);
-static inline void wait_for_buffer(struct net_device *dev);
-
-
-/* Check for a network adaptor of this type, and return '0' iff one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- */
-
-static int io = 0x320;
-static int irq = 10;
-
-struct net_device * __init seeq8005_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- unsigned *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = seeq8005_probe1(dev, io);
- } else if (io != 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = seeq8005_portlist; *port; port++) {
- if (seeq8005_probe1(dev, *port) == 0)
- break;
- }
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- release_region(dev->base_addr, SEEQ8005_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops seeq8005_netdev_ops = {
- .ndo_open = seeq8005_open,
- .ndo_stop = seeq8005_close,
- .ndo_start_xmit = seeq8005_send_packet,
- .ndo_tx_timeout = seeq8005_timeout,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* This is the real probe routine. Linux has a history of friendly device
- probes on the ISA bus. A good device probe avoids doing writes, and
- verifies that the correct device exists and functions. */
-
-static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
-{
- static unsigned version_printed;
- int i,j;
- unsigned char SA_prom[32];
- int old_cfg1;
- int old_cfg2;
- int old_stat;
- int old_dmaar;
- int old_rear;
- int retval;
-
- if (!request_region(ioaddr, SEEQ8005_IO_EXTENT, "seeq8005"))
- return -ENODEV;
-
- if (net_debug>1)
- printk("seeq8005: probing at 0x%x\n",ioaddr);
-
- old_stat = inw(SEEQ_STATUS); /* read status register */
- if (old_stat == 0xffff) {
- retval = -ENODEV;
- goto out; /* assume that 0xffff == no device */
- }
- if ( (old_stat & 0x1800) != 0x1800 ) { /* assume that unused bits are 1, as my manual says */
- if (net_debug>1) {
- printk("seeq8005: reserved stat bits != 0x1800\n");
- printk(" == 0x%04x\n",old_stat);
- }
- retval = -ENODEV;
- goto out;
- }
-
- old_rear = inw(SEEQ_REA);
- if (old_rear == 0xffff) {
- outw(0,SEEQ_REA);
- if (inw(SEEQ_REA) == 0xffff) { /* assume that 0xffff == no device */
- retval = -ENODEV;
- goto out;
- }
- } else if ((old_rear & 0xff00) != 0xff00) { /* assume that unused bits are 1 */
- if (net_debug>1) {
- printk("seeq8005: unused rear bits != 0xff00\n");
- printk(" == 0x%04x\n",old_rear);
- }
- retval = -ENODEV;
- goto out;
- }
-
- old_cfg2 = inw(SEEQ_CFG2); /* read CFG2 register */
- old_cfg1 = inw(SEEQ_CFG1);
- old_dmaar = inw(SEEQ_DMAAR);
-
- if (net_debug>4) {
- printk("seeq8005: stat = 0x%04x\n",old_stat);
- printk("seeq8005: cfg1 = 0x%04x\n",old_cfg1);
- printk("seeq8005: cfg2 = 0x%04x\n",old_cfg2);
- printk("seeq8005: raer = 0x%04x\n",old_rear);
- printk("seeq8005: dmaar= 0x%04x\n",old_dmaar);
- }
-
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); /* setup for reading PROM */
- outw( 0, SEEQ_DMAAR); /* set starting PROM address */
- outw( SEEQCFG1_BUFFER_PROM, SEEQ_CFG1); /* set buffer to look at PROM */
-
-
- j=0;
- for(i=0; i <32; i++) {
- j+= SA_prom[i] = inw(SEEQ_BUFFER) & 0xff;
- }
-
-#if 0
- /* untested because I only have the one card */
- if ( (j&0xff) != 0 ) { /* checksum appears to be 8bit = 0 */
- if (net_debug>1) { /* check this before deciding that we have a card */
- printk("seeq8005: prom sum error\n");
- }
- outw( old_stat, SEEQ_STATUS);
- outw( old_dmaar, SEEQ_DMAAR);
- outw( old_cfg1, SEEQ_CFG1);
- retval = -ENODEV;
- goto out;
- }
-#endif
-
- outw( SEEQCFG2_RESET, SEEQ_CFG2); /* reset the card */
- udelay(5);
- outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
-
- if (net_debug) {
- printk("seeq8005: prom sum = 0x%08x\n",j);
- for(j=0; j<32; j+=16) {
- printk("seeq8005: prom %02x: ",j);
- for(i=0;i<16;i++) {
- printk("%02x ",SA_prom[j|i]);
- }
- printk(" ");
- for(i=0;i<16;i++) {
- if ((SA_prom[j|i]>31)&&(SA_prom[j|i]<127)) {
- printk("%c", SA_prom[j|i]);
- } else {
- printk(" ");
- }
- }
- printk("\n");
- }
- }
-
-#if 0
- /*
- * testing the packet buffer memory doesn't work yet
- * but all other buffer accesses do
- * - fixing is not a priority
- */
- if (net_debug>1) { /* test packet buffer memory */
- printk("seeq8005: testing packet buffer ... ");
- outw( SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- outw( 0 , SEEQ_DMAAR);
- for(i=0;i<32768;i++) {
- outw(0x5a5a, SEEQ_BUFFER);
- }
- j=jiffies+HZ;
- while ( ((inw(SEEQ_STATUS) & SEEQSTAT_FIFO_EMPTY) != SEEQSTAT_FIFO_EMPTY) && time_before(jiffies, j) )
- mb();
- outw( 0 , SEEQ_DMAAR);
- while ( ((inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, j+HZ))
- mb();
- if ( (inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
- outw( SEEQCMD_WINDOW_INT_ACK | (inw(SEEQ_STATUS)& SEEQCMD_INT_MASK), SEEQ_CMD);
- outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- j=0;
- for(i=0;i<32768;i++) {
- if (inw(SEEQ_BUFFER) != 0x5a5a)
- j++;
- }
- if (j) {
- printk("%i\n",j);
- } else {
- printk("ok.\n");
- }
- }
-#endif
-
- if (net_debug && version_printed++ == 0)
- printk(version);
-
- printk("%s: %s found at %#3x, ", dev->name, "seeq8005", ioaddr);
-
- /* Fill in the 'dev' fields. */
- dev->base_addr = ioaddr;
- dev->irq = irq;
-
- /* Retrieve and print the ethernet address. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = SA_prom[i+6];
- printk("%pM", dev->dev_addr);
-
- if (dev->irq == 0xff)
- ; /* Do nothing: a user-level program will set it. */
- else if (dev->irq < 2) { /* "Auto-IRQ" */
- unsigned long cookie = probe_irq_on();
-
- outw( SEEQCMD_RX_INT_EN | SEEQCMD_SET_RX_ON | SEEQCMD_SET_RX_OFF, SEEQ_CMD );
-
- dev->irq = probe_irq_off(cookie);
-
- if (net_debug >= 2)
- printk(" autoirq is %d\n", dev->irq);
- } else if (dev->irq == 2)
- /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
- * or don't know which one to set.
- */
- dev->irq = 9;
-
-#if 0
- {
- int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
- if (irqval) {
- printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
- dev->irq, irqval);
- retval = -EAGAIN;
- goto out;
- }
- }
-#endif
- dev->netdev_ops = &seeq8005_netdev_ops;
- dev->watchdog_timeo = HZ/20;
- dev->flags &= ~IFF_MULTICAST;
-
- return 0;
-out:
- release_region(ioaddr, SEEQ8005_IO_EXTENT);
- return retval;
-}
-
-
-/* Open/initialize the board. This is called (in the current kernel)
- sometime after booting when the 'ifconfig' program is run.
-
- This routine should set everything up anew at each open, even
- registers that "should" only need to be set once at boot, so that
- there is a non-reboot way to recover if something goes wrong.
- */
-static int seeq8005_open(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
-
- {
- int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
- if (irqval) {
- printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
- dev->irq, irqval);
- return -EAGAIN;
- }
- }
-
- /* Reset the hardware here. Don't forget to set the station address. */
- seeq8005_init(dev, 1);
-
- lp->open_time = jiffies;
-
- netif_start_queue(dev);
- return 0;
-}
-
-static void seeq8005_timeout(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
- tx_done(dev) ? "IRQ conflict" : "network cable problem");
- /* Try to restart the adaptor. */
- seeq8005_init(dev, 1);
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- short length = skb->len;
- unsigned char *buf;
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
- buf = skb->data;
-
- /* Block a timer-based transmit from overlapping */
- netif_stop_queue(dev);
-
- hardware_send_packet(dev, buf, length);
- dev->stats.tx_bytes += length;
- dev_kfree_skb (skb);
- /* You might need to clean up and record Tx statistics here. */
-
- return NETDEV_TX_OK;
-}
-
-/*
- * wait_for_buffer
- *
- * This routine waits for the SEEQ chip to assert that the FIFO is ready
- * by checking for a window interrupt, and then clearing it. This has to
- * occur in the interrupt handler!
- */
-inline void wait_for_buffer(struct net_device * dev)
-{
- int ioaddr = dev->base_addr;
- unsigned long tmp;
- int status;
-
- tmp = jiffies + HZ;
- while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp))
- cpu_relax();
-
- if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
- outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
-}
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-static irqreturn_t seeq8005_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr, status, boguscount = 0;
- int handled = 0;
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- status = inw(SEEQ_STATUS);
- do {
- if (net_debug >2) {
- printk("%s: int, status=0x%04x\n",dev->name,status);
- }
-
- if (status & SEEQSTAT_WINDOW_INT) {
- handled = 1;
- outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- if (net_debug) {
- printk("%s: window int!\n",dev->name);
- }
- }
- if (status & SEEQSTAT_TX_INT) {
- handled = 1;
- outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- dev->stats.tx_packets++;
- netif_wake_queue(dev); /* Inform upper layers. */
- }
- if (status & SEEQSTAT_RX_INT) {
- handled = 1;
- /* Got a packet(s). */
- seeq8005_rx(dev);
- }
- status = inw(SEEQ_STATUS);
- } while ( (++boguscount < 10) && (status & SEEQSTAT_ANY_INT)) ;
-
- if(net_debug>2) {
- printk("%s: eoi\n",dev->name);
- }
- return IRQ_RETVAL(handled);
-}
-
-/* We have a good packet(s), get it/them out of the buffers. */
-static void seeq8005_rx(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int boguscount = 10;
- int pkt_hdr;
- int ioaddr = dev->base_addr;
-
- do {
- int next_packet;
- int pkt_len;
- int i;
- int status;
-
- status = inw(SEEQ_STATUS);
- outw( lp->receive_ptr, SEEQ_DMAAR);
- outw(SEEQCMD_FIFO_READ | SEEQCMD_RX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- wait_for_buffer(dev);
- next_packet = ntohs(inw(SEEQ_BUFFER));
- pkt_hdr = inw(SEEQ_BUFFER);
-
- if (net_debug>2) {
- printk("%s: 0x%04x recv next=0x%04x, hdr=0x%04x\n",dev->name,lp->receive_ptr,next_packet,pkt_hdr);
- }
-
- if ((next_packet == 0) || ((pkt_hdr & SEEQPKTH_CHAIN)==0)) { /* Read all the frames? */
- return; /* Done for now */
- }
-
- if ((pkt_hdr & SEEQPKTS_DONE)==0)
- break;
-
- if (next_packet < lp->receive_ptr) {
- pkt_len = (next_packet + 0x10000 - ((DEFAULT_TEA+1)<<8)) - lp->receive_ptr - 4;
- } else {
- pkt_len = next_packet - lp->receive_ptr - 4;
- }
-
- if (next_packet < ((DEFAULT_TEA+1)<<8)) { /* is the next_packet address sane? */
- printk("%s: recv packet ring corrupt, resetting board\n",dev->name);
- seeq8005_init(dev,1);
- return;
- }
-
- lp->receive_ptr = next_packet;
-
- if (net_debug>2) {
- printk("%s: recv len=0x%04x\n",dev->name,pkt_len);
- }
-
- if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */
- dev->stats.rx_errors++;
- if (pkt_hdr & SEEQPKTS_SHORT) dev->stats.rx_frame_errors++;
- if (pkt_hdr & SEEQPKTS_DRIB) dev->stats.rx_frame_errors++;
- if (pkt_hdr & SEEQPKTS_OVERSIZE) dev->stats.rx_over_errors++;
- if (pkt_hdr & SEEQPKTS_CRC_ERR) dev->stats.rx_crc_errors++;
- /* skip over this packet */
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA);
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
- unsigned char *buf;
-
- skb = netdev_alloc_skb(dev, pkt_len);
- if (skb == NULL) {
- printk("%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- break;
- }
- skb_reserve(skb, 2); /* align data on a 16-byte boundary */
- buf = skb_put(skb,pkt_len);
-
- insw(SEEQ_BUFFER, buf, (pkt_len + 1) >> 1);
-
- if (net_debug>2) {
- char * p = buf;
- printk("%s: recv ",dev->name);
- for(i=0;i<14;i++) {
- printk("%02x ",*(p++)&0xff);
- }
- printk("\n");
- }
-
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN));
-
- /* If any worth-while packets have been received, netif_rx()
- has done a mark_bh(NET_BH) for us and will work on them
- when we get to the bottom-half routine. */
-}
-
-/* The inverse routine to net_open(). */
-static int seeq8005_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- lp->open_time = 0;
-
- netif_stop_queue(dev);
-
- /* Flush the Tx and disable Rx here. */
- outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
-
- free_irq(dev->irq, dev);
-
- /* Update the statistics here. */
-
- return 0;
-
-}
-
-/* Set or clear the multicast filter for this adaptor.
- num_addrs == -1 Promiscuous mode, receive all packets
- num_addrs == 0 Normal mode, clear multicast list
- num_addrs > 0 Multicast mode, receive normal and MC packets, and do
- best-effort filtering.
- */
-static void set_multicast_list(struct net_device *dev)
-{
-/*
- * I _could_ do up to 6 addresses here, but won't (yet?)
- */
-
-#if 0
- int ioaddr = dev->base_addr;
-/*
- * hmm, not even sure if my matching works _anyway_ - seem to be receiving
- * _everything_ . . .
- */
-
- if (num_addrs) { /* Enable promiscuous mode */
- outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_ALL, SEEQ_CFG1);
- dev->flags|=IFF_PROMISC;
- } else { /* Disable promiscuous mode, use normal mode */
- outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_BROAD, SEEQ_CFG1);
- }
-#endif
-}
-
-void seeq8005_init(struct net_device *dev, int startp)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int i;
-
- outw(SEEQCFG2_RESET, SEEQ_CFG2); /* reset device */
- udelay(5);
-
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- outw( 0, SEEQ_DMAAR); /* load start address into both low and high byte */
-/* wait_for_buffer(dev); */ /* I think that you only need a wait for memory buffer */
- outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
-
- for(i=0;i<6;i++) { /* set Station address */
- outb(dev->dev_addr[i], SEEQ_BUFFER);
- udelay(2);
- }
-
- outw( SEEQCFG1_BUFFER_TEA, SEEQ_CFG1); /* set xmit end area pointer to 16K */
- outb( DEFAULT_TEA, SEEQ_BUFFER); /* this gives us 16K of send buffer and 48K of recv buffer */
-
- lp->receive_ptr = (DEFAULT_TEA+1)<<8; /* so we can find our packet_header */
- outw( lp->receive_ptr, SEEQ_RPR); /* Receive Pointer Register is set to recv buffer memory */
-
- outw( 0x00ff, SEEQ_REA); /* Receive Area End */
-
- if (net_debug>4) {
- printk("%s: SA0 = ",dev->name);
-
- outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- outw( 0, SEEQ_DMAAR);
- outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
-
- for(i=0;i<6;i++) {
- printk("%02x ",inb(SEEQ_BUFFER));
- }
- printk("\n");
- }
-
- outw( SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD | SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
- outw( SEEQCFG2_AUTO_REA | SEEQCFG2_CTRLO, SEEQ_CFG2);
- outw( SEEQCMD_SET_RX_ON | SEEQCMD_TX_INT_EN | SEEQCMD_RX_INT_EN, SEEQ_CMD);
-
- if (net_debug>4) {
- int old_cfg1;
- old_cfg1 = inw(SEEQ_CFG1);
- printk("%s: stat = 0x%04x\n",dev->name,inw(SEEQ_STATUS));
- printk("%s: cfg1 = 0x%04x\n",dev->name,old_cfg1);
- printk("%s: cfg2 = 0x%04x\n",dev->name,inw(SEEQ_CFG2));
- printk("%s: raer = 0x%04x\n",dev->name,inw(SEEQ_REA));
- printk("%s: dmaar= 0x%04x\n",dev->name,inw(SEEQ_DMAAR));
-
- }
-}
-
-
-static void hardware_send_packet(struct net_device * dev, char *buf, int length)
-{
- int ioaddr = dev->base_addr;
- int status = inw(SEEQ_STATUS);
- int transmit_ptr = 0;
- unsigned long tmp;
-
- if (net_debug>4) {
- printk("%s: send 0x%04x\n",dev->name,length);
- }
-
- /* Set FIFO to writemode and set packet-buffer address */
- outw( SEEQCMD_FIFO_WRITE | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- outw( transmit_ptr, SEEQ_DMAAR);
-
- /* output SEEQ Packet header barfage */
- outw( htons(length + 4), SEEQ_BUFFER);
- outw( SEEQPKTH_XMIT | SEEQPKTH_DATA_FOLLOWS | SEEQPKTH_XMIT_INT_EN, SEEQ_BUFFER );
-
- /* blat the buffer */
- outsw( SEEQ_BUFFER, buf, (length +1) >> 1);
- /* paranoia !! */
- outw( 0, SEEQ_BUFFER);
- outw( 0, SEEQ_BUFFER);
-
- /* set address of start of transmit chain */
- outw( transmit_ptr, SEEQ_TPR);
-
- /* drain FIFO */
- tmp = jiffies;
- while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && time_before(jiffies, tmp + HZ))
- mb();
-
- /* doit ! */
- outw( SEEQCMD_WINDOW_INT_ACK | SEEQCMD_SET_TX_ON | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
-
-}
-
-
-#ifdef MODULE
-
-static struct net_device *dev_seeq;
-MODULE_LICENSE("GPL");
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address");
-MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
-
-int __init init_module(void)
-{
- dev_seeq = seeq8005_probe(-1);
- return PTR_RET(dev_seeq);
-}
-
-void __exit cleanup_module(void)
-{
- unregister_netdev(dev_seeq);
- release_region(dev_seeq->base_addr, SEEQ8005_IO_EXTENT);
- free_netdev(dev_seeq);
-}
-
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/seeq/seeq8005.h b/drivers/net/ethernet/seeq/seeq8005.h
deleted file mode 100644
index 5dfb0098c6ca..000000000000
--- a/drivers/net/ethernet/seeq/seeq8005.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * defines, etc for the seeq8005
- */
-
-/*
- * This file is distributed under GPL.
- *
- * This style and layout of this file is also copied
- * from many of the other linux network device drivers.
- */
-
-/* The number of low I/O ports used by the ethercard. */
-#define SEEQ8005_IO_EXTENT 16
-
-#define SEEQ_B (ioaddr)
-
-#define SEEQ_CMD (SEEQ_B) /* Write only */
-#define SEEQ_STATUS (SEEQ_B) /* Read only */
-#define SEEQ_CFG1 (SEEQ_B + 2)
-#define SEEQ_CFG2 (SEEQ_B + 4)
-#define SEEQ_REA (SEEQ_B + 6) /* Receive End Area Register */
-#define SEEQ_RPR (SEEQ_B + 10) /* Receive Pointer Register */
-#define SEEQ_TPR (SEEQ_B + 12) /* Transmit Pointer Register */
-#define SEEQ_DMAAR (SEEQ_B + 14) /* DMA Address Register */
-#define SEEQ_BUFFER (SEEQ_B + 8) /* Buffer Window Register */
-
-#define DEFAULT_TEA (0x3f)
-
-#define SEEQCMD_DMA_INT_EN (0x0001) /* DMA Interrupt Enable */
-#define SEEQCMD_RX_INT_EN (0x0002) /* Receive Interrupt Enable */
-#define SEEQCMD_TX_INT_EN (0x0004) /* Transmit Interrupt Enable */
-#define SEEQCMD_WINDOW_INT_EN (0x0008) /* What the hell is this for?? */
-#define SEEQCMD_INT_MASK (0x000f)
-
-#define SEEQCMD_DMA_INT_ACK (0x0010) /* DMA ack */
-#define SEEQCMD_RX_INT_ACK (0x0020)
-#define SEEQCMD_TX_INT_ACK (0x0040)
-#define SEEQCMD_WINDOW_INT_ACK (0x0080)
-#define SEEQCMD_ACK_ALL (0x00f0)
-
-#define SEEQCMD_SET_DMA_ON (0x0100) /* Enables DMA Request logic */
-#define SEEQCMD_SET_RX_ON (0x0200) /* Enables Packet RX */
-#define SEEQCMD_SET_TX_ON (0x0400) /* Starts TX run */
-#define SEEQCMD_SET_DMA_OFF (0x0800)
-#define SEEQCMD_SET_RX_OFF (0x1000)
-#define SEEQCMD_SET_TX_OFF (0x2000)
-#define SEEQCMD_SET_ALL_OFF (0x3800) /* set all logic off */
-
-#define SEEQCMD_FIFO_READ (0x4000) /* Set FIFO to read mode (read from Buffer) */
-#define SEEQCMD_FIFO_WRITE (0x8000) /* Set FIFO to write mode */
-
-#define SEEQSTAT_DMA_INT_EN (0x0001) /* Status of interrupt enable */
-#define SEEQSTAT_RX_INT_EN (0x0002)
-#define SEEQSTAT_TX_INT_EN (0x0004)
-#define SEEQSTAT_WINDOW_INT_EN (0x0008)
-
-#define SEEQSTAT_DMA_INT (0x0010) /* Interrupt flagged */
-#define SEEQSTAT_RX_INT (0x0020)
-#define SEEQSTAT_TX_INT (0x0040)
-#define SEEQSTAT_WINDOW_INT (0x0080)
-#define SEEQSTAT_ANY_INT (0x00f0)
-
-#define SEEQSTAT_DMA_ON (0x0100) /* DMA logic on */
-#define SEEQSTAT_RX_ON (0x0200) /* Packet RX on */
-#define SEEQSTAT_TX_ON (0x0400) /* TX running */
-
-#define SEEQSTAT_FIFO_FULL (0x2000)
-#define SEEQSTAT_FIFO_EMPTY (0x4000)
-#define SEEQSTAT_FIFO_DIR (0x8000) /* 1=read, 0=write */
-
-#define SEEQCFG1_BUFFER_MASK (0x000f) /* define what maps into the BUFFER register */
-#define SEEQCFG1_BUFFER_MAC0 (0x0000) /* MAC station addresses 0-5 */
-#define SEEQCFG1_BUFFER_MAC1 (0x0001)
-#define SEEQCFG1_BUFFER_MAC2 (0x0002)
-#define SEEQCFG1_BUFFER_MAC3 (0x0003)
-#define SEEQCFG1_BUFFER_MAC4 (0x0004)
-#define SEEQCFG1_BUFFER_MAC5 (0x0005)
-#define SEEQCFG1_BUFFER_PROM (0x0006) /* The Address/CFG PROM */
-#define SEEQCFG1_BUFFER_TEA (0x0007) /* Transmit end area */
-#define SEEQCFG1_BUFFER_BUFFER (0x0008) /* Packet buffer memory */
-#define SEEQCFG1_BUFFER_INT_VEC (0x0009) /* Interrupt Vector */
-
-#define SEEQCFG1_DMA_INTVL_MASK (0x0030)
-#define SEEQCFG1_DMA_CONT (0x0000)
-#define SEEQCFG1_DMA_800ns (0x0010)
-#define SEEQCFG1_DMA_1600ns (0x0020)
-#define SEEQCFG1_DMA_3200ns (0x0030)
-
-#define SEEQCFG1_DMA_LEN_MASK (0x00c0)
-#define SEEQCFG1_DMA_LEN1 (0x0000)
-#define SEEQCFG1_DMA_LEN2 (0x0040)
-#define SEEQCFG1_DMA_LEN4 (0x0080)
-#define SEEQCFG1_DMA_LEN8 (0x00c0)
-
-#define SEEQCFG1_MAC_MASK (0x3f00) /* Dis/enable bits for MAC addresses */
-#define SEEQCFG1_MAC0_EN (0x0100)
-#define SEEQCFG1_MAC1_EN (0x0200)
-#define SEEQCFG1_MAC2_EN (0x0400)
-#define SEEQCFG1_MAC3_EN (0x0800)
-#define SEEQCFG1_MAC4_EN (0x1000)
-#define SEEQCFG1_MAC5_EN (0x2000)
-
-#define SEEQCFG1_MATCH_MASK (0xc000) /* Packet matching logic cfg bits */
-#define SEEQCFG1_MATCH_SPECIFIC (0x0000) /* only matching MAC addresses */
-#define SEEQCFG1_MATCH_BROAD (0x4000) /* matching and broadcast addresses */
-#define SEEQCFG1_MATCH_MULTI (0x8000) /* matching, broadcast and multicast */
-#define SEEQCFG1_MATCH_ALL (0xc000) /* Promiscuous mode */
-
-#define SEEQCFG1_DEFAULT (SEEQCFG1_BUFFER_BUFFER | SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD)
-
-#define SEEQCFG2_BYTE_SWAP (0x0001) /* 0=Intel byte-order */
-#define SEEQCFG2_AUTO_REA (0x0002) /* if set, Receive End Area will be updated when reading from Buffer */
-
-#define SEEQCFG2_CRC_ERR_EN (0x0008) /* enables receiving of packets with CRC errors */
-#define SEEQCFG2_DRIBBLE_EN (0x0010) /* enables receiving of non-aligned packets */
-#define SEEQCFG2_SHORT_EN (0x0020) /* enables receiving of short packets */
-
-#define SEEQCFG2_SLOTSEL (0x0040) /* 0= standard IEEE802.3, 1= smaller,faster, non-standard */
-#define SEEQCFG2_NO_PREAM (0x0080) /* 1= user supplies Xmit preamble bytes */
-#define SEEQCFG2_ADDR_LEN (0x0100) /* 1= 2byte addresses */
-#define SEEQCFG2_REC_CRC (0x0200) /* 0= received packets will have CRC stripped from them */
-#define SEEQCFG2_XMIT_NO_CRC (0x0400) /* don't xmit CRC with each packet (user supplies it) */
-#define SEEQCFG2_LOOPBACK (0x0800)
-#define SEEQCFG2_CTRLO (0x1000)
-#define SEEQCFG2_RESET (0x8000) /* software Hard-reset bit */
-
-struct seeq_pkt_hdr {
- unsigned short next; /* address of next packet header */
- unsigned char babble_int:1, /* enable int on >1514 byte packet */
- coll_int:1, /* enable int on collision */
- coll_16_int:1, /* enable int on >15 collision */
- xmit_int:1, /* enable int on success (or xmit with <15 collision) */
- unused:1,
- data_follows:1, /* if not set, process this as a header and pointer only */
- chain_cont:1, /* if set, more headers in chain only cmd bit valid in recv header */
- xmit_recv:1; /* if set, a xmit packet, else a receive packet.*/
- unsigned char status;
-};
-
-#define SEEQPKTH_BAB_INT_EN (0x01) /* xmit only */
-#define SEEQPKTH_COL_INT_EN (0x02) /* xmit only */
-#define SEEQPKTH_COL16_INT_EN (0x04) /* xmit only */
-#define SEEQPKTH_XMIT_INT_EN (0x08) /* xmit only */
-#define SEEQPKTH_DATA_FOLLOWS (0x20) /* supposedly in xmit only */
-#define SEEQPKTH_CHAIN (0x40) /* more headers follow */
-#define SEEQPKTH_XMIT (0x80)
-
-#define SEEQPKTS_BABBLE (0x0100) /* xmit only */
-#define SEEQPKTS_OVERSIZE (0x0100) /* recv only */
-#define SEEQPKTS_COLLISION (0x0200) /* xmit only */
-#define SEEQPKTS_CRC_ERR (0x0200) /* recv only */
-#define SEEQPKTS_COLL16 (0x0400) /* xmit only */
-#define SEEQPKTS_DRIB (0x0400) /* recv only */
-#define SEEQPKTS_SHORT (0x0800) /* recv only */
-#define SEEQPKTS_DONE (0x8000)
-#define SEEQPKTS_ANY_ERROR (0x0f00)
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 0767043f44a4..3f93624fc273 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1439,7 +1439,7 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
delta = timespec_sub(*e_ts, time_now);
- efx_phc_adjtime(ptp, timespec_to_ns(&delta));
+ rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta));
if (rc != 0)
return rc;
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index dc171b4961e4..7ed08c32a9c5 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1565,9 +1565,9 @@ static void ioc3_get_drvinfo (struct net_device *dev,
{
struct ioc3_private *ip = netdev_priv(dev);
- strcpy (info->driver, IOC3_NAME);
- strcpy (info->version, IOC3_VERSION);
- strcpy (info->bus_info, pci_name(ip->pdev));
+ strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
+ strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
}
static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/silan/Kconfig b/drivers/net/ethernet/silan/Kconfig
index ae1ce170864d..3409b3f97a1b 100644
--- a/drivers/net/ethernet/silan/Kconfig
+++ b/drivers/net/ethernet/silan/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_SILAN
bool "Silan devices"
default y
- depends on PCI && EXPERIMENTAL
+ depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -19,8 +19,8 @@ config NET_VENDOR_SILAN
if NET_VENDOR_SILAN
config SC92031
- tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Silan SC92031 PCI Fast Ethernet Adapter driver"
+ depends on PCI
select CRC32
---help---
This is a driver for the Fast Ethernet PCI network cards based on
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index b2315324cc6d..28f7268f1b88 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1458,12 +1458,12 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mac0 = ioread32(port_base + MAC0);
mac1 = ioread32(port_base + MAC0 + 4);
- dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
- dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
- dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
- dev->dev_addr[3] = dev->perm_addr[3] = mac0;
- dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
- dev->dev_addr[5] = dev->perm_addr[5] = mac1;
+ dev->dev_addr[0] = mac0 >> 24;
+ dev->dev_addr[1] = mac0 >> 16;
+ dev->dev_addr[2] = mac0 >> 8;
+ dev->dev_addr[3] = mac0;
+ dev->dev_addr[4] = mac1 >> 8;
+ dev->dev_addr[5] = mac1;
err = register_netdev(dev);
if (err < 0)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 5bffd9749a58..efca14eaefa9 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -247,8 +247,7 @@ static const struct ethtool_ops sis900_ethtool_ops;
* @net_dev: the net device to get address for
*
* Older SiS900 and friends, use EEPROM to store MAC address.
- * MAC address is read from read_eeprom() into @net_dev->dev_addr and
- * @net_dev->perm_addr.
+ * MAC address is read from read_eeprom() into @net_dev->dev_addr.
*/
static int sis900_get_mac_addr(struct pci_dev *pci_dev,
@@ -271,9 +270,6 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
return 1;
}
@@ -284,8 +280,7 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
*
* SiS630E model, use APC CMOS RAM to store MAC address.
* APC CMOS RAM is accessed through ISA bridge.
- * MAC address is read into @net_dev->dev_addr and
- * @net_dev->perm_addr.
+ * MAC address is read into @net_dev->dev_addr.
*/
static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
@@ -311,9 +306,6 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
}
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
pci_dev_put(isa_bridge);
@@ -328,7 +320,7 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
*
* SiS635 model, set MAC Reload Bit to load Mac address from APC
* to rfdr. rfdr is accessed through rfcr. MAC address is read into
- * @net_dev->dev_addr and @net_dev->perm_addr.
+ * @net_dev->dev_addr.
*/
static int sis635_get_mac_addr(struct pci_dev *pci_dev,
@@ -353,9 +345,6 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
*( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
}
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
/* enable packet filtering */
sw32(rfcr, rfcrSave | RFEN);
@@ -375,7 +364,7 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
* EEDONE signal to refuse EEPROM access by LAN.
* The EEPROM map of SiS962 or SiS963 is different to SiS900.
* The signature field in SiS962 or SiS963 spec is meaningless.
- * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
+ * MAC address is read into @net_dev->dev_addr.
*/
static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
@@ -395,9 +384,6 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
for (i = 0; i < 3; i++)
mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
rc = 1;
break;
}
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 59a6f88da867..9dd842dbb859 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1522,9 +1522,10 @@ smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void
smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strncpy(info->driver, CARDNAME, sizeof(info->driver));
- strncpy(info->version, version, sizeof(info->version));
- strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
+ strlcpy(info->driver, CARDNAME, sizeof(info->driver));
+ strlcpy(info->version, version, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
}
static int smc911x_ethtool_nwayreset(struct net_device *dev)
@@ -2035,7 +2036,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
struct resource *res;
struct smc911x_local *lp;
- unsigned int *addr;
+ void __iomem *addr;
int ret;
DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index a670d23d9340..591650a8de38 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1597,9 +1597,10 @@ smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void
smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strncpy(info->driver, CARDNAME, sizeof(info->driver));
- strncpy(info->version, version, sizeof(info->version));
- strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
+ strlcpy(info->driver, CARDNAME, sizeof(info->driver));
+ strlcpy(info->version, version, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
}
static int smc_ethtool_nwayreset(struct net_device *dev)
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index e112877d15d3..da5cc9a3b34c 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -997,9 +997,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X",
phydev->addr, phydev->phy_id);
- ret = phy_connect_direct(dev, phydev,
- &smsc911x_phy_adjust_link, 0,
- pdata->config.phy_interface);
+ ret = phy_connect_direct(dev, phydev, &smsc911x_phy_adjust_link,
+ pdata->config.phy_interface);
if (ret) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -1831,7 +1830,6 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
spin_lock_irq(&pdata->mac_lock);
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 3c586585e1b3..d457fa2d7509 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1179,7 +1179,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
phydev->phy_id);
phydev = phy_connect(dev, dev_name(&phydev->dev),
- smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+ smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -1250,12 +1250,11 @@ static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd)
BUG_ON(!pd->tx_ring);
- pd->tx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
- TX_RING_SIZE), GFP_KERNEL);
- if (!pd->tx_buffers) {
- smsc_warn(IFUP, "Failed to allocated tx_buffers");
+ pd->tx_buffers = kmalloc_array(TX_RING_SIZE,
+ sizeof(struct smsc9420_ring_info),
+ GFP_KERNEL);
+ if (!pd->tx_buffers)
return -ENOMEM;
- }
/* Initialize the TX Ring */
for (i = 0; i < TX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 1164930a40a5..c0ea838c78d1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -26,8 +26,8 @@ config STMMAC_PLATFORM
If unsure, say N.
config STMMAC_PCI
- bool "STMMAC PCI bus support (EXPERIMENTAL)"
- depends on STMMAC_ETH && PCI && EXPERIMENTAL
+ bool "STMMAC PCI bus support"
+ depends on STMMAC_ETH && PCI
---help---
This is to select the Synopsys DWMAC available on PCI devices,
if you have a controller with this interface, say Y or M here.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1372ce210b58..d1ac39c1b05d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -210,8 +210,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
strlcpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
- strcpy(info->version, DRV_MODULE_VERSION);
- info->fw_version[0] = '\0';
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int stmmac_ethtool_getsettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f07c0612abf6..39c6c5524633 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -69,7 +69,7 @@
#undef STMMAC_XMIT_DEBUG
/*#define STMMAC_XMIT_DEBUG*/
-#ifdef STMMAC_TX_DEBUG
+#ifdef STMMAC_XMIT_DEBUG
#define TX_DBG(fmt, args...) printk(fmt, ## args)
#else
#define TX_DBG(fmt, args...) do { } while (0)
@@ -428,8 +428,7 @@ static int stmmac_init_phy(struct net_device *dev)
priv->plat->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
- phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0,
- interface);
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -531,17 +530,18 @@ static void init_dma_desc_rings(struct net_device *dev)
DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
txsize, rxsize, bfsize);
- priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
- priv->rx_skbuff =
- kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
+ priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
priv->dma_rx =
(struct dma_desc *)dma_alloc_coherent(priv->device,
rxsize *
sizeof(struct dma_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
- priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
- GFP_KERNEL);
+ priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
priv->dma_tx =
(struct dma_desc *)dma_alloc_coherent(priv->device,
txsize *
@@ -2254,7 +2254,7 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "pause:", 6)) {
if (kstrtoint(opt + 6, 0, &pause))
goto err;
- } else if (!strncmp(opt, "eee_timer:", 6)) {
+ } else if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0376a5e6b2bf..0b9829fe3eea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -188,8 +188,6 @@ int stmmac_mdio_register(struct net_device *ndev)
goto bus_register_fail;
}
- priv->mii = new_bus;
-
found = 0;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
struct phy_device *phydev = new_bus->phy_map[addr];
@@ -237,8 +235,14 @@ int stmmac_mdio_register(struct net_device *ndev)
}
}
- if (!found)
+ if (!found) {
pr_warning("%s: No PHY found\n", ndev->name);
+ mdiobus_unregister(new_bus);
+ mdiobus_free(new_bus);
+ return -ENODEV;
+ }
+
+ priv->mii = new_bus;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 064eaac9616f..19b3a2567a46 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -102,6 +102,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
if (!priv) {
pr_err("%s: main driver probe failed", __func__);
+ ret = -ENODEV;
goto err_out;
}
priv->dev->irq = pdev->irq;
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
index 57bfd8599679..3074aa374c6b 100644
--- a/drivers/net/ethernet/sun/Kconfig
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -32,8 +32,8 @@ config HAPPYMEAL
will be called sunhme.
config SUNBMAC
- tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)"
- depends on SBUS && EXPERIMENTAL
+ tristate "Sun BigMAC 10/100baseT support"
+ depends on SBUS
select CRC32
---help---
This driver supports the "be" interface available as an Sbus option.
@@ -61,7 +61,7 @@ config SUNGEM
select SUNGEM_PHY
---help---
Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
- <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
+ <http://docs.oracle.com/cd/E19455-01/806-3985-10/806-3985-10.pdf>.
config CASSINI
tristate "Sun Cassini support"
@@ -69,7 +69,7 @@ config CASSINI
select CRC32
---help---
Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
- <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
+ <http://docs.oracle.com/cd/E19113-01/giga.ether.pci/817-4341-10/817-4341-10.pdf>.
config SUNVNET
tristate "Sun Virtual Network support"
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index a0bdf0779466..e4c1c88e4c2a 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -4342,7 +4342,7 @@ static int niu_alloc_rx_ring_info(struct niu *np,
{
BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
- rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
+ rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
GFP_KERNEL);
if (!rp->rxhash)
return -ENOMEM;
@@ -8366,14 +8366,12 @@ static void niu_pci_vpd_validate(struct niu *np)
return;
}
- memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);
+ memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
- val8 = dev->perm_addr[5];
- dev->perm_addr[5] += np->port;
- if (dev->perm_addr[5] < val8)
- dev->perm_addr[4]++;
-
- memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+ val8 = dev->dev_addr[5];
+ dev->dev_addr[5] += np->port;
+ if (dev->dev_addr[5] < val8)
+ dev->dev_addr[4]++;
}
static int niu_pci_probe_sprom(struct niu *np)
@@ -8470,29 +8468,27 @@ static int niu_pci_probe_sprom(struct niu *np)
val = nr64(ESPC_MAC_ADDR0);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
- dev->perm_addr[0] = (val >> 0) & 0xff;
- dev->perm_addr[1] = (val >> 8) & 0xff;
- dev->perm_addr[2] = (val >> 16) & 0xff;
- dev->perm_addr[3] = (val >> 24) & 0xff;
+ dev->dev_addr[0] = (val >> 0) & 0xff;
+ dev->dev_addr[1] = (val >> 8) & 0xff;
+ dev->dev_addr[2] = (val >> 16) & 0xff;
+ dev->dev_addr[3] = (val >> 24) & 0xff;
val = nr64(ESPC_MAC_ADDR1);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
- dev->perm_addr[4] = (val >> 0) & 0xff;
- dev->perm_addr[5] = (val >> 8) & 0xff;
+ dev->dev_addr[4] = (val >> 0) & 0xff;
+ dev->dev_addr[5] = (val >> 8) & 0xff;
- if (!is_valid_ether_addr(&dev->perm_addr[0])) {
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
- dev->perm_addr);
+ dev->dev_addr);
return -EINVAL;
}
- val8 = dev->perm_addr[5];
- dev->perm_addr[5] += np->port;
- if (dev->perm_addr[5] < val8)
- dev->perm_addr[4]++;
-
- memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+ val8 = dev->dev_addr[5];
+ dev->dev_addr[5] += np->port;
+ if (dev->dev_addr[5] < val8)
+ dev->dev_addr[4]++;
val = nr64(ESPC_MOD_STR_LEN);
netif_printk(np, probe, KERN_DEBUG, np->dev,
@@ -9267,16 +9263,14 @@ static int niu_get_of_props(struct niu *np)
netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
dp->full_name, prop_len);
}
- memcpy(dev->perm_addr, mac_addr, dev->addr_len);
- if (!is_valid_ether_addr(&dev->perm_addr[0])) {
+ memcpy(dev->dev_addr, mac_addr, dev->addr_len);
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
netdev_err(dev, "%s: OF MAC address is invalid\n",
dp->full_name);
- netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
+ netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr);
return -EINVAL;
}
- memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
-
model = of_get_property(dp, "model", &prop_len);
if (model)
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index be82f6d13c51..5fafca065305 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1042,8 +1042,8 @@ static void bigmac_set_multicast(struct net_device *dev)
/* Ethtool support... */
static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "sunbmac");
- strcpy(info->version, "2.0");
+ strlcpy(info->driver, "sunbmac", sizeof(info->driver));
+ strlcpy(info->version, "2.0", sizeof(info->version));
}
static u32 bigmac_get_link(struct net_device *dev)
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 1dcee6915843..49bf3e2eb652 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -685,13 +685,14 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct sunqe *qep = netdev_priv(dev);
struct platform_device *op;
- strcpy(info->driver, "sunqe");
- strcpy(info->version, "3.0");
+ strlcpy(info->driver, "sunqe", sizeof(info->driver));
+ strlcpy(info->version, "3.0", sizeof(info->version));
op = qep->op;
regs = of_get_property(op->dev.of_node, "reg", NULL);
if (regs)
- sprintf(info->bus_info, "SBUS:%d", regs->which_io);
+ snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
+ regs->which_io);
}
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index e1b895530827..289b4eefb42f 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -882,8 +882,8 @@ static int vnet_set_mac_addr(struct net_device *dev, void *p)
static void vnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static u32 vnet_get_msglevel(struct net_device *dev)
@@ -1032,8 +1032,6 @@ static struct vnet *vnet_new(const u64 *local_mac)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
vp = netdev_priv(dev);
spin_lock_init(&vp->lock);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 1e4d743ff03e..e15cc71b826d 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2179,10 +2179,10 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct bdx_priv *priv = netdev_priv(netdev);
- strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
- strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
- strlcat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcat(drvinfo->bus_info, pci_name(priv->pdev),
+ strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 4426151d4ac9..de71b1ec4625 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -88,8 +88,8 @@ config TLAN
Please email feedback to <torben.mathiasen@compaq.com>.
config CPMAC
- tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && AR7
+ tristate "TI AR7 CPMAC Ethernet support"
+ depends on AR7
select PHYLIB
---help---
TI AR7 CPMAC Ethernet support
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index d9625f62b026..31bbbca341a7 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -904,10 +904,9 @@ static int cpmac_set_ringparam(struct net_device *dev,
static void cpmac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "cpmac");
- strcpy(info->version, CPMAC_VERSION);
- info->fw_version[0] = '\0';
- sprintf(info->bus_info, "%s", "cpmac");
+ strlcpy(info->driver, "cpmac", sizeof(info->driver));
+ strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
info->regdump_len = 0;
}
@@ -1173,8 +1172,8 @@ static int cpmac_probe(struct platform_device *pdev)
snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
mdio_bus_id, phy_id);
- priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
+ priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phy)) {
if (netif_msg_drv(priv))
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 40aff684aa23..7e93df6585e7 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -32,6 +32,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
+#include <linux/if_vlan.h>
#include <linux/platform_data/cpsw.h>
@@ -118,6 +119,13 @@ do { \
#define TX_PRIORITY_MAPPING 0x33221100
#define CPDMA_TX_PRIORITY_MAP 0x76543210
+#define CPSW_VLAN_AWARE BIT(1)
+#define CPSW_ALE_VLAN_AWARE 1
+
+#define CPSW_FIFO_NORMAL_MODE (0 << 15)
+#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
+#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
+
#define cpsw_enable_irq(priv) \
do { \
u32 i; \
@@ -250,7 +258,7 @@ struct cpsw_ss_regs {
struct cpsw_host_regs {
u32 max_blks;
u32 blk_cnt;
- u32 flow_thresh;
+ u32 tx_in_ctl;
u32 port_vlan;
u32 tx_pri_map;
u32 cpdma_tx_pri_map;
@@ -277,6 +285,9 @@ struct cpsw_slave {
u32 mac_control;
struct cpsw_slave_data *data;
struct phy_device *phy;
+ struct net_device *ndev;
+ u32 port_vlan;
+ u32 open_stat;
};
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
@@ -315,17 +326,65 @@ struct cpsw_priv {
/* snapshot of IRQ numbers */
u32 irqs_table[4];
u32 num_irqs;
- struct cpts cpts;
+ struct cpts *cpts;
+ u32 emac_port;
};
#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
-#define for_each_slave(priv, func, arg...) \
- do { \
- int idx; \
- for (idx = 0; idx < (priv)->data.slaves; idx++) \
- (func)((priv)->slaves + idx, ##arg); \
+#define for_each_slave(priv, func, arg...) \
+ do { \
+ int idx; \
+ if (priv->data.dual_emac) \
+ (func)((priv)->slaves + priv->emac_port, ##arg);\
+ else \
+ for (idx = 0; idx < (priv)->data.slaves; idx++) \
+ (func)((priv)->slaves + idx, ##arg); \
+ } while (0)
+#define cpsw_get_slave_ndev(priv, __slave_no__) \
+ (priv->slaves[__slave_no__].ndev)
+#define cpsw_get_slave_priv(priv, __slave_no__) \
+ ((priv->slaves[__slave_no__].ndev) ? \
+ netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
+
+#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
+ do { \
+ if (!priv->data.dual_emac) \
+ break; \
+ if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
+ ndev = cpsw_get_slave_ndev(priv, 0); \
+ priv = netdev_priv(ndev); \
+ skb->dev = ndev; \
+ } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
+ ndev = cpsw_get_slave_ndev(priv, 1); \
+ priv = netdev_priv(ndev); \
+ skb->dev = ndev; \
+ } \
+ } while (0)
+#define cpsw_add_mcast(priv, addr) \
+ do { \
+ if (priv->data.dual_emac) { \
+ struct cpsw_slave *slave = priv->slaves + \
+ priv->emac_port; \
+ int slave_port = cpsw_get_slave_port(priv, \
+ slave->slave_num); \
+ cpsw_ale_add_mcast(priv->ale, addr, \
+ 1 << slave_port | 1 << priv->host_port, \
+ ALE_VLAN, slave->port_vlan, 0); \
+ } else { \
+ cpsw_ale_add_mcast(priv->ale, addr, \
+ ALE_ALL_PORTS << priv->host_port, \
+ 0, 0, 0); \
+ } \
} while (0)
+static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+{
+ if (priv->host_port == 0)
+ return slave_num + 1;
+ else
+ return slave_num;
+}
+
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -344,8 +403,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* program multicast address list into ALE register */
netdev_for_each_mc_addr(ha, ndev) {
- cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
- ALE_ALL_PORTS << priv->host_port, 0, 0);
+ cpsw_add_mcast(priv, (u8 *)ha->addr);
}
}
}
@@ -374,9 +432,12 @@ void cpsw_tx_handler(void *token, int len, int status)
struct net_device *ndev = skb->dev;
struct cpsw_priv *priv = netdev_priv(ndev);
+ /* Check whether the queue is stopped due to stalled tx dma; if so,
+ * restart the queue since we now have free tx descriptors
+ */
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
- cpts_tx_timestamp(&priv->cpts, skb);
+ cpts_tx_timestamp(priv->cpts, skb);
priv->stats.tx_packets++;
priv->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
@@ -389,6 +450,8 @@ void cpsw_rx_handler(void *token, int len, int status)
struct cpsw_priv *priv = netdev_priv(ndev);
int ret = 0;
+ cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
+
/* free and bail if we are shutting down */
if (unlikely(!netif_running(ndev)) ||
unlikely(!netif_carrier_ok(ndev))) {
@@ -397,7 +460,7 @@ void cpsw_rx_handler(void *token, int len, int status)
}
if (likely(status >= 0)) {
skb_put(skb, len);
- cpts_rx_timestamp(&priv->cpts, skb);
+ cpts_rx_timestamp(priv->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
priv->stats.rx_bytes += len;
@@ -417,7 +480,7 @@ void cpsw_rx_handler(void *token, int len, int status)
return;
ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
}
WARN_ON(ret < 0);
}
@@ -430,37 +493,38 @@ static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
cpsw_intr_disable(priv);
cpsw_disable_irq(priv);
napi_schedule(&priv->napi);
+ } else {
+ priv = cpsw_get_slave_priv(priv, 1);
+ if (likely(priv) && likely(netif_running(priv->ndev))) {
+ cpsw_intr_disable(priv);
+ cpsw_disable_irq(priv);
+ napi_schedule(&priv->napi);
+ }
}
return IRQ_HANDLED;
}
-static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
-{
- if (priv->host_port == 0)
- return slave_num + 1;
- else
- return slave_num;
-}
-
static int cpsw_poll(struct napi_struct *napi, int budget)
{
struct cpsw_priv *priv = napi_to_priv(napi);
int num_tx, num_rx;
num_tx = cpdma_chan_process(priv->txch, 128);
- num_rx = cpdma_chan_process(priv->rxch, budget);
-
- if (num_rx || num_tx)
- cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
- num_rx, num_tx);
+ if (num_tx)
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
napi_complete(napi);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
cpsw_enable_irq(priv);
}
+ if (num_rx || num_tx)
+ cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
+ num_rx, num_tx);
+
return num_rx;
}
@@ -559,6 +623,54 @@ static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
leader + strlen(name), val);
}
+static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
+{
+ u32 i;
+ u32 usage_count = 0;
+
+ if (!priv->data.dual_emac)
+ return 0;
+
+ for (i = 0; i < priv->data.slaves; i++)
+ if (priv->slaves[i].open_stat)
+ usage_count++;
+
+ return usage_count;
+}
+
+static inline int cpsw_tx_packet_submit(struct net_device *ndev,
+ struct cpsw_priv *priv, struct sk_buff *skb)
+{
+ if (!priv->data.dual_emac)
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 0, GFP_KERNEL);
+
+ if (ndev == cpsw_get_slave_ndev(priv, 0))
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 1, GFP_KERNEL);
+ else
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 2, GFP_KERNEL);
+}
+
+static inline void cpsw_add_dual_emac_def_ale_entries(
+ struct cpsw_priv *priv, struct cpsw_slave *slave,
+ u32 slave_port)
+{
+ u32 port_mask = 1 << slave_port | 1 << priv->host_port;
+
+ if (priv->version == CPSW_VERSION_1)
+ slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
+ else
+ slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
+ cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
+ port_mask, port_mask, 0);
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan, 0);
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, slave->port_vlan);
+}
+
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
char name[32];
@@ -588,11 +700,14 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave_port = cpsw_get_slave_port(priv, slave->slave_num);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- 1 << slave_port, 0, ALE_MCAST_FWD_2);
+ if (priv->data.dual_emac)
+ cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
+ else
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
- &cpsw_adjust_link, 0, slave->data->phy_if);
+ &cpsw_adjust_link, slave->data->phy_if);
if (IS_ERR(slave->phy)) {
dev_err(priv->dev, "phy %s not found on slave %d\n",
slave->data->phy_id, slave->slave_num);
@@ -604,14 +719,44 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
}
+static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
+{
+ const int vlan = priv->data.default_vlan;
+ const int port = priv->host_port;
+ u32 reg;
+ int i;
+
+ reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+
+ writel(vlan, &priv->host_port_regs->port_vlan);
+
+ for (i = 0; i < 2; i++)
+ slave_write(priv->slaves + i, vlan, reg);
+
+ cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
+ ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
+ (ALE_PORT_1 | ALE_PORT_2) << port);
+}
+
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
+ u32 control_reg;
+ u32 fifo_mode;
+
/* soft reset the controller and initialize ale */
soft_reset("cpsw", &priv->regs->soft_reset);
cpsw_ale_start(priv->ale);
/* switch to vlan unaware mode */
- cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);
+ cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
+ CPSW_ALE_VLAN_AWARE);
+ control_reg = readl(&priv->regs->control);
+ control_reg |= CPSW_VLAN_AWARE;
+ writel(control_reg, &priv->regs->control);
+ fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
+ CPSW_FIFO_NORMAL_MODE;
+ writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);
/* setup host port priority mapping */
__raw_writel(CPDMA_TX_PRIORITY_MAP,
@@ -621,9 +766,12 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
cpsw_ale_control_set(priv->ale, priv->host_port,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
- cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- 1 << priv->host_port, 0, ALE_MCAST_FWD_2);
+ if (!priv->data.dual_emac) {
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ 0, 0);
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
+ }
}
static int cpsw_ndo_open(struct net_device *ndev)
@@ -632,7 +780,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
int i, ret;
u32 reg;
- cpsw_intr_disable(priv);
+ if (!cpsw_common_res_usage_state(priv))
+ cpsw_intr_disable(priv);
netif_carrier_off(ndev);
pm_runtime_get_sync(&priv->pdev->dev);
@@ -644,43 +793,55 @@ static int cpsw_ndo_open(struct net_device *ndev)
CPSW_RTL_VERSION(reg));
/* initialize host and slave ports */
- cpsw_init_host_port(priv);
+ if (!cpsw_common_res_usage_state(priv))
+ cpsw_init_host_port(priv);
for_each_slave(priv, cpsw_slave_open, priv);
- /* setup tx dma to fixed prio and zero offset */
- cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
- cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
+ /* Add default VLAN */
+ if (!priv->data.dual_emac)
+ cpsw_add_default_vlan(priv);
- /* disable priority elevation and enable statistics on all ports */
- __raw_writel(0, &priv->regs->ptype);
+ if (!cpsw_common_res_usage_state(priv)) {
+ /* setup tx dma to fixed prio and zero offset */
+ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
+ cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
- /* enable statistics collection only on the host port */
- __raw_writel(0x7, &priv->regs->stat_port_en);
+ /* disable priority elevation */
+ __raw_writel(0, &priv->regs->ptype);
- if (WARN_ON(!priv->data.rx_descs))
- priv->data.rx_descs = 128;
+ /* enable statistics collection on all ports */
+ __raw_writel(0x7, &priv->regs->stat_port_en);
- for (i = 0; i < priv->data.rx_descs; i++) {
- struct sk_buff *skb;
+ if (WARN_ON(!priv->data.rx_descs))
+ priv->data.rx_descs = 128;
- ret = -ENOMEM;
- skb = netdev_alloc_skb_ip_align(priv->ndev,
- priv->rx_packet_max);
- if (!skb)
- break;
- ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
- if (WARN_ON(ret < 0))
- break;
+ for (i = 0; i < priv->data.rx_descs; i++) {
+ struct sk_buff *skb;
+
+ ret = -ENOMEM;
+ skb = netdev_alloc_skb_ip_align(priv->ndev,
+ priv->rx_packet_max);
+ if (!skb)
+ break;
+ ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
+ skb_tailroom(skb), 0, GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ break;
+ }
+ /* continue even if we didn't manage to submit all
+ * receive descs
+ */
+ cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
}
- /* continue even if we didn't manage to submit all receive descs */
- cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
napi_enable(&priv->napi);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ if (priv->data.dual_emac)
+ priv->slaves[priv->emac_port].open_stat = true;
return 0;
}
@@ -701,12 +862,17 @@ static int cpsw_ndo_stop(struct net_device *ndev)
netif_stop_queue(priv->ndev);
napi_disable(&priv->napi);
netif_carrier_off(priv->ndev);
- cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
- cpdma_ctlr_stop(priv->dma);
- cpsw_ale_stop(priv->ale);
+
+ if (cpsw_common_res_usage_state(priv) <= 1) {
+ cpsw_intr_disable(priv);
+ cpdma_ctlr_int_ctrl(priv->dma, false);
+ cpdma_ctlr_stop(priv->dma);
+ cpsw_ale_stop(priv->ale);
+ }
for_each_slave(priv, cpsw_slave_stop, priv);
pm_runtime_put_sync(&priv->pdev->dev);
+ if (priv->data.dual_emac)
+ priv->slaves[priv->emac_port].open_stat = false;
return 0;
}
@@ -724,18 +890,24 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->cpts->tx_enable)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_tx_timestamp(skb);
- ret = cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, GFP_KERNEL);
+ ret = cpsw_tx_packet_submit(ndev, priv, skb);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
goto fail;
}
+ /* If there are no free tx descriptors left, tell the kernel
+ * to stop sending us tx frames.
+ */
+ if (unlikely(cpdma_check_free_tx_desc(priv->txch)))
+ netif_stop_queue(ndev);
+
return NETDEV_TX_OK;
fail:
priv->stats.tx_dropped++;
@@ -773,7 +945,7 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
u32 ts_en, seq_id;
- if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
+ if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -781,10 +953,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (priv->cpts.tx_enable)
+ if (priv->cpts->tx_enable)
ts_en |= CPSW_V1_TS_TX_EN;
- if (priv->cpts.rx_enable)
+ if (priv->cpts->rx_enable)
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -793,16 +965,21 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
- struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+ struct cpsw_slave *slave;
u32 ctrl, mtype;
+ if (priv->data.dual_emac)
+ slave = &priv->slaves[priv->emac_port];
+ else
+ slave = &priv->slaves[priv->data.cpts_active_slave];
+
ctrl = slave_read(slave, CPSW2_CONTROL);
ctrl &= ~CTRL_ALL_TS_MASK;
- if (priv->cpts.tx_enable)
+ if (priv->cpts->tx_enable)
ctrl |= CTRL_TX_TS_BITS;
- if (priv->cpts.rx_enable)
+ if (priv->cpts->rx_enable)
ctrl |= CTRL_RX_TS_BITS;
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -815,7 +992,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_priv *priv = netdev_priv(dev);
- struct cpts *cpts = &priv->cpts;
+ struct cpts *cpts = priv->cpts;
struct hwtstamp_config cfg;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -901,7 +1078,9 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpdma_chan_start(priv->txch);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
}
static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
@@ -920,10 +1099,79 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
cpsw_interrupt(ndev->irq, priv);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
}
#endif
+static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+ unsigned short vid)
+{
+ int ret;
+
+ ret = cpsw_ale_add_vlan(priv->ale, vid,
+ ALE_ALL_PORTS << priv->host_port,
+ 0, ALE_ALL_PORTS << priv->host_port,
+ (ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+ if (ret != 0)
+ goto clean_vid;
+
+ ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ ALE_ALL_PORTS << priv->host_port,
+ ALE_VLAN, vid, 0);
+ if (ret != 0)
+ goto clean_vlan_ucast;
+ return 0;
+
+clean_vlan_ucast:
+ cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+clean_vid:
+ cpsw_ale_del_vlan(priv->ale, vid, 0);
+ return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ unsigned short vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ if (vid == priv->data.default_vlan)
+ return 0;
+
+ dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+ return cpsw_add_vlan_ale_entry(priv, vid);
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+ unsigned short vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (vid == priv->data.default_vlan)
+ return 0;
+
+ dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+ ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+ if (ret != 0)
+ return ret;
+
+ return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -938,15 +1186,18 @@ static const struct net_device_ops cpsw_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cpsw_ndo_poll_controller,
#endif
+ .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- strcpy(info->driver, "TI CPSW Driver v1.0");
- strcpy(info->version, "1.0");
- strcpy(info->bus_info, priv->pdev->name);
+
+ strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver));
+ strlcpy(info->version, "1.0", sizeof(info->version));
+ strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
}
static u32 cpsw_get_msglevel(struct net_device *ndev)
@@ -974,7 +1225,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = priv->cpts.phc_index;
+ info->phc_index = priv->cpts->phc_index;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -1011,6 +1262,7 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
slave->data = data;
slave->regs = regs + slave_reg_ofs;
slave->sliver = regs + sliver_reg_ofs;
+ slave->port_vlan = data->dual_emac_res_vlan;
}
static int cpsw_probe_dt(struct cpsw_platform_data *data,
@@ -1051,12 +1303,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->cpts_clock_shift = prop;
- data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
- data->slaves, GFP_KERNEL);
- if (!data->slave_data) {
- pr_err("Could not allocate slave memory.\n");
+ data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data),
+ GFP_KERNEL);
+ if (!data->slave_data)
return -EINVAL;
- }
if (of_property_read_u32(node, "cpdma_channels", &prop)) {
pr_err("Missing cpdma_channels property in the DT.\n");
@@ -1093,6 +1343,9 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->mac_control = prop;
+ if (!of_property_read_u32(node, "dual_emac", &prop))
+ data->dual_emac = prop;
+
/*
* Populate all the child nodes here...
*/
@@ -1126,6 +1379,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (mac_addr)
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+ if (data->dual_emac) {
+ if (of_property_read_u32(node, "dual_emac_res_vlan",
+ &prop)) {
+ pr_err("Missing dual_emac_res_vlan in DT.\n");
+ slave_data->dual_emac_res_vlan = i+1;
+ pr_err("Using %d as Reserved VLAN for %d slave\n",
+ slave_data->dual_emac_res_vlan, i);
+ } else {
+ slave_data->dual_emac_res_vlan = prop;
+ }
+ }
+
i++;
}
@@ -1136,6 +1401,79 @@ error_ret:
return ret;
}
+static int cpsw_probe_dual_emac(struct platform_device *pdev,
+ struct cpsw_priv *priv)
+{
+ struct cpsw_platform_data *data = &priv->data;
+ struct net_device *ndev;
+ struct cpsw_priv *priv_sl2;
+ int ret = 0, i;
+
+ ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+ if (!ndev) {
+ pr_err("cpsw: error allocating net_device\n");
+ return -ENOMEM;
+ }
+
+ priv_sl2 = netdev_priv(ndev);
+ spin_lock_init(&priv_sl2->lock);
+ priv_sl2->data = *data;
+ priv_sl2->pdev = pdev;
+ priv_sl2->ndev = ndev;
+ priv_sl2->dev = &ndev->dev;
+ priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv_sl2->rx_packet_max = max(rx_packet_max, 128);
+
+ if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
+ memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
+ ETH_ALEN);
+ pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+ } else {
+ random_ether_addr(priv_sl2->mac_addr);
+ pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+ }
+ memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
+
+ priv_sl2->slaves = priv->slaves;
+ priv_sl2->clk = priv->clk;
+
+ priv_sl2->cpsw_res = priv->cpsw_res;
+ priv_sl2->regs = priv->regs;
+ priv_sl2->host_port = priv->host_port;
+ priv_sl2->host_port_regs = priv->host_port_regs;
+ priv_sl2->wr_regs = priv->wr_regs;
+ priv_sl2->dma = priv->dma;
+ priv_sl2->txch = priv->txch;
+ priv_sl2->rxch = priv->rxch;
+ priv_sl2->ale = priv->ale;
+ priv_sl2->emac_port = 1;
+ priv->slaves[1].ndev = ndev;
+ priv_sl2->cpts = priv->cpts;
+ priv_sl2->version = priv->version;
+
+ for (i = 0; i < priv->num_irqs; i++) {
+ priv_sl2->irqs_table[i] = priv->irqs_table[i];
+ priv_sl2->num_irqs = priv->num_irqs;
+ }
+
+ ndev->features |= NETIF_F_HW_VLAN_FILTER;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+ netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ret = register_netdev(ndev);
+ if (ret) {
+ pr_err("cpsw: error registering net device\n");
+ free_netdev(ndev);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
static int cpsw_probe(struct platform_device *pdev)
{
struct cpsw_platform_data *data = pdev->dev.platform_data;
@@ -1162,6 +1500,11 @@ static int cpsw_probe(struct platform_device *pdev)
priv->dev = &ndev->dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->rx_packet_max = max(rx_packet_max, 128);
+ priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
+ if (!priv->cpts) {
+ pr_err("error allocating cpts\n");
+ goto clean_ndev_ret;
+ }
/*
* This may be required here for child devices.
@@ -1194,6 +1537,9 @@ static int cpsw_probe(struct platform_device *pdev)
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
+ priv->slaves[0].ndev = ndev;
+ priv->emac_port = 0;
+
priv->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "fck is not found\n");
@@ -1248,7 +1594,7 @@ static int cpsw_probe(struct platform_device *pdev)
switch (priv->version) {
case CPSW_VERSION_1:
priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
@@ -1259,7 +1605,7 @@ static int cpsw_probe(struct platform_device *pdev)
break;
case CPSW_VERSION_2:
priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
@@ -1346,7 +1692,7 @@ static int cpsw_probe(struct platform_device *pdev)
k++;
}
- ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */
+ ndev->features |= NETIF_F_HW_VLAN_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
@@ -1361,13 +1707,21 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_irq_ret;
}
- if (cpts_register(&pdev->dev, &priv->cpts,
+ if (cpts_register(&pdev->dev, priv->cpts,
data->cpts_clock_mult, data->cpts_clock_shift))
dev_err(priv->dev, "error registering cpts device\n");
cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
priv->cpsw_res->start, ndev->irq);
+ if (priv->data.dual_emac) {
+ ret = cpsw_probe_dual_emac(pdev, priv);
+ if (ret) {
+ cpsw_err(priv, probe, "error probing slave 2 emac interface\n");
+ goto clean_irq_ret;
+ }
+ }
+
return 0;
clean_irq_ret:
@@ -1406,7 +1760,7 @@ static int cpsw_remove(struct platform_device *pdev)
pr_info("removing device");
platform_set_drvdata(pdev, NULL);
- cpts_unregister(&priv->cpts);
+ cpts_unregister(priv->cpts);
free_irq(ndev->irq, priv);
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 0e9ccc2cf91f..7fa60d6092ed 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -148,7 +148,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
return idx;
}
-static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
+int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int type, idx;
@@ -160,6 +160,8 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
type = cpsw_ale_get_entry_type(ale_entry);
if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
continue;
+ if (cpsw_ale_get_vlan_id(ale_entry) != vid)
+ continue;
cpsw_ale_get_addr(ale_entry, entry_addr);
if (memcmp(entry_addr, addr, 6) == 0)
return idx;
@@ -167,6 +169,22 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
return -ENOENT;
}
+int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
+ if (cpsw_ale_get_vlan_id(ale_entry) == vid)
+ return idx;
+ }
+ return -ENOENT;
+}
+
static int cpsw_ale_match_free(struct cpsw_ale *ale)
{
u32 ale_entry[ALE_ENTRY_WORDS];
@@ -274,19 +292,32 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
return 0;
}
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
+static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
+ int flags, u16 vid)
+{
+ if (flags & ALE_VLAN) {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR);
+ cpsw_ale_set_vlan_id(ale_entry, vid);
+ } else {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ }
+}
+
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
cpsw_ale_set_addr(ale_entry, addr);
cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
cpsw_ale_set_port_num(ale_entry, port);
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
if (idx < 0)
@@ -298,12 +329,13 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
return 0;
}
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
return -ENOENT;
@@ -313,18 +345,19 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
}
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
- int super, int mcast_state)
+ int flags, u16 vid, int mcast_state)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx, mask;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx >= 0)
cpsw_ale_read(ale, idx, ale_entry);
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
cpsw_ale_set_addr(ale_entry, addr);
- cpsw_ale_set_super(ale_entry, super);
+ cpsw_ale_set_super(ale_entry, (flags & ALE_SUPER) ? 1 : 0);
cpsw_ale_set_mcast_state(ale_entry, mcast_state);
mask = cpsw_ale_get_port_mask(ale_entry);
@@ -342,12 +375,13 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
return 0;
}
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
return -EINVAL;
@@ -362,6 +396,55 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
return 0;
}
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+ int reg_mcast, int unreg_mcast)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
+ cpsw_ale_set_vlan_id(ale_entry, vid);
+
+ cpsw_ale_set_vlan_untag_force(ale_entry, untag);
+ cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast);
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
+ cpsw_ale_set_vlan_member_list(ale_entry, port);
+
+ if (idx < 0)
+ idx = cpsw_ale_match_free(ale);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(ale);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ if (port_mask)
+ cpsw_ale_set_vlan_member_list(ale_entry, port_mask);
+ else
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
struct ale_control_info {
const char *name;
int offset, port_offset;
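
Illustrative caller sketch (not part of the patch above): with the reworked prototypes, an ALE address entry only becomes VLAN-aware when the caller passes ALE_VLAN together with a VID, and the VLAN table itself is populated through cpsw_ale_add_vlan(). The wrapper function and port masks below are hypothetical; the ALE functions and flag names are the ones introduced in this diff.

	static int example_ale_setup_vlan(struct cpsw_ale *ale, u8 *mac, u16 vid)
	{
		int port_mask = ALE_PORT_HOST | ALE_PORT_1 | ALE_PORT_2;
		int ret;

		/* VLAN table entry: all ports are members, frames toward the
		 * host are force-untagged, multicast floods to all members.
		 */
		ret = cpsw_ale_add_vlan(ale, vid, port_mask, ALE_PORT_HOST,
					port_mask, port_mask);
		if (ret)
			return ret;

		/* Unicast entry keyed on {mac, vid} because of ALE_VLAN */
		return cpsw_ale_add_ucast(ale, mac, 0, ALE_VLAN | ALE_SECURE, vid);
	}
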
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 2bd09cbce522..30daa1265f0c 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -64,8 +64,14 @@ enum cpsw_ale_port_state {
};
/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
-#define ALE_SECURE 1
-#define ALE_BLOCKED 2
+#define ALE_SECURE BIT(0)
+#define ALE_BLOCKED BIT(1)
+#define ALE_SUPER BIT(2)
+#define ALE_VLAN BIT(3)
+
+#define ALE_PORT_HOST BIT(0)
+#define ALE_PORT_1 BIT(1)
+#define ALE_PORT_2 BIT(2)
#define ALE_MCAST_FWD 0
#define ALE_MCAST_BLOCK_LEARN_FWD 1
@@ -81,11 +87,17 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid);
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid);
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
- int super, int mcast_state);
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask);
+ int flags, u16 vid, int mcast_state);
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+ int flags, u16 vid);
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+ int reg_mcast, int unreg_mcast);
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 49956730cd8d..ee13dc78430c 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -60,6 +60,9 @@
#define CPDMA_DESC_EOQ BIT(28)
#define CPDMA_DESC_TD_COMPLETE BIT(27)
#define CPDMA_DESC_PASS_CRC BIT(26)
+#define CPDMA_DESC_TO_PORT_EN BIT(20)
+#define CPDMA_TO_PORT_SHIFT 16
+#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16))
#define CPDMA_TEARDOWN_VALUE 0xfffffffc
@@ -105,13 +108,13 @@ struct cpdma_ctlr {
};
struct cpdma_chan {
+ struct cpdma_desc __iomem *head, *tail;
+ void __iomem *hdp, *cp, *rxfree;
enum cpdma_state state;
struct cpdma_ctlr *ctlr;
int chan_num;
spinlock_t lock;
- struct cpdma_desc __iomem *head, *tail;
int count;
- void __iomem *hdp, *cp, *rxfree;
u32 mask;
cpdma_handler_fn handler;
enum dma_data_direction dir;
@@ -132,6 +135,14 @@ struct cpdma_chan {
#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
+#define cpdma_desc_to_port(chan, mode, directed) \
+ do { \
+ if (!is_rx_chan(chan) && ((directed == 1) || \
+ (directed == 2))) \
+ mode |= (CPDMA_DESC_TO_PORT_EN | \
+ (directed << CPDMA_TO_PORT_SHIFT)); \
+ } while (0)
+
/*
* Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
* emac) have dedicated on-chip memory for these descriptors. Some other
@@ -217,17 +228,27 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
}
static struct cpdma_desc __iomem *
-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
+cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
unsigned long flags;
int index;
+ int desc_start;
+ int desc_end;
struct cpdma_desc __iomem *desc = NULL;
spin_lock_irqsave(&pool->lock, flags);
- index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
- num_desc, 0);
- if (index < pool->num_desc) {
+ if (is_rx) {
+ desc_start = 0;
+ desc_end = pool->num_desc/2;
+ } else {
+ desc_start = pool->num_desc/2;
+ desc_end = pool->num_desc;
+ }
+
+ index = bitmap_find_next_zero_area(pool->bitmap,
+ desc_end, desc_start, num_desc, 0);
+ if (index < desc_end) {
bitmap_set(pool->bitmap, index, num_desc);
desc = pool->iomap + pool->desc_size * index;
pool->used_desc++;
@@ -439,10 +460,8 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
if (ctlr->state != CPDMA_STATE_IDLE)
cpdma_ctlr_stop(ctlr);
- for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
- if (ctlr->channels[i])
- cpdma_chan_destroy(ctlr->channels[i]);
- }
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+ cpdma_chan_destroy(ctlr->channels[i]);
cpdma_desc_pool_destroy(ctlr->pool);
spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -473,11 +492,13 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
- dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
+ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
+EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler)
@@ -652,7 +673,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
}
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, gfp_t gfp_mask)
+ int len, int directed, gfp_t gfp_mask)
{
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc __iomem *desc;
@@ -668,7 +689,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
goto unlock_ret;
}
- desc = cpdma_desc_alloc(ctlr->pool, 1);
+ desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
if (!desc) {
chan->stats.desc_alloc_fail++;
ret = -ENOMEM;
@@ -682,6 +703,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+ cpdma_desc_to_port(chan, mode, directed);
desc_write(desc, hw_next, 0);
desc_write(desc, hw_buffer, buffer);
@@ -704,6 +726,29 @@ unlock_ret:
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
+{
+ unsigned long flags;
+ int index;
+ bool ret;
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ index = bitmap_find_next_zero_area(pool->bitmap,
+ pool->num_desc, pool->num_desc/2, 1, 0);
+
+ if (index < pool->num_desc)
+ ret = true;
+ else
+ ret = false;
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
+
static void __cpdma_chan_free(struct cpdma_chan *chan,
struct cpdma_desc __iomem *desc,
int outlen, int status)
@@ -749,7 +794,8 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
status = -EBUSY;
goto unlock_ret;
}
- status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
+ status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
+ CPDMA_DESC_PORT_MASK);
chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
chan_write(chan, cp, desc_dma);
@@ -984,3 +1030,4 @@ unlock_ret:
spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
+EXPORT_SYMBOL_GPL(cpdma_control_set);
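
A minimal, standalone illustration (userspace C, not kernel code) of the "directed packet" descriptor bits added above. It assumes only the macro values visible in this hunk and prints what cpdma_desc_to_port() ORs into the descriptor mode word for slave ports 1 and 2.

	#include <stdio.h>

	#define BIT(n)			(1u << (n))
	#define CPDMA_DESC_TO_PORT_EN	BIT(20)
	#define CPDMA_TO_PORT_SHIFT	16
	#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))

	int main(void)
	{
		unsigned int directed;

		for (directed = 1; directed <= 2; directed++) {
			/* OWNER/SOP/EOP bits are set elsewhere; only the new bits here */
			unsigned int mode = CPDMA_DESC_TO_PORT_EN |
					    (directed << CPDMA_TO_PORT_SHIFT);

			printf("slave port %u -> mode bits 0x%08x (port field %u)\n",
			       directed, mode,
			       (mode & CPDMA_DESC_PORT_MASK) >> CPDMA_TO_PORT_SHIFT);
		}
		return 0;
	}
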
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index afa19a0c0d81..d9bcc6032fdc 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -24,6 +24,13 @@
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan) __chan_linear((chan)->chan_num)
+#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7)
+
+#define CPDMA_EOI_RX_THRESH 0x0
+#define CPDMA_EOI_RX 0x1
+#define CPDMA_EOI_TX 0x2
+#define CPDMA_EOI_MISC 0x3
+
struct cpdma_params {
struct device *dev;
void __iomem *dmaregs;
@@ -82,12 +89,13 @@ int cpdma_chan_dump(struct cpdma_chan *chan);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats);
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, gfp_t gfp_mask);
+ int len, int directed, gfp_t gfp_mask);
int cpdma_chan_process(struct cpdma_chan *chan, int quota);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
enum cpdma_control {
CPDMA_CMD_IDLE, /* write-only */
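
Hedged usage sketch for the new two-argument cpdma_ctlr_eoi() and the CPDMA_EOI_* vector values above: a NAPI poll routine would acknowledge only the interrupt class it has just drained. cpdma_chan_process() and cpdma_ctlr_int_ctrl() are declared in this header; struct example_priv and its fields are purely illustrative.

	static int example_rx_poll(struct napi_struct *napi, int budget)
	{
		struct example_priv *priv = container_of(napi, struct example_priv, napi);
		int frames = cpdma_chan_process(priv->rxchan, budget);

		if (frames < budget) {
			napi_complete(napi);
			/* re-enable interrupts, then signal end of RX handling */
			cpdma_ctlr_int_ctrl(priv->dma, true);
			cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
		}
		return frames;
	}
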
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2a3e2c56bc60..52c05366599a 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -120,7 +120,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_NUM_DESC (128)
-#define EMAC_DEF_TX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -342,7 +341,6 @@ struct emac_priv {
u32 mac_hash2;
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
- atomic_t cur_tx;
const char *phy_id;
#ifdef CONFIG_OF
struct device_node *phy_node;
@@ -480,8 +478,8 @@ static void emac_dump_regs(struct emac_priv *priv)
static void emac_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, emac_version_string);
- strcpy(info->version, EMAC_MODULE_VERSION);
+ strlcpy(info->driver, emac_version_string, sizeof(info->driver));
+ strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
}
/**
@@ -1039,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status)
recycle:
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
WARN_ON(ret == -ENOMEM);
if (unlikely(ret < 0))
@@ -1050,10 +1048,10 @@ static void emac_tx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
- struct emac_priv *priv = netdev_priv(ndev);
-
- atomic_dec(&priv->cur_tx);
+ /* Check whether the queue was stopped due to a stalled tx dma;
+ * if so, restart it now that a tx descriptor has been freed.
+ */
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
ndev->stats.tx_packets++;
@@ -1094,14 +1092,17 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
- GFP_KERNEL);
+ 0, GFP_KERNEL);
if (unlikely(ret_code != 0)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
goto fail_tx;
}
- if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
+ /* If there are no free tx descriptors left then we need to
+ * tell the kernel to stop sending us tx frames.
+ */
+ if (unlikely(!cpdma_check_free_tx_desc(priv->txchan)))
netif_stop_queue(ndev);
return NETDEV_TX_OK;
@@ -1264,7 +1265,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
/* Store mac addr in priv and rx channel and set it in EMAC hw */
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
@@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev)
break;
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
if (WARN_ON(ret < 0))
break;
}
@@ -1600,7 +1600,7 @@ static int emac_dev_open(struct net_device *ndev)
if (priv->phy_id && *priv->phy_id) {
priv->phydev = phy_connect(ndev, priv->phy_id,
- &emac_adjust_link, 0,
+ &emac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phydev)) {
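
With the atomic cur_tx counter removed above, flow control is driven purely by descriptor availability. A hedged sketch of the resulting pattern, using only the helpers touched in this diff (the stop side runs after a descriptor has been submitted, the wake side after one has completed):

	/* xmit path: submit first, then stop if the TX half of the pool is full */
	ret = cpdma_chan_submit(txchan, skb, skb->data, skb->len, 0, GFP_KERNEL);
	if (!ret && !cpdma_check_free_tx_desc(txchan))
		netif_stop_queue(ndev);

	/* completion path: a descriptor was just freed, so it is safe to wake */
	if (unlikely(netif_queue_stopped(ndev)))
		netif_start_queue(ndev);
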
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index cca25509b039..d04a622b08d4 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -320,10 +320,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
int ret, addr;
data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(dev, "failed to alloc device data\n");
+ if (!data)
return -ENOMEM;
- }
data->bus = mdiobus_alloc();
if (!data->bus) {
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 96070e9b50dc..36435499814b 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -2195,7 +2195,6 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
/* ISSUE: Note that "dev_addr" is now a pointer. */
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index e321d0b6fc88..445c0595c997 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1226,8 +1226,8 @@ int gelic_net_open(struct net_device *netdev)
void gelic_net_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1);
- strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int gelic_ether_get_settings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index 9c288cd7d171..ffe519382e11 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -72,11 +72,13 @@ spider_net_ethtool_get_drvinfo(struct net_device *netdev,
card = netdev_priv(netdev);
/* clear and fill out info */
- memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
- strncpy(drvinfo->driver, spider_net_driver_name, 32);
- strncpy(drvinfo->version, VERSION, 32);
- strcpy(drvinfo->fw_version, "no information");
- strncpy(drvinfo->bus_info, pci_name(card->pdev), 32);
+ strlcpy(drvinfo->driver, spider_net_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "no information",
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(card->pdev),
+ sizeof(drvinfo->bus_info));
}
static void
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 9819349eaa1e..fe256094db35 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -633,9 +633,8 @@ static int tc_mii_probe(struct net_device *dev)
/* attach the mac to the phy */
phydev = phy_connect(dev, dev_name(&phydev->dev),
- &tc_handle_link_change, 0,
- lp->chiptype == TC35815_TX4939 ?
- PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
+ &tc_handle_link_change,
+ lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
return PTR_ERR(phydev);
@@ -856,7 +855,6 @@ static int tc35815_init_one(struct pci_dev *pdev,
if (rc)
goto err_out;
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
dev->name,
chip_info[ent->driver_data].name,
@@ -1976,9 +1974,10 @@ tc35815_set_multicast_list(struct net_device *dev)
static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tc35815_local *lp = netdev_priv(dev);
- strcpy(info->driver, MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(lp->pci_dev));
+
+ strlcpy(info->driver, MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
}
static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 7992b3e05d3d..185c721c52d7 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -417,6 +417,12 @@ enum chip_cmd_bits {
Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};
+struct rhine_stats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+};
+
struct rhine_private {
/* Bit mask for configured VLAN ids */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -458,6 +464,8 @@ struct rhine_private {
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ struct rhine_stats rx_stats;
+ struct rhine_stats tx_stats;
u8 wolopts;
u8 tx_thresh, rx_thresh;
@@ -495,7 +503,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
-static struct net_device_stats *rhine_get_stats(struct net_device *dev);
+static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
@@ -842,7 +851,7 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_open = rhine_open,
.ndo_stop = rhine_close,
.ndo_start_xmit = rhine_start_tx,
- .ndo_get_stats = rhine_get_stats,
+ .ndo_get_stats64 = rhine_get_stats64,
.ndo_set_rx_mode = rhine_set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -990,7 +999,6 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_info(dev, "Using random MAC address: %pM\n",
dev->dev_addr);
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* For Rhine-I/II, phy_id is loaded from EEPROM */
if (!phy_id)
@@ -1791,8 +1799,11 @@ static void rhine_tx(struct net_device *dev)
dev->stats.collisions += txstatus & 0x0F;
netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
(txstatus >> 3) & 0xF, txstatus & 0xF);
- dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
- dev->stats.tx_packets++;
+
+ u64_stats_update_begin(&rp->tx_stats.syncp);
+ rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
+ rp->tx_stats.packets++;
+ u64_stats_update_end(&rp->tx_stats.syncp);
}
/* Free the original skb. */
if (rp->tx_skbuff_dma[entry]) {
@@ -1801,7 +1812,7 @@ static void rhine_tx(struct net_device *dev)
rp->tx_skbuff[entry]->len,
PCI_DMA_TODEVICE);
}
- dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+ dev_kfree_skb(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
entry = (++rp->dirty_tx) % TX_RING_SIZE;
}
@@ -1924,8 +1935,11 @@ static int rhine_rx(struct net_device *dev, int limit)
if (unlikely(desc_length & DescTag))
__vlan_hwaccel_put_tag(skb, vlan_tci);
netif_receive_skb(skb);
- dev->stats.rx_bytes += pkt_len;
- dev->stats.rx_packets++;
+
+ u64_stats_update_begin(&rp->rx_stats.syncp);
+ rp->rx_stats.bytes += pkt_len;
+ rp->rx_stats.packets++;
+ u64_stats_update_end(&rp->rx_stats.syncp);
}
entry = (++rp->cur_rx) % RX_RING_SIZE;
rp->rx_head_desc = &rp->rx_ring[entry];
@@ -2010,25 +2024,37 @@ static void rhine_slow_event_task(struct work_struct *work)
if (intr_status & IntrPCIErr)
netif_warn(rp, hw, dev, "PCI error\n");
- napi_disable(&rp->napi);
- rhine_irq_disable(rp);
- /* Slow and safe. Consider __napi_schedule as a replacement ? */
- napi_enable(&rp->napi);
- napi_schedule(&rp->napi);
+ iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
out_unlock:
mutex_unlock(&rp->task_lock);
}
-static struct net_device_stats *rhine_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rhine_private *rp = netdev_priv(dev);
+ unsigned int start;
spin_lock_bh(&rp->lock);
rhine_update_rx_crc_and_missed_errord(rp);
spin_unlock_bh(&rp->lock);
- return &dev->stats;
+ netdev_stats_to_stats64(stats, &dev->stats);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
+ stats->rx_packets = rp->rx_stats.packets;
+ stats->rx_bytes = rp->rx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
+ stats->tx_packets = rp->tx_stats.packets;
+ stats->tx_bytes = rp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
+
+ return stats;
}
static void rhine_set_rx_mode(struct net_device *dev)
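
For readers unfamiliar with the u64_stats_sync pattern adopted above, here is a standalone userspace analogue of the fetch/retry loop used in rhine_get_stats64(). It only illustrates the idea of a sequence counter guarding a 64-bit snapshot; the kernel helpers additionally handle SMP barriers and 32-bit vs 64-bit builds.

	#include <stdint.h>
	#include <stdio.h>

	struct stats {
		unsigned int seq;	/* even: stable, odd: writer in progress */
		uint64_t packets, bytes;
	};

	static void writer_update(struct stats *s, uint64_t len)
	{
		s->seq++;		/* begin: seq becomes odd */
		s->packets++;
		s->bytes += len;
		s->seq++;		/* end: seq becomes even again */
	}

	static void reader_snapshot(const struct stats *s, uint64_t *p, uint64_t *b)
	{
		unsigned int start;

		do {
			start = s->seq;
			*p = s->packets;
			*b = s->bytes;
		} while ((start & 1) || start != s->seq);	/* retry if torn */
	}

	int main(void)
	{
		struct stats s = { 0 };
		uint64_t p, b;

		writer_update(&s, 1500);
		reader_snapshot(&s, &p, &b);
		printf("%llu packets, %llu bytes\n",
		       (unsigned long long)p, (unsigned long long)b);
		return 0;
	}
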
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 352383890326..545043cc4c0b 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -570,7 +570,6 @@ static int w5100_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
w5100_write_macaddr(priv);
return 0;
}
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 9d1d986f8d40..7cbd0e6fc6f3 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -490,7 +490,6 @@ static int w5300_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
w5300_write_macaddr(priv);
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 5778a4ae1164..122d60c0481b 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -27,7 +27,7 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on (PPC32 || MICROBLAZE)
+ depends on MICROBLAZE
select PHYLIB
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index aad909d793d7..9fc2ada4c3c2 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -238,11 +238,9 @@ static int temac_dma_bd_init(struct net_device *ndev)
int i;
lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
- if (!lp->rx_skb) {
- dev_err(&ndev->dev,
- "can't allocate memory for DMA RX buffer\n");
+ if (!lp->rx_skb)
goto out;
- }
+
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
@@ -319,18 +317,10 @@ out:
* net_device_ops
*/
-static int temac_set_mac_address(struct net_device *ndev, void *address)
+static void temac_do_set_mac_address(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
- if (address)
- memcpy(ndev->dev_addr, address, ETH_ALEN);
-
- if (!is_valid_ether_addr(ndev->dev_addr))
- eth_hw_addr_random(ndev);
- else
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
-
/* set up unicast MAC address filter set its mac address */
mutex_lock(&lp->indirect_mutex);
temac_indirect_out32(lp, XTE_UAW0_OFFSET,
@@ -344,15 +334,26 @@ static int temac_set_mac_address(struct net_device *ndev, void *address)
(ndev->dev_addr[4] & 0x000000ff) |
(ndev->dev_addr[5] << 8));
mutex_unlock(&lp->indirect_mutex);
+}
+static int temac_init_mac_address(struct net_device *ndev, void *address)
+{
+ memcpy(ndev->dev_addr, address, ETH_ALEN);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ eth_hw_addr_random(ndev);
+ temac_do_set_mac_address(ndev);
return 0;
}
-static int netdev_set_mac_address(struct net_device *ndev, void *p)
+static int temac_set_mac_address(struct net_device *ndev, void *p)
{
struct sockaddr *addr = p;
- return temac_set_mac_address(ndev, addr->sa_data);
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ temac_do_set_mac_address(ndev);
+ return 0;
}
static void temac_set_multicast_list(struct net_device *ndev)
@@ -579,7 +580,7 @@ static void temac_device_reset(struct net_device *ndev)
temac_setoptions(ndev,
lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
- temac_set_mac_address(ndev, NULL);
+ temac_do_set_mac_address(ndev);
/* Set address filter table */
temac_set_multicast_list(ndev);
@@ -938,7 +939,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
- .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_set_mac_address = temac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = temac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1106,7 +1107,7 @@ static int temac_of_probe(struct platform_device *op)
rc = -ENODEV;
goto err_iounmap_2;
}
- temac_set_mac_address(ndev, (void *)addr);
+ temac_init_mac_address(ndev, (void *)addr);
rc = temac_mdio_setup(lp, op->dev.of_node);
if (rc)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d9f69b82cc4f..278c9db3b5b8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1124,9 +1124,8 @@ static int axienet_ethtools_set_settings(struct net_device *ndev,
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- memset(ed, 0, sizeof(struct ethtool_drvinfo));
- strcpy(ed->driver, DRIVER_NAME);
- strcpy(ed->version, DRIVER_VERSION);
+ strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}
@@ -1590,7 +1589,7 @@ static int axienet_of_probe(struct platform_device *op)
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+ if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
dev_err(&op->dev, "could not determine irqs\n");
ret = -ENOMEM;
goto err_iounmap_2;
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 94a1f94f74b8..98e09d0d3ce2 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1412,7 +1412,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx",
+ dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index d3ebb73277be..6958a5e87703 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -977,11 +977,12 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct port *port = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
port->firmware[0], port->firmware[1],
port->firmware[2], port->firmware[3]);
- strcpy(info->bus_info, "internal");
+ strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}
static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1450,7 +1451,7 @@ static int eth_init_one(struct platform_device *pdev)
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
mdio_bus->id, plat->phy);
- port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
+ port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(port->phydev)) {
err = PTR_ERR(port->phydev);
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 95dbcfdf131d..bf5e59687680 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -1,6 +1,6 @@
config MKISS
tristate "Serial port KISS driver"
- depends on AX25
+ depends on AX25 && TTY
select CRC16
---help---
KISS is a protocol used for the exchange of data between a computer
@@ -18,7 +18,7 @@ config MKISS
config 6PACK
tristate "Serial port 6PACK driver"
- depends on AX25
+ depends on AX25 && TTY
---help---
6pack is a transmission protocol for the data exchange between your
PC and your TNC (the Terminal Node Controller acts as a kind of
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index c2e5497397d5..02de6c891670 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -586,7 +586,8 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
static int __init bpq_init_driver(void)
{
#ifdef CONFIG_PROC_FS
- if (!proc_net_fops_create(&init_net, "bpqether", S_IRUGO, &bpq_info_fops)) {
+ if (!proc_create("bpqether", S_IRUGO, init_net.proc_net,
+ &bpq_info_fops)) {
printk(KERN_ERR
"bpq: cannot create /proc/net/bpqether entry.\n");
return -ENOENT;
@@ -610,7 +611,7 @@ static void __exit bpq_cleanup_driver(void)
unregister_netdevice_notifier(&bpq_dev_notifier);
- proc_net_remove(&init_net, "bpqether");
+ remove_proc_entry("bpqether", init_net.proc_net);
rtnl_lock();
while (!list_empty(&bpq_devices)) {
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index ce555d9ac02c..6636022a1027 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -463,13 +463,8 @@ static int __init setup_adapter(int card_base, int type, int n)
/* Initialize what is necessary for write_scc and write_scc_data */
info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
- if (!info) {
- printk(KERN_ERR "dmascc: "
- "could not allocate memory for %s at %#3x\n",
- hw[type].name, card_base);
+ if (!info)
goto out;
- }
-
info->dev[0] = alloc_netdev(0, "", dev_setup);
if (!info->dev[0]) {
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 1b4a47bd32b7..bc1d52170389 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -2118,7 +2118,7 @@ static int __init scc_init_driver (void)
}
rtnl_unlock();
- proc_net_fops_create(&init_net, "z8530drv", 0, &scc_net_seq_fops);
+ proc_create("z8530drv", 0, init_net.proc_net, &scc_net_seq_fops);
return 0;
}
@@ -2173,7 +2173,7 @@ static void __exit scc_cleanup_driver(void)
if (Vector_Latch)
release_region(Vector_Latch, 1);
- proc_net_remove(&init_net, "z8530drv");
+ remove_proc_entry("z8530drv", init_net.proc_net);
}
MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>");
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index c6645f1017af..4cf8f1017aad 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1167,7 +1167,7 @@ static int __init yam_init_driver(void)
yam_timer.expires = jiffies + HZ / 100;
add_timer(&yam_timer);
- proc_net_fops_create(&init_net, "yam", S_IRUGO, &yam_info_fops);
+ proc_create("yam", S_IRUGO, init_net.proc_net, &yam_info_fops);
return 0;
error:
while (--i >= 0) {
@@ -1199,7 +1199,7 @@ static void __exit yam_cleanup_driver(void)
kfree(p);
}
- proc_net_remove(&init_net, "yam");
+ remove_proc_entry("yam", init_net.proc_net);
}
/* --------------------------------------------------------------------- */
diff --git a/drivers/net/hippi/Kconfig b/drivers/net/hippi/Kconfig
index 95eb34fdbba7..f71515dc5beb 100644
--- a/drivers/net/hippi/Kconfig
+++ b/drivers/net/hippi/Kconfig
@@ -3,8 +3,8 @@
#
config HIPPI
- bool "HIPPI driver support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && INET && PCI
+ bool "HIPPI driver support"
+ depends on INET && PCI
---help---
HIgh Performance Parallel Interface (HIPPI) is a 800Mbit/sec and
1600Mbit/sec dual-simplex switched or point-to-point network. HIPPI
@@ -18,7 +18,7 @@ config HIPPI
if HIPPI
config ROADRUNNER
- tristate "Essential RoadRunner HIPPI PCI adapter support (EXPERIMENTAL)"
+ tristate "Essential RoadRunner HIPPI PCI adapter support"
depends on PCI
---help---
Say Y here if this is your PCI HIPPI network card.
@@ -27,7 +27,7 @@ config ROADRUNNER
will be called rrunner. If unsure, say N.
config ROADRUNNER_LARGE_RINGS
- bool "Use large TX/RX rings (EXPERIMENTAL)"
+ bool "Use large TX/RX rings"
depends on ROADRUNNER
---help---
If you say Y here, the RoadRunner driver will preallocate up to 2 MB
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 5fd6f4674326..e6fe0d80d612 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -84,7 +84,7 @@ struct hv_netvsc_packet {
};
struct netvsc_device_info {
- unsigned char mac_adr[6];
+ unsigned char mac_adr[ETH_ALEN];
bool link_state; /* 0 - link up, 1 - link down */
int ring_size;
};
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f825a629a699..5f85205cd12b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -304,9 +304,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, HV_DRV_VERSION);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, HV_DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
@@ -349,7 +349,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
struct net_device_context *ndevctx = netdev_priv(ndev);
struct hv_device *hdev = ndevctx->device_ctx;
struct sockaddr *addr = p;
- char save_adr[14];
+ char save_adr[ETH_ALEN];
unsigned char save_aatype;
int err;
@@ -498,8 +498,7 @@ static int netvsc_remove(struct hv_device *dev)
static const struct hv_vmbus_device_id id_table[] = {
/* Network guid */
- { VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
- 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
+ { HV_NIC_GUID, },
{ },
};
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index a4a62e170ec0..fc1687ea4a42 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -751,16 +751,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return 0;
}
-static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
-{
- return 0;
-}
-
-static int at86rf230_resume(struct spi_device *spi)
-{
- return 0;
-}
-
static int at86rf230_fill_data(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
@@ -948,8 +938,6 @@ static struct spi_driver at86rf230_driver = {
},
.probe = at86rf230_probe,
.remove = at86rf230_remove,
- .suspend = at86rf230_suspend,
- .resume = at86rf230_resume,
};
module_spi_driver(at86rf230_driver);
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index 1e9cb0bbf62c..8f1c25676d44 100644
--- a/drivers/net/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -372,7 +372,6 @@ static int ieee802154fake_probe(struct platform_device *pdev)
memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef",
dev->addr_len);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/*
* For now we'd like to emulate 2.4 GHz-only device,
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 344dceb1aaf9..82164381f778 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -90,7 +90,7 @@ static void ri_tasklet(unsigned long dev)
u64_stats_update_end(&dp->tsync);
rcu_read_lock();
- skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
+ skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);
if (!skb->dev) {
rcu_read_unlock();
dev_kfree_skb(skb);
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 595205406d73..2a30193d0d50 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -5,7 +5,7 @@ comment "SIR device drivers"
config IRTTY_SIR
tristate "IrTTY (uses Linux serial driver)"
- depends on IRDA
+ depends on IRDA && TTY
help
Say Y here if you want to build support for the IrTTY line
discipline. To compile it as a module, choose M here: the module
@@ -140,7 +140,7 @@ config LITELINK_DONGLE
config MA600_DONGLE
tristate "Mobile Action MA600 dongle"
- depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && DONGLE && IRDA
help
Say Y here if you want to build support for the Mobile Action MA600
dongle. To compile it as a module, choose M here. The MA600 dongle
@@ -153,7 +153,7 @@ config MA600_DONGLE
config GIRBIL_DONGLE
tristate "Greenwich GIrBIL dongle"
- depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && DONGLE && IRDA
help
Say Y here if you want to build support for the Greenwich GIrBIL
dongle. If you want to compile it as a module, choose M here.
@@ -164,7 +164,7 @@ config GIRBIL_DONGLE
config MCP2120_DONGLE
tristate "Microchip MCP2120"
- depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && DONGLE && IRDA
help
Say Y here if you want to build support for the Microchip MCP2120
dongle. If you want to compile it as a module, choose M here.
@@ -178,7 +178,7 @@ config MCP2120_DONGLE
config OLD_BELKIN_DONGLE
tristate "Old Belkin dongle"
- depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && DONGLE && IRDA
help
Say Y here if you want to build support for the Adaptec Airport 1000
and 2000 dongles. If you want to compile it as a module, choose
@@ -187,7 +187,7 @@ config OLD_BELKIN_DONGLE
config ACT200L_DONGLE
tristate "ACTiSYS IR-200L dongle"
- depends on IRTTY_SIR && DONGLE && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && DONGLE && IRDA
help
Say Y here if you want to build support for the ACTiSYS IR-200L
dongle. If you want to compile it as a module, choose M here.
@@ -198,7 +198,7 @@ config ACT200L_DONGLE
config KINGSUN_DONGLE
tristate "KingSun/DonShine DS-620 IrDA-USB dongle"
- depends on IRDA && USB && EXPERIMENTAL
+ depends on IRDA && USB
help
Say Y or M here if you want to build support for the KingSun/DonShine
DS-620 IrDA-USB bridge device driver.
@@ -212,14 +212,14 @@ config KINGSUN_DONGLE
config EP7211_DONGLE
tristate "Cirrus Logic clps711x I/R support"
- depends on IRTTY_SIR && ARCH_CLPS711X && IRDA && EXPERIMENTAL
+ depends on IRTTY_SIR && ARCH_CLPS711X && IRDA
help
Say Y here if you want to build support for the Cirrus logic
EP7211 chipset's infrared module.
config KSDAZZLE_DONGLE
- tristate "KingSun Dazzle IrDA-USB dongle (EXPERIMENTAL)"
- depends on IRDA && USB && EXPERIMENTAL
+ tristate "KingSun Dazzle IrDA-USB dongle"
+ depends on IRDA && USB
help
Say Y or M here if you want to build support for the KingSun Dazzle
IrDA-USB bridge device driver.
@@ -232,8 +232,8 @@ config KSDAZZLE_DONGLE
ksdazzle-sir.
config KS959_DONGLE
- tristate "KingSun KS-959 IrDA-USB dongle (EXPERIMENTAL)"
- depends on IRDA && USB && EXPERIMENTAL
+ tristate "KingSun KS-959 IrDA-USB dongle"
+ depends on IRDA && USB
help
Say Y or M here if you want to build support for the KingSun KS-959
IrDA-USB bridge device driver.
@@ -264,8 +264,8 @@ config USB_IRDA
you will need both USB and IrDA support in your kernel...
config SIGMATEL_FIR
- tristate "SigmaTel STIr4200 bridge (EXPERIMENTAL)"
- depends on IRDA && USB && EXPERIMENTAL
+ tristate "SigmaTel STIr4200 bridge"
+ depends on IRDA && USB
select CRC32
---help---
Say Y here if you want to build support for the SigmaTel STIr4200
@@ -331,8 +331,8 @@ config SMC_IRCC_FIR
smsc-ircc2.o.
config ALI_FIR
- tristate "ALi M5123 FIR (EXPERIMENTAL)"
- depends on EXPERIMENTAL && IRDA && ISA_DMA_API
+ tristate "ALi M5123 FIR"
+ depends on IRDA && ISA_DMA_API
help
Say Y here if you want to build support for the ALi M5123 FIR
Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C,
@@ -343,8 +343,8 @@ config ALI_FIR
ali-ircc.
config VLSI_FIR
- tristate "VLSI 82C147 SIR/MIR/FIR (EXPERIMENTAL)"
- depends on EXPERIMENTAL && IRDA && PCI
+ tristate "VLSI 82C147 SIR/MIR/FIR"
+ depends on IRDA && PCI
help
Say Y here if you want to build support for the VLSI 82C147
PCI-IrDA Controller. This controller is used by the HP OmniBook 800
@@ -387,7 +387,7 @@ config PXA_FICP
config MCS_FIR
tristate "MosChip MCS7780 IrDA-USB dongle"
- depends on IRDA && USB && EXPERIMENTAL
+ depends on IRDA && USB
select CRC32
help
Say Y or M here if you want to build support for the MosChip
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 84872043b5c6..9cea451a6081 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -993,7 +993,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
/* Enable Interuupt */
self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM
- /* Be ready for incomming frames */
+ /* Be ready for incoming frames */
ali_ircc_dma_receive(self); // benajmin 2000/11/8 07:46PM not complete
}
/* Go to SIR Speed */
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 6e4d4b62c9a8..a41267197839 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -210,7 +210,7 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
* been received, which can now be decapsulated and delivered for
* further processing
*
- * calling context depends on underlying driver and tty->low_latency!
+ * calling context depends on underlying driver and tty->port->low_latency!
* for example (low_latency: 1 / 0):
* serial.c: uart-interrupt / softint
* usbserial: urb-complete-interrupt / softint
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 81f8f9e31db5..fcbf680c3e62 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb_orphan(skb);
+ /* Before queueing this packet to netif_rx(),
+ * make sure dst is refcounted.
+ */
+ skb_dst_force(skb);
+
skb->protocol = eth_type_trans(skb, dev);
/* it's OK to use per_cpu_ptr() because BHs are off */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 68a43fe602e7..defcd8a85744 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -29,6 +29,7 @@
#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
+#include <linux/hash.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
@@ -126,6 +127,21 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
return vlan->receive(skb);
}
+static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
+{
+ return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT);
+}
+
+
+static unsigned int mc_hash(const struct macvlan_dev *vlan,
+ const unsigned char *addr)
+{
+ u32 val = __get_unaligned_cpu32(addr + 2);
+
+ val ^= macvlan_hash_mix(vlan);
+ return hash_32(val, MACVLAN_MC_FILTER_BITS);
+}
+
static void macvlan_broadcast(struct sk_buff *skb,
const struct macvlan_port *port,
struct net_device *src,
@@ -137,6 +153,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
struct sk_buff *nskb;
unsigned int i;
int err;
+ unsigned int hash;
if (skb->protocol == htons(ETH_P_PAUSE))
return;
@@ -146,6 +163,9 @@ static void macvlan_broadcast(struct sk_buff *skb,
if (vlan->dev == src || !(vlan->mode & mode))
continue;
+ hash = mc_hash(vlan, eth->h_dest);
+ if (!test_bit(hash, vlan->mc_filter))
+ continue;
nskb = skb_clone(skb, GFP_ATOMIC);
err = macvlan_broadcast_one(nskb, vlan, eth,
mode == MACVLAN_MODE_BRIDGE);
@@ -375,7 +395,6 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
if (!(dev->flags & IFF_UP)) {
/* Just copy in the new address */
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
} else {
/* Rehash and update the device filters */
@@ -406,6 +425,21 @@ static void macvlan_set_mac_lists(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ);
+ } else {
+ struct netdev_hw_addr *ha;
+ DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);
+
+ bitmap_zero(filter, MACVLAN_MC_FILTER_SZ);
+ netdev_for_each_mc_addr(ha, dev) {
+ __set_bit(mc_hash(vlan, ha->addr), filter);
+ }
+
+ __set_bit(mc_hash(vlan, dev->broadcast), filter);
+
+ bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ);
+ }
dev_uc_sync(vlan->lowerdev, dev);
dev_mc_sync(vlan->lowerdev, dev);
}
@@ -565,7 +599,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int macvlan_fdb_del(struct ndmsg *ndm,
+static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr)
{
@@ -586,8 +620,8 @@ static int macvlan_fdb_del(struct ndmsg *ndm,
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- snprintf(drvinfo->driver, 32, "macvlan");
- snprintf(drvinfo->version, 32, "0.1");
+ strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
}
static int macvlan_ethtool_get_settings(struct net_device *dev,
@@ -765,16 +799,22 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
}
+ err = netdev_upper_dev_link(lowerdev, dev);
+ if (err)
+ goto destroy_port;
+
port->count += 1;
err = register_netdevice(dev);
if (err < 0)
- goto destroy_port;
+ goto upper_dev_unlink;
list_add_tail(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
+upper_dev_unlink:
+ netdev_upper_dev_unlink(lowerdev, dev);
destroy_port:
port->count -= 1;
if (!port->count)
@@ -798,6 +838,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
list_del(&vlan->list);
unregister_netdevice_queue(dev, head);
+ netdev_upper_dev_unlink(vlan->lowerdev, dev);
}
EXPORT_SYMBOL_GPL(macvlan_dellink);
@@ -822,7 +863,10 @@ static int macvlan_changelink(struct net_device *dev,
static size_t macvlan_get_size(const struct net_device *dev)
{
- return nla_total_size(4);
+ return (0
+ + nla_total_size(4) /* IFLA_MACVLAN_MODE */
+ + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
+ );
}
static int macvlan_fill_info(struct sk_buff *skb,
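
A standalone sketch of the multicast filter idea added to macvlan above: hash the last four bytes of the group MAC, mix in a per-device value so stacked devices do not collide, and use the result as a bit index into a small bitmap. The kernel code uses __get_unaligned_cpu32(addr + 2) and hash_32(); the multiplier and MACVLAN_MC_FILTER_BITS value (8) below are assumptions made for illustration only.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define FILTER_BITS	8			/* assumed MACVLAN_MC_FILTER_BITS */
	#define FILTER_SIZE	(1u << FILTER_BITS)

	/* golden-ratio multiplicative hash, same idea as the kernel's hash_32() */
	static unsigned int hash32(uint32_t val, unsigned int bits)
	{
		return (uint32_t)(val * 0x61C88647u) >> (32 - bits);
	}

	static unsigned int mc_hash(uint32_t per_dev_mix, const uint8_t *addr)
	{
		uint32_t val;

		memcpy(&val, addr + 2, sizeof(val));	/* last 4 bytes of the MAC */
		return hash32(val ^ per_dev_mix, FILTER_BITS);
	}

	int main(void)
	{
		uint8_t filter[FILTER_SIZE / 8] = { 0 };
		const uint8_t group[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
		unsigned int bit = mc_hash(0x12345678, group);

		filter[bit / 8] |= 1u << (bit % 8);	/* subscribe to the group */
		printf("group hashes to bit %u -> %s\n", bit,
		       (filter[bit / 8] & (1u << (bit % 8))) ? "accept" : "drop");
		return 0;
	}
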
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0f0f9ce3a776..97243011d319 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -742,6 +742,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (zerocopy) {
skb_shinfo(skb)->destructor_arg = m->msg_control;
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
if (vlan)
macvlan_start_xmit(skb, vlan->dev);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 6989ebe2bc79..37add21a3d7d 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -269,12 +269,18 @@ static ssize_t show_remote_port(struct netconsole_target *nt, char *buf)
static ssize_t show_local_ip(struct netconsole_target *nt, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.local_ip);
+ if (nt->np.ipv6)
+ return snprintf(buf, PAGE_SIZE, "%pI6c\n", &nt->np.local_ip.in6);
+ else
+ return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.local_ip);
}
static ssize_t show_remote_ip(struct netconsole_target *nt, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.remote_ip);
+ if (nt->np.ipv6)
+ return snprintf(buf, PAGE_SIZE, "%pI6c\n", &nt->np.remote_ip.in6);
+ else
+ return snprintf(buf, PAGE_SIZE, "%pI4\n", &nt->np.remote_ip);
}
static ssize_t show_local_mac(struct netconsole_target *nt, char *buf)
@@ -410,7 +416,22 @@ static ssize_t store_local_ip(struct netconsole_target *nt,
return -EINVAL;
}
- nt->np.local_ip = in_aton(buf);
+ if (strnchr(buf, count, ':')) {
+ const char *end;
+ if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
+ if (*end && *end != '\n') {
+ printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+ return -EINVAL;
+ }
+ nt->np.ipv6 = true;
+ } else
+ return -EINVAL;
+ } else {
+ if (!nt->np.ipv6) {
+ nt->np.local_ip.ip = in_aton(buf);
+ } else
+ return -EINVAL;
+ }
return strnlen(buf, count);
}
@@ -426,7 +447,22 @@ static ssize_t store_remote_ip(struct netconsole_target *nt,
return -EINVAL;
}
- nt->np.remote_ip = in_aton(buf);
+ if (strnchr(buf, count, ':')) {
+ const char *end;
+ if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
+ if (*end && *end != '\n') {
+ printk(KERN_ERR "netconsole: invalid IPv6 address at: <%c>\n", *end);
+ return -EINVAL;
+ }
+ nt->np.ipv6 = true;
+ } else
+ return -EINVAL;
+ } else {
+ if (!nt->np.ipv6) {
+ nt->np.remote_ip.ip = in_aton(buf);
+ } else
+ return -EINVAL;
+ }
return strnlen(buf, count);
}
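
A userspace analogue of the parsing rule the netconsole hunks above introduce: a ':' anywhere in the string selects IPv6 parsing, otherwise the legacy IPv4 path is kept. The kernel uses in6_pton()/in_aton(); inet_pton() plays the same role in this illustration.

	#include <arpa/inet.h>
	#include <stdio.h>
	#include <string.h>

	/* Returns 6 for IPv6, 4 for IPv4, -1 on error, mirroring the store_*_ip logic */
	static int parse_ip(const char *buf, void *out)
	{
		if (strchr(buf, ':'))
			return inet_pton(AF_INET6, buf, out) == 1 ? 6 : -1;
		return inet_pton(AF_INET, buf, out) == 1 ? 4 : -1;
	}

	int main(void)
	{
		unsigned char addr[16];

		printf("%d\n", parse_ip("10.0.0.1", addr));	/* 4 */
		printf("%d\n", parse_ip("fe80::1", addr));	/* 6 */
		printf("%d\n", parse_ip("bogus", addr));	/* -1 */
		return 0;
	}
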
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
new file mode 100644
index 000000000000..ed947dd76fbd
--- /dev/null
+++ b/drivers/net/ntb_netdev.c
@@ -0,0 +1,408 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copy
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Network Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ntb.h>
+
+#define NTB_NETDEV_VER "0.7"
+
+MODULE_DESCRIPTION(KBUILD_MODNAME);
+MODULE_VERSION(NTB_NETDEV_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+struct ntb_netdev {
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct net_device *ndev;
+ struct ntb_transport_qp *qp;
+};
+
+#define NTB_TX_TIMEOUT_MS 1000
+#define NTB_RXQ_SIZE 100
+
+static LIST_HEAD(dev_list);
+
+static void ntb_netdev_event_handler(void *data, int status)
+{
+ struct net_device *ndev = data;
+ struct ntb_netdev *dev = netdev_priv(ndev);
+
+ netdev_dbg(ndev, "Event %x, Link %x\n", status,
+ ntb_transport_link_query(dev->qp));
+
+ /* Currently, only link status event is supported */
+ if (status)
+ netif_carrier_on(ndev);
+ else
+ netif_carrier_off(ndev);
+}
+
+static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len)
+{
+ struct net_device *ndev = qp_data;
+ struct sk_buff *skb;
+ int rc;
+
+ skb = data;
+ if (!skb)
+ return;
+
+ netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (netif_rx(skb) == NET_RX_DROP) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ } else {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+ }
+
+ skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
+ if (!skb) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_frame_errors++;
+ return;
+ }
+
+ rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
+ if (rc) {
+ dev_kfree_skb(skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_fifo_errors++;
+ }
+}
+
+static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len)
+{
+ struct net_device *ndev = qp_data;
+ struct sk_buff *skb;
+
+ skb = data;
+ if (!skb || !ndev)
+ return;
+
+ if (len > 0) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ } else {
+ ndev->stats.tx_errors++;
+ ndev->stats.tx_aborted_errors++;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ int rc;
+
+ netdev_dbg(ndev, "%s: skb len %d\n", __func__, skb->len);
+
+ rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
+ if (rc)
+ goto err;
+
+ return NETDEV_TX_OK;
+
+err:
+ ndev->stats.tx_dropped++;
+ ndev->stats.tx_errors++;
+ return NETDEV_TX_BUSY;
+}
+
+static int ntb_netdev_open(struct net_device *ndev)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int rc, i, len;
+
+ /* Add some empty rx bufs */
+ for (i = 0; i < NTB_RXQ_SIZE; i++) {
+ skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
+ ndev->mtu + ETH_HLEN);
+ if (rc == -EINVAL)
+ goto err;
+ }
+
+ netif_carrier_off(ndev);
+ ntb_transport_link_up(dev->qp);
+
+ return 0;
+
+err:
+ while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+ dev_kfree_skb(skb);
+ return rc;
+}
+
+static int ntb_netdev_close(struct net_device *ndev)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int len;
+
+ ntb_transport_link_down(dev->qp);
+
+ while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int len, rc;
+
+ if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
+ return -EINVAL;
+
+ if (!netif_running(ndev)) {
+ ndev->mtu = new_mtu;
+ return 0;
+ }
+
+ /* Bring down the link and dispose of posted rx entries */
+ ntb_transport_link_down(dev->qp);
+
+ if (ndev->mtu < new_mtu) {
+ int i;
+
+ for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
+ dev_kfree_skb(skb);
+
+ for (; i; i--) {
+ skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
+ new_mtu + ETH_HLEN);
+ if (rc) {
+ dev_kfree_skb(skb);
+ goto err;
+ }
+ }
+ }
+
+ ndev->mtu = new_mtu;
+
+ ntb_transport_link_up(dev->qp);
+
+ return 0;
+
+err:
+ ntb_transport_link_down(dev->qp);
+
+ while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+ dev_kfree_skb(skb);
+
+ netdev_err(ndev, "Error changing MTU, device inoperable\n");
+ return rc;
+}
+
+static const struct net_device_ops ntb_netdev_ops = {
+ .ndo_open = ntb_netdev_open,
+ .ndo_stop = ntb_netdev_close,
+ .ndo_start_xmit = ntb_netdev_start_xmit,
+ .ndo_change_mtu = ntb_netdev_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static void ntb_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct ntb_netdev *dev = netdev_priv(ndev);
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
+}
+
+static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = SUPPORTED_Backplane;
+ cmd->advertising = ADVERTISED_Backplane;
+ cmd->speed = SPEED_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_OTHER;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_DUMMY1;
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+
+ return 0;
+}
+
+static const struct ethtool_ops ntb_ethtool_ops = {
+ .get_drvinfo = ntb_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_settings = ntb_get_settings,
+};
+
+static const struct ntb_queue_handlers ntb_netdev_handlers = {
+ .tx_handler = ntb_netdev_tx_handler,
+ .rx_handler = ntb_netdev_rx_handler,
+ .event_handler = ntb_netdev_event_handler,
+};
+
+static int ntb_netdev_probe(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ntb_netdev *dev;
+ int rc;
+
+ ndev = alloc_etherdev(sizeof(struct ntb_netdev));
+ if (!ndev)
+ return -ENOMEM;
+
+ dev = netdev_priv(ndev);
+ dev->ndev = ndev;
+ dev->pdev = pdev;
+ BUG_ON(!dev->pdev);
+ ndev->features = NETIF_F_HIGHDMA;
+
+ ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
+ ndev->hw_features = ndev->features;
+ ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
+
+ random_ether_addr(ndev->perm_addr);
+ memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
+
+ ndev->netdev_ops = &ntb_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops);
+
+ dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
+ if (!dev->qp) {
+ rc = -EIO;
+ goto err;
+ }
+
+ ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;
+
+ rc = register_netdev(ndev);
+ if (rc)
+ goto err1;
+
+ list_add(&dev->list, &dev_list);
+ dev_info(&pdev->dev, "%s created\n", ndev->name);
+ return 0;
+
+err1:
+ ntb_transport_free_queue(dev->qp);
+err:
+ free_netdev(ndev);
+ return rc;
+}
+
+static void ntb_netdev_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ntb_netdev *dev;
+
+ list_for_each_entry(dev, &dev_list, list) {
+ if (dev->pdev == pdev)
+ break;
+ }
+ if (dev == NULL)
+ return;
+
+ ndev = dev->ndev;
+
+ unregister_netdev(ndev);
+ ntb_transport_free_queue(dev->qp);
+ free_netdev(ndev);
+}
+
+static struct ntb_client ntb_netdev_client = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .probe = ntb_netdev_probe,
+ .remove = ntb_netdev_remove,
+};
+
+static int __init ntb_netdev_init_module(void)
+{
+ int rc;
+
+ rc = ntb_register_client_dev(KBUILD_MODNAME);
+ if (rc)
+ return rc;
+ return ntb_register_client(&ntb_netdev_client);
+}
+module_init(ntb_netdev_init_module);
+
+static void __exit ntb_netdev_exit_module(void)
+{
+ ntb_unregister_client(&ntb_netdev_client);
+ ntb_unregister_client_dev(KBUILD_MODNAME);
+}
+module_exit(ntb_netdev_exit_module);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 961f0b293913..450345261bd3 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -4,7 +4,6 @@
menuconfig PHYLIB
tristate "PHY Device support and infrastructure"
- depends on !S390
depends on NETDEVICES
help
Ethernet controllers are usually attached to PHY
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index d5199cb4caec..b5ddd5077a80 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -36,8 +36,9 @@ MODULE_LICENSE("GPL");
/* IP101A/G - IP1001 */
#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */
+#define IP1001_RXPHASE_SEL (1<<0) /* Add delay on RX_CLK */
+#define IP1001_TXPHASE_SEL (1<<1) /* Add delay on TX_CLK */
#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
-#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
@@ -138,19 +139,24 @@ static int ip1001_config_init(struct phy_device *phydev)
if (c < 0)
return c;
- /* INTR pin used: speed/link/duplex will cause an interrupt */
- c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
- if (c < 0)
- return c;
+ if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
- /* Additional delay (2ns) used to adjust RX clock phase
- * at RGMII interface */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
if (c < 0)
return c;
- c |= IP1001_PHASE_SEL_MASK;
+ c &= ~(IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ c |= (IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
+ else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ c |= IP1001_RXPHASE_SEL;
+ else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ c |= IP1001_TXPHASE_SEL;
+
c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
if (c < 0)
return c;
@@ -167,6 +173,11 @@ static int ip101a_g_config_init(struct phy_device *phydev)
if (c < 0)
return c;
+ /* INTR pin used: speed/link/duplex will cause an interrupt */
+ c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+ if (c < 0)
+ return c;
+
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
c |= IP101A_G_APS_ON;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5d2a3f215887..22dec9c7ef05 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -353,15 +353,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
int err;
int temp;
- /* Enable Fiber/Copper auto selection */
- temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
- temp &= ~MII_M1111_HWCFG_FIBER_COPPER_AUTO;
- phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
-
- temp = phy_read(phydev, MII_BMCR);
- temp |= BMCR_RESET;
- phy_write(phydev, MII_BMCR, temp);
-
if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
(phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
(phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 0c9accb1c14f..e91d7d736ae2 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -53,7 +53,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
{
enum of_gpio_flags f;
struct mdio_mux_gpio_state *s;
- unsigned int num_gpios;
+ int num_gpios;
unsigned int n;
int r;
@@ -61,7 +61,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
return -ENODEV;
num_gpios = of_gpio_count(pdev->dev.of_node);
- if (num_gpios == 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
+ if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
return -ENODEV;
s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 044b5326459f..dc920974204e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -95,7 +95,7 @@ static struct class mdio_bus_class = {
#if IS_ENABLED(CONFIG_OF_MDIO)
/* Helper function for of_mdio_find_bus */
-static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
+static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np)
{
return dev->of_node == mdio_bus_np;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index b983596abcbb..29934446436a 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -5,15 +5,20 @@
*
* Author: David J. Choi
*
- * Copyright (c) 2010 Micrel, Inc.
+ * Copyright (c) 2010-2013 Micrel, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
- * Support : ksz9021 1000/100/10 phy from Micrel
- * ks8001, ks8737, ks8721, ks8041, ks8051 100/10 phy
+ * Support : Micrel Phys:
+ * Giga phys: ksz9021, ksz9031
+ * 100/10 Phys : ksz8001, ksz8721, ksz8737, ksz8041
+ * ksz8021, ksz8031, ksz8051,
+ * ksz8081, ksz8091,
+ * ksz8061,
+ * Switch : ksz8873, ksz886x
*/
#include <linux/kernel.h>
@@ -176,7 +181,7 @@ static struct phy_driver ksphy_driver[] = {
}, {
.phy_id = PHY_ID_KSZ8021,
.phy_id_mask = 0x00ffffff,
- .name = "Micrel KSZ8021",
+ .name = "Micrel KSZ8021 or KSZ8031",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -225,6 +230,30 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = PHY_ID_KSZ8081,
+ .name = "Micrel KSZ8081 or KSZ8091",
+ .phy_id_mask = 0x00fffff0,
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
+ .phy_id = PHY_ID_KSZ8061,
+ .name = "Micrel KSZ8061",
+ .phy_id_mask = 0x00fffff0,
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
@@ -238,6 +267,19 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = ksz9021_config_intr,
.driver = { .owner = THIS_MODULE, },
}, {
+ .phy_id = PHY_ID_KSZ9031,
+ .phy_id_mask = 0x00fffff0,
+ .name = "Micrel KSZ9031 Gigabit PHY",
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
+ | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = ksz9021_config_intr,
+ .driver = { .owner = THIS_MODULE, },
+}, {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KSZ8873MLL Switch",
@@ -247,6 +289,16 @@ static struct phy_driver ksphy_driver[] = {
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
.driver = { .owner = THIS_MODULE, },
+}, {
+ .phy_id = PHY_ID_KSZ886X,
+ .phy_id_mask = 0x00fffff0,
+ .name = "Micrel KSZ886X Switch",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .driver = { .owner = THIS_MODULE, },
} };
static int __init ksphy_init(void)
@@ -270,12 +322,16 @@ MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
+ { PHY_ID_KSZ9031, 0x00fffff0 },
{ PHY_ID_KSZ8001, 0x00ffffff },
{ PHY_ID_KS8737, 0x00fffff0 },
{ PHY_ID_KSZ8021, 0x00ffffff },
{ PHY_ID_KSZ8041, 0x00fffff0 },
{ PHY_ID_KSZ8051, 0x00fffff0 },
+ { PHY_ID_KSZ8061, 0x00fffff0 },
+ { PHY_ID_KSZ8081, 0x00fffff0 },
{ PHY_ID_KSZ8873MLL, 0x00fffff0 },
+ { PHY_ID_KSZ886X, 0x00fffff0 },
{ }
};
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8af46e88a181..9930f9999561 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -416,16 +416,15 @@ static void phy_prepare_link(struct phy_device *phydev,
* @dev: the network device to connect
* @phydev: the pointer to the phy device
* @handler: callback function for state change notifications
- * @flags: PHY device's dev_flags
* @interface: PHY device's interface
*/
int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
- void (*handler)(struct net_device *), u32 flags,
+ void (*handler)(struct net_device *),
phy_interface_t interface)
{
int rc;
- rc = phy_attach_direct(dev, phydev, flags, interface);
+ rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
if (rc)
return rc;
@@ -443,7 +442,6 @@ EXPORT_SYMBOL(phy_connect_direct);
* @dev: the network device to connect
* @bus_id: the id string of the PHY device to connect
* @handler: callback function for state change notifications
- * @flags: PHY device's dev_flags
* @interface: PHY device's interface
*
* Description: Convenience function for connecting ethernet
@@ -455,7 +453,7 @@ EXPORT_SYMBOL(phy_connect_direct);
* the desired functionality.
*/
struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
- void (*handler)(struct net_device *), u32 flags,
+ void (*handler)(struct net_device *),
phy_interface_t interface)
{
struct phy_device *phydev;
@@ -471,7 +469,7 @@ struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
}
phydev = to_phy_device(d);
- rc = phy_connect_direct(dev, phydev, handler, flags, interface);
+ rc = phy_connect_direct(dev, phydev, handler, interface);
if (rc)
return ERR_PTR(rc);
@@ -576,14 +574,13 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* phy_attach - attach a network device to a particular PHY device
* @dev: network device to attach
* @bus_id: Bus ID of PHY device to attach
- * @flags: PHY device's dev_flags
* @interface: PHY device's interface
*
* Description: Same as phy_attach_direct() except that a PHY bus_id
* string is passed instead of a pointer to a struct phy_device.
*/
struct phy_device *phy_attach(struct net_device *dev,
- const char *bus_id, u32 flags, phy_interface_t interface)
+ const char *bus_id, phy_interface_t interface)
{
struct bus_type *bus = &mdio_bus_type;
struct phy_device *phydev;
@@ -599,7 +596,7 @@ struct phy_device *phy_attach(struct net_device *dev,
}
phydev = to_phy_device(d);
- rc = phy_attach_direct(dev, phydev, flags, interface);
+ rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
if (rc)
return ERR_PTR(rc);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 72f93470ea35..8e7af8354342 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -23,6 +23,8 @@
#define RTL821x_INER_INIT 0x6400
#define RTL821x_INSR 0x13
+#define RTL8211E_INER_LINK_STAT 0x10
+
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
@@ -36,7 +38,7 @@ static int rtl821x_ack_interrupt(struct phy_device *phydev)
return (err < 0) ? err : 0;
}
-static int rtl821x_config_intr(struct phy_device *phydev)
+static int rtl8211b_config_intr(struct phy_device *phydev)
{
int err;
@@ -49,28 +51,63 @@ static int rtl821x_config_intr(struct phy_device *phydev)
return err;
}
+static int rtl8211e_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, RTL821x_INER,
+ RTL8211E_INER_LINK_STAT);
+ else
+ err = phy_write(phydev, RTL821x_INER, 0);
+
+ return err;
+}
+
/* RTL8211B */
-static struct phy_driver rtl821x_driver = {
+static struct phy_driver rtl8211b_driver = {
.phy_id = 0x001cc912,
- .name = "RTL821x Gigabit Ethernet",
+ .name = "RTL8211B Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &rtl821x_ack_interrupt,
- .config_intr = &rtl821x_config_intr,
+ .config_intr = &rtl8211b_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+/* RTL8211E */
+static struct phy_driver rtl8211e_driver = {
+ .phy_id = 0x001cc915,
+ .name = "RTL8211E Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &rtl821x_ack_interrupt,
+ .config_intr = &rtl8211e_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
};
static int __init realtek_init(void)
{
- return phy_driver_register(&rtl821x_driver);
+ int ret;
+
+ ret = phy_driver_register(&rtl8211b_driver);
+ if (ret < 0)
+ return -ENODEV;
+ return phy_driver_register(&rtl8211e_driver);
}
static void __exit realtek_exit(void)
{
- phy_driver_unregister(&rtl821x_driver);
+ phy_driver_unregister(&rtl8211b_driver);
+ phy_driver_unregister(&rtl8211e_driver);
}
module_init(realtek_init);
@@ -78,6 +115,7 @@ module_exit(realtek_exit);
static struct mdio_device_id __maybe_unused realtek_tbl[] = {
{ 0x001cc912, 0x001fffff },
+ { 0x001cc915, 0x001fffff },
{ }
};
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 41eb8ffeb53d..5c87eef40bf9 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -275,10 +275,8 @@ static int ks8995_probe(struct spi_device *spi)
pdata = spi->dev.platform_data;
ks = kzalloc(sizeof(*ks), GFP_KERNEL);
- if (!ks) {
- dev_err(&spi->dev, "no memory for private data\n");
+ if (!ks)
return -ENOMEM;
- }
mutex_init(&ks->lock);
ks->pdata = pdata;
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index 872df3ef07a6..1373c6d7278d 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -82,8 +82,8 @@ config PPP_FILTER
If unsure, say N.
config PPP_MPPE
- tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
- depends on PPP && EXPERIMENTAL
+ tristate "PPP MPPE compression (encryption)"
+ depends on PPP
select CRYPTO
select CRYPTO_SHA1
select CRYPTO_ARC4
@@ -96,8 +96,8 @@ config PPP_MPPE
configuring PPTP clients and servers to utilize this method.
config PPP_MULTILINK
- bool "PPP multilink support (EXPERIMENTAL)"
- depends on PPP && EXPERIMENTAL
+ bool "PPP multilink support"
+ depends on PPP
---help---
PPP multilink is a protocol (defined in RFC 1990) which allows you
to combine several (logical or physical) lines into one logical PPP
@@ -118,8 +118,8 @@ config PPPOATM
changes its encapsulation unilaterally.
config PPPOE
- tristate "PPP over Ethernet (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PPP
+ tristate "PPP over Ethernet"
+ depends on PPP
---help---
Support for PPP over Ethernet.
@@ -130,8 +130,8 @@ config PPPOE
the heading "Kernel mode PPPoE").
config PPTP
- tristate "PPP over IPv4 (PPTP) (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PPP && NET_IPGRE_DEMUX
+ tristate "PPP over IPv4 (PPTP)"
+ depends on PPP && NET_IPGRE_DEMUX
---help---
Support for PPP over IPv4.(Point-to-Point Tunneling Protocol)
@@ -141,12 +141,13 @@ config PPTP
utilize this module.
config PPPOL2TP
- tristate "PPP over L2TP (EXPERIMENTAL)"
- depends on EXPERIMENTAL && L2TP && PPP
+ tristate "PPP over L2TP"
+ depends on L2TP && PPP
---help---
Support for PPP-over-L2TP socket family. L2TP is a protocol
used by ISPs and enterprises to tunnel PPP traffic over UDP
tunnels. L2TP is replacing PPTP for VPN uses.
+if TTY
config PPP_ASYNC
tristate "PPP support for async serial ports"
@@ -172,4 +173,6 @@ config PPP_SYNC_TTY
To compile this driver as a module, choose M here.
+endif # TTY
+
endif # PPP
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0b2706abe3e3..3db9131e9229 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1058,7 +1058,15 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
return stats64;
}
+static struct lock_class_key ppp_tx_busylock;
+static int ppp_dev_init(struct net_device *dev)
+{
+ dev->qdisc_tx_busylock = &ppp_tx_busylock;
+ return 0;
+}
+
static const struct net_device_ops ppp_netdev_ops = {
+ .ndo_init = ppp_dev_init,
.ndo_start_xmit = ppp_start_xmit,
.ndo_do_ioctl = ppp_net_ioctl,
.ndo_get_stats64 = ppp_get_stats64,
@@ -1805,8 +1813,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
if (ppp->pass_filter || ppp->active_filter) {
- if (skb_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_unclone(skb, GFP_ATOMIC))
goto err;
*skb_push(skb, 2) = 0;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 20f31d0d1536..bb07ba94c3aa 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1134,7 +1134,7 @@ static __net_init int pppoe_init_net(struct net *net)
rwlock_init(&pn->hash_lock);
- pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops);
+ pde = proc_create("pppoe", S_IRUGO, net->proc_net, &pppoe_seq_fops);
#ifdef CONFIG_PROC_FS
if (!pde)
return -ENOMEM;
@@ -1145,7 +1145,7 @@ static __net_init int pppoe_init_net(struct net *net)
static __net_exit void pppoe_exit_net(struct net *net)
{
- proc_net_remove(net, "pppoe");
+ remove_proc_entry("pppoe", net->proc_net);
}
static struct pernet_operations pppoe_net_ops = {
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index d8b9b1e8ee02..f433b594388e 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -410,10 +410,10 @@ static void rionet_get_drvinfo(struct net_device *ndev,
{
struct rionet_private *rnet = netdev_priv(ndev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "n/a");
- strcpy(info->bus_info, rnet->mport->name);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
+ strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}
static u32 rionet_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/slip/Kconfig b/drivers/net/slip/Kconfig
index 211b160e4e9c..48e68714eef3 100644
--- a/drivers/net/slip/Kconfig
+++ b/drivers/net/slip/Kconfig
@@ -4,6 +4,7 @@
config SLIP
tristate "SLIP (serial line) support"
+ depends on TTY
---help---
Say Y if you intend to use SLIP or CSLIP (compressed SLIP) to
connect to your Internet service provider or to connect to some
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 6b08bd419fba..c3011af68e91 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -1,6 +1,5 @@
menuconfig NET_TEAM
- tristate "Ethernet team driver support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Ethernet team driver support"
---help---
This allows one to create virtual interfaces that teams together
multiple ethernet devices.
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index ad86660fb8f9..05c5efe84591 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -28,6 +28,7 @@
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
+#include <generated/utsrelease.h>
#include <linux/if_team.h>
#define DRV_NAME "team"
@@ -507,6 +508,7 @@ static bool team_is_mode_set(struct team *team)
static void team_set_no_mode(struct team *team)
{
+ team->user_carrier_enabled = false;
team->mode = &__team_no_mode;
}
@@ -1054,10 +1056,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
}
}
- err = netdev_set_master(port_dev, dev);
+ err = netdev_master_upper_dev_link(port_dev, dev);
if (err) {
- netdev_err(dev, "Device %s failed to set master\n", portname);
- goto err_set_master;
+ netdev_err(dev, "Device %s failed to set upper link\n",
+ portname);
+ goto err_set_upper_link;
}
err = netdev_rx_handler_register(port_dev, team_handle_frame,
@@ -1090,9 +1093,9 @@ err_option_port_add:
netdev_rx_handler_unregister(port_dev);
err_handler_register:
- netdev_set_master(port_dev, NULL);
+ netdev_upper_dev_unlink(port_dev, dev);
-err_set_master:
+err_set_upper_link:
team_port_disable_netpoll(port);
err_enable_netpoll:
@@ -1129,18 +1132,20 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
return -ENOENT;
}
- __team_option_inst_mark_removed_port(team, port);
- __team_options_change_check(team);
- __team_option_inst_del_port(team, port);
- __team_port_change_port_removed(port);
team_port_disable(team, port);
list_del_rcu(&port->list);
netdev_rx_handler_unregister(port_dev);
- netdev_set_master(port_dev, NULL);
+ netdev_upper_dev_unlink(port_dev, dev);
team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
dev_close(port_dev);
team_port_leave(team, port);
+
+ __team_option_inst_mark_removed_port(team, port);
+ __team_options_change_check(team);
+ __team_option_inst_del_port(team, port);
+ __team_port_change_port_removed(port);
+
team_port_set_orig_dev_addr(port);
dev_set_mtu(port_dev, port->orig.mtu);
synchronize_rcu();
@@ -1399,13 +1404,11 @@ static void team_destructor(struct net_device *dev)
static int team_open(struct net_device *dev)
{
- netif_carrier_on(dev);
return 0;
}
static int team_close(struct net_device *dev)
{
- netif_carrier_off(dev);
return 0;
}
@@ -1501,7 +1504,6 @@ static int team_set_mac_address(struct net_device *dev, void *p)
if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list)
if (team->ops.port_change_dev_addr)
@@ -1707,6 +1709,19 @@ static netdev_features_t team_fix_features(struct net_device *dev,
return features;
}
+static int team_change_carrier(struct net_device *dev, bool new_carrier)
+{
+ struct team *team = netdev_priv(dev);
+
+ team->user_carrier_enabled = true;
+
+ if (new_carrier)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ return 0;
+}
+
static const struct net_device_ops team_netdev_ops = {
.ndo_init = team_init,
.ndo_uninit = team_uninit,
@@ -1729,8 +1744,24 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_add_slave = team_add_slave,
.ndo_del_slave = team_del_slave,
.ndo_fix_features = team_fix_features,
+ .ndo_change_carrier = team_change_carrier,
};
+/***********************
+ * ethtool interface
+ ***********************/
+
+static void team_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+}
+
+static const struct ethtool_ops team_ethtool_ops = {
+ .get_drvinfo = team_ethtool_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
/***********************
* rt netlink interface
@@ -1746,7 +1777,6 @@ static void team_setup_by_port(struct net_device *dev,
dev->mtu = port_dev->mtu;
memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
static int team_dev_type_check_change(struct net_device *dev,
@@ -1780,6 +1810,7 @@ static void team_setup(struct net_device *dev)
ether_setup(dev);
dev->netdev_ops = &team_netdev_ops;
+ dev->ethtool_ops = &team_ethtool_ops;
dev->destructor = team_destructor;
dev->tx_queue_len = 0;
dev->flags |= IFF_MULTICAST;
@@ -1941,30 +1972,6 @@ static void team_nl_team_put(struct team *team)
dev_put(team->dev);
}
-static int team_nl_send_generic(struct genl_info *info, struct team *team,
- int (*fill_func)(struct sk_buff *skb,
- struct genl_info *info,
- int flags, struct team *team))
-{
- struct sk_buff *skb;
- int err;
-
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- err = fill_func(skb, info, NLM_F_ACK, team);
- if (err < 0)
- goto err_fill;
-
- err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
- return err;
-
-err_fill:
- nlmsg_free(skb);
- return err;
-}
-
typedef int team_nl_send_func_t(struct sk_buff *skb,
struct team *team, u32 portid);
@@ -2309,16 +2316,57 @@ team_put:
return err;
}
-static int team_nl_fill_port_list_get(struct sk_buff *skb,
- u32 portid, u32 seq, int flags,
- struct team *team,
- bool fillall)
+static int team_nl_fill_one_port_get(struct sk_buff *skb,
+ struct team_port *port)
+{
+ struct nlattr *port_item;
+
+ port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
+ if (!port_item)
+ goto nest_cancel;
+ if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
+ goto nest_cancel;
+ if (port->changed) {
+ if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
+ goto nest_cancel;
+ port->changed = false;
+ }
+ if ((port->removed &&
+ nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
+ (port->state.linkup &&
+ nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
+ nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
+ nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
+ goto nest_cancel;
+ nla_nest_end(skb, port_item);
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(skb, port_item);
+ return -EMSGSIZE;
+}
+
+static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
+ int flags, team_nl_send_func_t *send_func,
+ struct team_port *one_port)
{
struct nlattr *port_list;
+ struct nlmsghdr *nlh;
void *hdr;
struct team_port *port;
+ int err;
+ struct sk_buff *skb = NULL;
+ bool incomplete;
+ int i;
+
+ port = list_first_entry(&team->port_list, struct team_port, list);
+
+start_again:
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
+ if (err)
+ return err;
- hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags,
+ hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_PORT_LIST_GET);
if (!hdr)
return -EMSGSIZE;
@@ -2329,47 +2377,54 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
if (!port_list)
goto nla_put_failure;
- list_for_each_entry(port, &team->port_list, list) {
- struct nlattr *port_item;
+ i = 0;
+ incomplete = false;
- /* Include only changed ports if fill all mode is not on */
- if (!fillall && !port->changed)
- continue;
- port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
- if (!port_item)
- goto nla_put_failure;
- if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
- goto nla_put_failure;
- if (port->changed) {
- if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
- goto nla_put_failure;
- port->changed = false;
+	/* If one port is selected, the caller wants to send a port list
+	 * containing only this port. Otherwise go through all listed ports and send all.
+	 */
+ if (one_port) {
+ err = team_nl_fill_one_port_get(skb, one_port);
+ if (err)
+ goto errout;
+ } else {
+ list_for_each_entry(port, &team->port_list, list) {
+ err = team_nl_fill_one_port_get(skb, port);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!i)
+ goto errout;
+ incomplete = true;
+ break;
+ }
+ goto errout;
+ }
+ i++;
}
- if ((port->removed &&
- nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
- (port->state.linkup &&
- nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
- nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
- nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
- goto nla_put_failure;
- nla_nest_end(skb, port_item);
}
nla_nest_end(skb, port_list);
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ if (incomplete)
+ goto start_again;
+
+send_done:
+ nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = __send_and_alloc_skb(&skb, team, portid, send_func);
+ if (err)
+ goto errout;
+ goto send_done;
+ }
+
+ return send_func(skb, team, portid);
nla_put_failure:
+ err = -EMSGSIZE;
+errout:
genlmsg_cancel(skb, hdr);
- return -EMSGSIZE;
-}
-
-static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
- struct genl_info *info, int flags,
- struct team *team)
-{
- return team_nl_fill_port_list_get(skb, info->snd_portid,
- info->snd_seq, NLM_F_ACK,
- team, true);
+ nlmsg_free(skb);
+ return err;
}
static int team_nl_cmd_port_list_get(struct sk_buff *skb,
@@ -2382,7 +2437,8 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
if (!team)
return -EINVAL;
- err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);
+ err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
+ NLM_F_ACK, team_nl_send_unicast, NULL);
team_nl_team_put(team);
@@ -2433,27 +2489,11 @@ static int team_nl_send_event_options_get(struct team *team,
sel_opt_inst_list);
}
-static int team_nl_send_event_port_list_get(struct team *team)
+static int team_nl_send_event_port_get(struct team *team,
+ struct team_port *port)
{
- struct sk_buff *skb;
- int err;
- struct net *net = dev_net(team->dev);
-
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
- if (err < 0)
- goto err_fill;
-
- err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
- GFP_KERNEL);
- return err;
-
-err_fill:
- nlmsg_free(skb);
- return err;
+ return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
+ port);
}
static int team_nl_init(void)
@@ -2526,28 +2566,53 @@ static void __team_port_change_send(struct team_port *port, bool linkup)
port->state.duplex = 0;
send_event:
- err = team_nl_send_event_port_list_get(port->team);
+ err = team_nl_send_event_port_get(port->team, port);
if (err && err != -ESRCH)
netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
port->dev->name, err);
}
+static void __team_carrier_check(struct team *team)
+{
+ struct team_port *port;
+ bool team_linkup;
+
+ if (team->user_carrier_enabled)
+ return;
+
+ team_linkup = false;
+ list_for_each_entry(port, &team->port_list, list) {
+ if (port->linkup) {
+ team_linkup = true;
+ break;
+ }
+ }
+
+ if (team_linkup)
+ netif_carrier_on(team->dev);
+ else
+ netif_carrier_off(team->dev);
+}
+
static void __team_port_change_check(struct team_port *port, bool linkup)
{
if (port->state.linkup != linkup)
__team_port_change_send(port, linkup);
+ __team_carrier_check(port->team);
}
static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
__team_port_change_send(port, linkup);
+ __team_carrier_check(port->team);
}
static void __team_port_change_port_removed(struct team_port *port)
{
port->removed = true;
__team_port_change_send(port, false);
+ __team_carrier_check(port->team);
}
static void team_port_change_check(struct team_port *port, bool linkup)
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index 6262b4defd93..40fd3381b693 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -19,6 +19,7 @@
struct ab_priv {
struct team_port __rcu *active_port;
+ struct team_option_inst_info *ap_opt_inst_info;
};
static struct ab_priv *ab_priv(struct team *team)
@@ -54,8 +55,17 @@ drop:
static void ab_port_leave(struct team *team, struct team_port *port)
{
- if (ab_priv(team)->active_port == port)
+ if (ab_priv(team)->active_port == port) {
RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
+ team_option_inst_set_change(ab_priv(team)->ap_opt_inst_info);
+ }
+}
+
+static int ab_active_port_init(struct team *team,
+ struct team_option_inst_info *info)
+{
+ ab_priv(team)->ap_opt_inst_info = info;
+ return 0;
}
static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
@@ -88,6 +98,7 @@ static const struct team_option ab_options[] = {
{
.name = "activeport",
.type = TEAM_OPTION_TYPE_U32,
+ .init = ab_active_port_init,
.getter = ab_active_port_get,
.setter = ab_active_port_set,
},
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fbd106edbe59..b6f45c5d84d5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,11 +109,11 @@ struct tap_filter {
unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};
-/* 1024 is probably a high enough limit: modern hypervisors seem to support on
- * the order of 100-200 CPUs so this leaves us some breathing space if we want
- * to match a queue per guest CPU.
- */
-#define MAX_TAP_QUEUES 1024
+/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated
+ * for the netdevice fit in one page, ensuring the memory allocation
+ * succeeds. TODO: increase the limit. */
+#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+#define MAX_TAP_FLOWS 4096
#define TUN_FLOW_EXPIRE (3 * HZ)
@@ -185,6 +185,8 @@ struct tun_struct {
unsigned long ageing_time;
unsigned int numdisabled;
struct list_head disabled;
+ void *security;
+ u32 flow_count;
};
static inline u32 tun_hashfn(u32 rxhash)
@@ -218,6 +220,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
e->queue_index = queue_index;
e->tun = tun;
hlist_add_head_rcu(&e->hash_link, head);
+ ++tun->flow_count;
}
return e;
}
@@ -228,6 +231,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
e->rxhash, e->queue_index);
hlist_del_rcu(&e->hash_link);
kfree_rcu(e, rcu);
+ --tun->flow_count;
}
static void tun_flow_flush(struct tun_struct *tun)
@@ -294,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
}
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
- u16 queue_index)
+ struct tun_file *tfile)
{
struct hlist_head *head;
struct tun_flow_entry *e;
unsigned long delay = tun->ageing_time;
+ u16 queue_index = tfile->queue_index;
if (!rxhash)
return;
@@ -307,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
rcu_read_lock();
- if (tun->numqueues == 1)
+	/* There is a very small possibility of out-of-order delivery during
+	 * a queue switch; not worth optimizing. */
+ if (tun->numqueues == 1 || tfile->detached)
goto unlock;
e = tun_flow_find(head, rxhash);
@@ -317,7 +324,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
e->updated = jiffies;
} else {
spin_lock_bh(&tun->lock);
- if (!tun_flow_find(head, rxhash))
+ if (!tun_flow_find(head, rxhash) &&
+ tun->flow_count < MAX_TAP_FLOWS)
tun_flow_create(tun, head, rxhash, queue_index);
if (!timer_pending(&tun->flow_gc_timer))
@@ -404,24 +412,23 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
struct tun_struct *tun;
struct net_device *dev;
- tun = rcu_dereference_protected(tfile->tun,
- lockdep_rtnl_is_held());
- if (tun) {
+ tun = rtnl_dereference(tfile->tun);
+
+ if (tun && !tfile->detached) {
u16 index = tfile->queue_index;
BUG_ON(index >= tun->numqueues);
dev = tun->dev;
rcu_assign_pointer(tun->tfiles[index],
tun->tfiles[tun->numqueues - 1]);
- rcu_assign_pointer(tfile->tun, NULL);
- ntfile = rcu_dereference_protected(tun->tfiles[index],
- lockdep_rtnl_is_held());
+ ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
--tun->numqueues;
- if (clean)
+ if (clean) {
+ rcu_assign_pointer(tfile->tun, NULL);
sock_put(&tfile->sk);
- else
+ } else
tun_disable_queue(tun, tfile);
synchronize_net();
@@ -429,14 +436,19 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
/* Drop read queue */
skb_queue_purge(&tfile->sk.sk_receive_queue);
tun_set_real_num_queues(tun);
- } else if (tfile->detached && clean)
+ } else if (tfile->detached && clean) {
tun = tun_enable_queue(tfile);
+ sock_put(&tfile->sk);
+ }
if (clean) {
- if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
- !(tun->flags & TUN_PERSIST))
- if (tun->dev->reg_state == NETREG_REGISTERED)
+ if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
+ netif_carrier_off(tun->dev);
+
+ if (!(tun->flags & TUN_PERSIST) &&
+ tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
+ }
BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
&tfile->socket.flags));
@@ -458,19 +470,21 @@ static void tun_detach_all(struct net_device *dev)
int i, n = tun->numqueues;
for (i = 0; i < n; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
wake_up_all(&tfile->wq.wait);
rcu_assign_pointer(tfile->tun, NULL);
--tun->numqueues;
}
+ list_for_each_entry(tfile, &tun->disabled, next) {
+ wake_up_all(&tfile->wq.wait);
+ rcu_assign_pointer(tfile->tun, NULL);
+ }
BUG_ON(tun->numqueues != 0);
synchronize_net();
for (i = 0; i < n; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
/* Drop read queue */
skb_queue_purge(&tfile->sk.sk_receive_queue);
sock_put(&tfile->sk);
@@ -481,6 +495,9 @@ static void tun_detach_all(struct net_device *dev)
sock_put(&tfile->sk);
}
BUG_ON(tun->numdisabled != 0);
+
+ if (tun->flags & TUN_PERSIST)
+ module_put(THIS_MODULE);
}
static int tun_attach(struct tun_struct *tun, struct file *file)
@@ -488,8 +505,12 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
struct tun_file *tfile = file->private_data;
int err;
+ err = security_tun_dev_attach(tfile->socket.sk, tun->security);
+ if (err < 0)
+ goto out;
+
err = -EINVAL;
- if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held()))
+ if (rtnl_dereference(tfile->tun) && !tfile->detached)
goto out;
err = -EBUSY;
@@ -1179,6 +1200,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (zerocopy) {
skb_shinfo(skb)->destructor_arg = msg_control;
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
skb_reset_network_header(skb);
@@ -1188,7 +1210,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
tun->dev->stats.rx_packets++;
tun->dev->stats.rx_bytes += len;
- tun_flow_update(tun, rxhash, tfile->queue_index);
+ tun_flow_update(tun, rxhash, tfile);
return total_len;
}
@@ -1371,6 +1393,7 @@ static void tun_free_netdev(struct net_device *dev)
BUG_ON(!(list_empty(&tun->disabled)));
tun_flow_uninit(tun);
+ security_tun_dev_free_security(tun->security);
free_netdev(dev);
}
@@ -1544,6 +1567,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
struct net_device *dev;
int err;
+ if (tfile->detached)
+ return -EINVAL;
+
dev = __dev_get_by_name(net, ifr->ifr_name);
if (dev) {
if (ifr->ifr_flags & IFF_TUN_EXCL)
@@ -1557,7 +1583,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (tun_not_capable(tun))
return -EPERM;
- err = security_tun_dev_attach(tfile->socket.sk);
+ err = security_tun_dev_open(tun->security);
if (err < 0)
return err;
@@ -1572,6 +1598,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
else {
char *name;
unsigned long flags = 0;
+ int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
+ MAX_TAP_QUEUES : 1;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
@@ -1595,8 +1623,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
name = ifr->ifr_name;
dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
- tun_setup,
- MAX_TAP_QUEUES, MAX_TAP_QUEUES);
+ tun_setup, queues, queues);
+
if (!dev)
return -ENOMEM;
@@ -1614,7 +1642,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
spin_lock_init(&tun->lock);
- security_tun_dev_post_create(&tfile->sk);
+ err = security_tun_dev_alloc_security(&tun->security);
+ if (err < 0)
+ goto err_free_dev;
tun_net_init(dev);
@@ -1639,10 +1669,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
device_create_file(&tun->dev->dev, &dev_attr_group))
pr_err("Failed to create tun sysfs files\n");
-
- netif_carrier_on(tun->dev);
}
+ netif_carrier_on(tun->dev);
+
tun_debug(KERN_INFO, tun, "tun_set_iff\n");
if (ifr->ifr_flags & IFF_NO_PI)
@@ -1738,8 +1768,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
struct tun_file *tfile;
for (i = 0; i < n; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
sk_detach_filter(tfile->socket.sk);
}
@@ -1752,8 +1781,7 @@ static int tun_attach_filter(struct tun_struct *tun)
struct tun_file *tfile;
for (i = 0; i < tun->numqueues; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
if (ret) {
tun_detach_filter(tun, i);
@@ -1771,8 +1799,7 @@ static void tun_set_sndbuf(struct tun_struct *tun)
int i;
for (i = 0; i < tun->numqueues; i++) {
- tfile = rcu_dereference_protected(tun->tfiles[i],
- lockdep_rtnl_is_held());
+ tfile = rtnl_dereference(tun->tfiles[i]);
tfile->socket.sk->sk_sndbuf = tun->sndbuf;
}
}
@@ -1787,22 +1814,24 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
tun = tfile->detached;
- if (!tun)
+ if (!tun) {
ret = -EINVAL;
- else if (tun_not_capable(tun))
- ret = -EPERM;
- else
- ret = tun_attach(tun, file);
+ goto unlock;
+ }
+ ret = security_tun_dev_attach_queue(tun->security);
+ if (ret < 0)
+ goto unlock;
+ ret = tun_attach(tun, file);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
- tun = rcu_dereference_protected(tfile->tun,
- lockdep_rtnl_is_held());
- if (!tun || !(tun->flags & TUN_TAP_MQ))
+ tun = rtnl_dereference(tfile->tun);
+ if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
ret = -EINVAL;
else
__tun_detach(tfile, false);
} else
ret = -EINVAL;
+unlock:
rtnl_unlock();
return ret;
}
@@ -1880,10 +1909,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
/* Disable/Enable persist mode. Keep an extra reference to the
* module to prevent the module being unprobed.
*/
- if (arg) {
+ if (arg && !(tun->flags & TUN_PERSIST)) {
tun->flags |= TUN_PERSIST;
__module_get(THIS_MODULE);
- } else {
+ }
+ if (!arg && (tun->flags & TUN_PERSIST)) {
tun->flags &= ~TUN_PERSIST;
module_put(THIS_MODULE);
}
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index ef976215b649..da92ed3797aa 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -8,8 +8,7 @@ menu "USB Network Adapters"
depends on USB && NET
config USB_CATC
- tristate "USB CATC NetMate-based Ethernet device support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "USB CATC NetMate-based Ethernet device support"
select CRC32
---help---
Say Y if you want to use one of the following 10Mbps USB Ethernet
@@ -83,8 +82,7 @@ config USB_PEGASUS
module will be called pegasus.
config USB_RTL8150
- tristate "USB RTL8150 based ethernet device support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "USB RTL8150 based ethernet device support"
select NET_CORE
select MII
help
@@ -188,7 +186,7 @@ config USB_NET_CDCETHER
config USB_NET_CDC_EEM
tristate "CDC EEM support"
- depends on USB_USBNET && EXPERIMENTAL
+ depends on USB_USBNET
help
This option supports devices conforming to the Communication Device
Class (CDC) Ethernet Emulation Model, a specification that's easy to
@@ -287,7 +285,7 @@ config USB_NET_PLUSB
tristate "Prolific PL-2301/2302/25A1 based cables"
# if the handshake/init/reset problems, from original 'plusb',
# are ever resolved ... then remove "experimental"
- depends on USB_USBNET && EXPERIMENTAL
+ depends on USB_USBNET
help
Choose this option if you're using a host-to-host cable
with one of these chips.
@@ -301,8 +299,8 @@ config USB_NET_MCS7830
adapters marketed under the DeLOCK brand.
config USB_NET_RNDIS_HOST
- tristate "Host for RNDIS and ActiveSync devices (EXPERIMENTAL)"
- depends on USB_USBNET && EXPERIMENTAL
+ tristate "Host for RNDIS and ActiveSync devices"
+ depends on USB_USBNET
select USB_NET_CDCETHER
help
This option enables hosting "Remote NDIS" USB networking links,
@@ -380,7 +378,7 @@ config USB_EPSON2888
config USB_KC2190
boolean "KT Technology KC2190 based cables (InstaNet)"
- depends on USB_NET_CDC_SUBSET && EXPERIMENTAL
+ depends on USB_NET_CDC_SUBSET
help
Choose this option if you're using a host-to-host cable
with one of these chips.
@@ -445,7 +443,7 @@ config USB_NET_QMI_WWAN
config USB_HSO
tristate "Option USB High Speed Mobile Devices"
- depends on USB && RFKILL
+ depends on USB && RFKILL && TTY
default n
help
Choose this option if you have an Option HSDPA/HSUPA card.
@@ -493,7 +491,7 @@ config USB_SIERRA_NET
config USB_VL600
tristate "LG VL600 modem dongle"
- depends on USB_NET_CDCETHER
+ depends on USB_NET_CDCETHER && TTY
select USB_ACM
help
Select this if you want to use an LG Electronics 4G/LTE usb modem
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index e889631161b8..346c032aa795 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -167,6 +167,20 @@ struct asix_data {
u8 res;
};
+struct asix_rx_fixup_info {
+ struct sk_buff *ax_skb;
+ u32 header;
+ u16 size;
+ bool split_head;
+};
+
+struct asix_common_private {
+ struct asix_rx_fixup_info rx_fixup_info;
+};
+
+/* ASIX specific flags */
+#define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */
+
int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data);
@@ -176,7 +190,9 @@ int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
u16 index, u16 size, void *data);
-int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb);
+int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
+ struct asix_rx_fixup_info *rx);
+int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 50d167330d38..f7f623a5390e 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -51,49 +51,89 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
value, index, data, size);
}
-int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
+ struct asix_rx_fixup_info *rx)
{
int offset = 0;
- while (offset + sizeof(u32) < skb->len) {
- struct sk_buff *ax_skb;
- u16 size;
- u32 header = get_unaligned_le32(skb->data + offset);
-
- offset += sizeof(u32);
-
- /* get the packet length */
- size = (u16) (header & 0x7ff);
- if (size != ((~header >> 16) & 0x07ff)) {
- netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
- return 0;
+ while (offset + sizeof(u16) <= skb->len) {
+ u16 remaining = 0;
+ unsigned char *data;
+
+ if (!rx->size) {
+ if ((skb->len - offset == sizeof(u16)) ||
+ rx->split_head) {
+ if(!rx->split_head) {
+ rx->header = get_unaligned_le16(
+ skb->data + offset);
+ rx->split_head = true;
+ offset += sizeof(u16);
+ break;
+ } else {
+ rx->header |= (get_unaligned_le16(
+ skb->data + offset)
+ << 16);
+ rx->split_head = false;
+ offset += sizeof(u16);
+ }
+ } else {
+ rx->header = get_unaligned_le32(skb->data +
+ offset);
+ offset += sizeof(u32);
+ }
+
+ /* get the packet length */
+ rx->size = (u16) (rx->header & 0x7ff);
+ if (rx->size != ((~rx->header >> 16) & 0x7ff)) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
+ rx->header, offset);
+ rx->size = 0;
+ return 0;
+ }
+ rx->ax_skb = netdev_alloc_skb_ip_align(dev->net,
+ rx->size);
+ if (!rx->ax_skb)
+ return 0;
}
- if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
- (size + offset > skb->len)) {
+ if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
- size);
+ rx->size);
+ kfree_skb(rx->ax_skb);
return 0;
}
- ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
- if (!ax_skb)
- return 0;
- skb_put(ax_skb, size);
- memcpy(ax_skb->data, skb->data + offset, size);
- usbnet_skb_return(dev, ax_skb);
+ if (rx->size > skb->len - offset) {
+ remaining = rx->size - (skb->len - offset);
+ rx->size = skb->len - offset;
+ }
+
+ data = skb_put(rx->ax_skb, rx->size);
+ memcpy(data, skb->data + offset, rx->size);
+ if (!remaining)
+ usbnet_skb_return(dev, rx->ax_skb);
- offset += (size + 1) & 0xfffe;
+ offset += (rx->size + 1) & 0xfffe;
+ rx->size = remaining;
}
if (skb->len != offset) {
- netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
- skb->len);
+ netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
+ skb->len, offset);
return 0;
}
+
return 1;
}
+int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct asix_common_private *dp = dev->driver_priv;
+ struct asix_rx_fixup_info *rx = &dp->rx_fixup_info;
+
+ return asix_rx_fixup_internal(dev, skb, rx);
+}
+
struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
@@ -510,8 +550,8 @@ void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strncpy (info->driver, DRIVER_NAME, sizeof info->driver);
- strncpy (info->version, DRIVER_VERSION, sizeof info->version);
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
info->eedump_len = AX_EEPROM_LEN;
}
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 7a6e758f48e7..2205dbc8d32f 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -422,14 +422,25 @@ static const struct net_device_ops ax88772_netdev_ops = {
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int ret, embd_phy;
+ int ret, embd_phy, i;
u8 buf[ETH_ALEN];
u32 phyid;
usbnet_get_endpoints(dev,intf);
/* Get the MAC address */
- ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
+ if (dev->driver_info->data & FLAG_EEPROM_MAC) {
+ for (i = 0; i < (ETH_ALEN >> 1); i++) {
+ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i,
+ 0, 2, buf + i * 2);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
+ 0, 0, ETH_ALEN, buf);
+ }
+
if (ret < 0) {
netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
return ret;
@@ -484,9 +495,19 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->rx_urb_size = 2048;
}
+ dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
+ if (!dev->driver_priv)
+ return -ENOMEM;
+
return 0;
}
+static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ if (dev->driver_priv)
+ kfree(dev->driver_priv);
+}
+
static const struct ethtool_ops ax88178_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = asix_get_link,
@@ -818,6 +839,10 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
dev->rx_urb_size = 2048;
}
+ dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
+ if (!dev->driver_priv)
+ return -ENOMEM;
+
return 0;
}
@@ -864,22 +889,38 @@ static const struct driver_info hawking_uf200_info = {
static const struct driver_info ax88772_info = {
.description = "ASIX AX88772 USB 2.0 Ethernet",
.bind = ax88772_bind,
+ .unbind = ax88772_unbind,
.status = asix_status,
.link_reset = ax88772_link_reset,
.reset = ax88772_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
- .rx_fixup = asix_rx_fixup,
+ .rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
+static const struct driver_info ax88772b_info = {
+ .description = "ASIX AX88772B USB 2.0 Ethernet",
+ .bind = ax88772_bind,
+ .unbind = ax88772_unbind,
+ .status = asix_status,
+ .link_reset = ax88772_link_reset,
+ .reset = ax88772_reset,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup_common,
+ .tx_fixup = asix_tx_fixup,
+ .data = FLAG_EEPROM_MAC,
+};
+
static const struct driver_info ax88178_info = {
.description = "ASIX AX88178 USB 2.0 Ethernet",
.bind = ax88178_bind,
+ .unbind = ax88772_unbind,
.status = asix_status,
.link_reset = ax88178_link_reset,
.reset = ax88178_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
- .rx_fixup = asix_rx_fixup,
+ .rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
@@ -953,7 +994,7 @@ static const struct usb_device_id products [] = {
}, {
// ASIX AX88772B 10/100
USB_DEVICE (0x0b95, 0x772b),
- .driver_info = (unsigned long) &ax88772_info,
+ .driver_info = (unsigned long) &ax88772b_info,
}, {
// ASIX AX88772 10/100
USB_DEVICE (0x0b95, 0x7720),
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index c8e0aa85fb8e..d012203b0f29 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -35,6 +35,7 @@ struct ax88172a_private {
u16 phy_addr;
u16 oldmode;
int use_embdphy;
+ struct asix_rx_fixup_info rx_fixup_info;
};
/* MDIO read and write wrappers for phylib */
@@ -116,7 +117,6 @@ static int ax88172a_init_mdio(struct usbnet *dev)
priv->mdio->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!priv->mdio->irq) {
- netdev_err(dev->net, "Could not allocate mdio->irq\n");
ret = -ENOMEM;
goto mfree;
}
@@ -235,10 +235,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
usbnet_get_endpoints(dev, intf);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- netdev_err(dev->net, "Could not allocate memory for private data\n");
+ if (!priv)
return -ENOMEM;
- }
+
dev->driver_priv = priv;
/* Get the MAC address */
@@ -377,7 +376,7 @@ static int ax88172a_reset(struct usbnet *dev)
priv->phydev = phy_connect(dev->net, priv->phy_name,
&ax88172a_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phydev)) {
netdev_err(dev->net, "Could not connect to PHY device %s\n",
priv->phy_name);
@@ -400,6 +399,14 @@ out:
}
+static int ax88172a_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct ax88172a_private *dp = dev->driver_priv;
+ struct asix_rx_fixup_info *rx = &dp->rx_fixup_info;
+
+ return asix_rx_fixup_internal(dev, skb, rx);
+}
+
const struct driver_info ax88172a_info = {
.description = "ASIX AX88172A USB 2.0 Ethernet",
.bind = ax88172a_bind,
@@ -409,6 +416,6 @@ const struct driver_info ax88172a_info = {
.status = ax88172a_status,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
FLAG_MULTI_PACKET,
- .rx_fixup = asix_rx_fixup,
+ .rx_fixup = ax88172a_rx_fixup,
.tx_fixup = asix_tx_fixup,
};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 18d9579123ea..8d5cac2d8e33 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -685,9 +685,9 @@ static void catc_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct catc *catc = netdev_priv(dev);
- strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN);
- usb_make_path (catc->usbdev, info->bus_info, sizeof info->bus_info);
+ strlcpy(info->driver, driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}
static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
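
Several get_drvinfo handlers in this series (catc above, and cdc_ncm, pegasus, rtl8150 and sierra_net below) move from strncpy(), which does not guarantee NUL termination and was bounded here by the unrelated ETHTOOL_BUSINFO_LEN constant, to strlcpy() bounded by the destination field. A minimal sketch of the resulting idiom; the driver name and version strings are placeholders and this helper is not part of the patch (assumes <linux/ethtool.h> and <linux/string.h>):

static void example_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	/* strlcpy() always NUL-terminates and never writes past the buffer */
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
}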
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 3f3d12d766e7..57136dc1b887 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -615,6 +615,13 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
},
+/* AnyDATA ADU960S - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/*
* WHITELIST!!!
*
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 42f51c71ec1f..248d2dc765a5 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -374,6 +374,21 @@ static const struct driver_info cdc_mbim_info = {
.tx_fixup = cdc_mbim_tx_fixup,
};
+/* MBIM and NCM devices should not need a ZLP after NTBs with
+ * dwNtbOutMaxSize length. This driver_info is for the exceptional
+ * devices requiring it anyway, allowing them to be supported without
+ * forcing the performance penalty on all the sane devices.
+ */
+static const struct driver_info cdc_mbim_info_zlp = {
+ .description = "CDC MBIM",
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
+ .bind = cdc_mbim_bind,
+ .unbind = cdc_mbim_unbind,
+ .manage_power = cdc_mbim_manage_power,
+ .rx_fixup = cdc_mbim_rx_fixup,
+ .tx_fixup = cdc_mbim_tx_fixup,
+};
+
static const struct usb_device_id mbim_devs[] = {
/* This duplicate NCM entry is intentional. MBIM devices can
* be disguised as NCM by default, and this is necessary to
@@ -385,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
+ /* Sierra Wireless MC7710 needs ZLPs */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+ },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 71b6e92b8e9b..4a8c25a22294 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -65,9 +65,9 @@ cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
struct usbnet *dev = netdev_priv(net);
- strncpy(info->driver, dev->driver_name, sizeof(info->driver));
- strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
- strncpy(info->fw_version, dev->driver_info->description,
+ strlcpy(info->driver, dev->driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, dev->driver_info->description,
sizeof(info->fw_version));
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
@@ -435,6 +435,13 @@ advance:
len -= temp;
}
+ /* some buggy devices have an IAD but no CDC Union */
+ if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
+ ctx->control = intf;
+ ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
+ dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
+ }
+
/* check if we got everything */
if ((ctx->control == NULL) || (ctx->data == NULL) ||
((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
@@ -497,7 +504,8 @@ advance:
error2:
usb_set_intfdata(ctx->control, NULL);
usb_set_intfdata(ctx->data, NULL);
- usb_driver_release_interface(driver, ctx->data);
+ if (ctx->data != ctx->control)
+ usb_driver_release_interface(driver, ctx->data);
error:
cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
dev->data[0] = 0;
@@ -568,9 +576,14 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
if ((intf->num_altsetting == 2) &&
!usb_set_interface(dev->udev,
intf->cur_altsetting->desc.bInterfaceNumber,
- CDC_NCM_COMM_ALTSETTING_MBIM) &&
- cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
- return -ENODEV;
+ CDC_NCM_COMM_ALTSETTING_MBIM)) {
+ if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+ return -ENODEV;
+ else
+ usb_set_interface(dev->udev,
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ CDC_NCM_COMM_ALTSETTING_NCM);
+ }
#endif
/* NCM data altsetting is always 1 */
@@ -1155,6 +1168,20 @@ static const struct driver_info wwan_info = {
.tx_fixup = cdc_ncm_tx_fixup,
};
+/* Same as wwan_info, but with FLAG_NOARP */
+static const struct driver_info wwan_noarp_info = {
+ .description = "Mobile Broadband Network Device (NO ARP)",
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_WWAN | FLAG_NOARP,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .check_connect = cdc_ncm_check_connect,
+ .manage_power = usbnet_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+};
+
static const struct usb_device_id cdc_devs[] = {
/* Ericsson MBM devices like F5521gw */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
@@ -1193,6 +1220,16 @@ static const struct usb_device_id cdc_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
.driver_info = (unsigned long)&wwan_info,
},
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
+ /* Infineon(now Intel) HSPA Modem platform */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_noarp_info,
+ },
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 3f554c1149f3..174e5ecea4cc 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -45,6 +45,12 @@
#define DM_MCAST_ADDR 0x16 /* 8 bytes */
#define DM_GPR_CTRL 0x1e
#define DM_GPR_DATA 0x1f
+#define DM_CHIP_ID 0x2c
+#define DM_MODE_CTRL 0x91 /* only on dm9620 */
+
+/* chip id values */
+#define ID_DM9601 0
+#define ID_DM9620 1
#define DM_MAX_MCAST 64
#define DM_MCAST_SIZE 8
@@ -53,7 +59,6 @@
#define DM_RX_OVERHEAD 7 /* 3 byte header + 4 byte crc tail */
#define DM_TIMEOUT 1000
-
static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
{
int err;
@@ -84,32 +89,23 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
- return usbnet_write_cmd(dev, DM_WRITE_REGS,
+ return usbnet_write_cmd(dev, DM_WRITE_REG,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, reg, NULL, 0);
}
-static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
- u16 length, void *data)
+static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
{
usbnet_write_cmd_async(dev, DM_WRITE_REGS,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, reg, data, length);
-}
-
-static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
-{
- netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length);
-
- dm_write_async_helper(dev, reg, 0, length, data);
+ 0, reg, data, length);
}
static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
- netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n",
- reg, value);
-
- dm_write_async_helper(dev, reg, value, 0, NULL);
+ usbnet_write_cmd_async(dev, DM_WRITE_REG,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, reg, NULL, 0);
}
static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value)
@@ -122,7 +118,7 @@ static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *valu
dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0xc : 0x4);
for (i = 0; i < DM_TIMEOUT; i++) {
- u8 tmp;
+ u8 tmp = 0;
udelay(1);
ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
@@ -165,7 +161,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
for (i = 0; i < DM_TIMEOUT; i++) {
- u8 tmp;
+ u8 tmp = 0;
udelay(1);
ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
@@ -358,7 +354,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
- u8 mac[ETH_ALEN];
+ u8 mac[ETH_ALEN], id;
ret = usbnet_get_endpoints(dev, intf);
if (ret)
@@ -399,6 +395,24 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
__dm9601_set_mac_address(dev);
}
+ if (dm_read_reg(dev, DM_CHIP_ID, &id) < 0) {
+ netdev_err(dev->net, "Error reading chip ID\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* put dm9620 devices in dm9601 mode */
+ if (id == ID_DM9620) {
+ u8 mode;
+
+ if (dm_read_reg(dev, DM_MODE_CTRL, &mode) < 0) {
+ netdev_err(dev->net, "Error reading MODE_CTRL\n");
+ ret = -ENODEV;
+ goto out;
+ }
+ dm_write_reg(dev, DM_MODE_CTRL, mode & 0x7f);
+ }
+
/* power up phy */
dm_write_reg(dev, DM_GPR_CTRL, 1);
dm_write_reg(dev, DM_GPR_DATA, 0);
@@ -581,6 +595,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index cd8ccb240f4b..e2dd3249b6bd 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2035,25 +2035,23 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
tty = tty_port_tty_get(&serial->port);
/* Push data to tty */
- if (tty) {
- write_length_remaining = urb->actual_length -
- serial->curr_rx_urb_offset;
- D1("data to push to tty");
- while (write_length_remaining) {
- if (test_bit(TTY_THROTTLED, &tty->flags)) {
- tty_kref_put(tty);
- return -1;
- }
- curr_write_len = tty_insert_flip_string
- (tty, urb->transfer_buffer +
- serial->curr_rx_urb_offset,
- write_length_remaining);
- serial->curr_rx_urb_offset += curr_write_len;
- write_length_remaining -= curr_write_len;
- tty_flip_buffer_push(tty);
+ write_length_remaining = urb->actual_length -
+ serial->curr_rx_urb_offset;
+ D1("data to push to tty");
+ while (write_length_remaining) {
+ if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+ tty_kref_put(tty);
+ return -1;
}
- tty_kref_put(tty);
+ curr_write_len = tty_insert_flip_string(&serial->port,
+ urb->transfer_buffer + serial->curr_rx_urb_offset,
+ write_length_remaining);
+ serial->curr_rx_urb_offset += curr_write_len;
+ write_length_remaining -= curr_write_len;
+ tty_flip_buffer_push(&serial->port);
}
+ tty_kref_put(tty);
+
if (write_length_remaining == 0) {
serial->curr_rx_urb_offset = 0;
serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
@@ -2317,10 +2315,8 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->rx_urb[i]->transfer_buffer_length = 0;
serial->rx_data[i] = kzalloc(serial->rx_data_length,
GFP_KERNEL);
- if (!serial->rx_data[i]) {
- dev_err(dev, "%s - Out of memory\n", __func__);
+ if (!serial->rx_data[i])
goto exit;
- }
}
/* TX, allocate urb and initialize */
@@ -2336,15 +2332,12 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->tx_buffer_count = 0;
serial->tx_data_length = tx_size;
serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL);
- if (!serial->tx_data) {
- dev_err(dev, "%s - Out of memory\n", __func__);
+ if (!serial->tx_data)
goto exit;
- }
+
serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL);
- if (!serial->tx_buffer) {
- dev_err(dev, "%s - Out of memory\n", __func__);
+ if (!serial->tx_buffer)
goto exit;
- }
return 0;
exit:
@@ -2580,10 +2573,8 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
}
hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE,
GFP_KERNEL);
- if (!hso_net->mux_bulk_rx_buf_pool[i]) {
- dev_err(&interface->dev, "Could not allocate rx buf\n");
+ if (!hso_net->mux_bulk_rx_buf_pool[i])
goto exit;
- }
}
hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!hso_net->mux_bulk_tx_urb) {
@@ -2591,10 +2582,8 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
goto exit;
}
hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL);
- if (!hso_net->mux_bulk_tx_buf) {
- dev_err(&interface->dev, "Could not allocate tx buf\n");
+ if (!hso_net->mux_bulk_tx_buf)
goto exit;
- }
add_net_device(hso_dev);
@@ -2818,10 +2807,8 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface)
mux->shared_intr_buf =
kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize),
GFP_KERNEL);
- if (!mux->shared_intr_buf) {
- dev_err(&interface->dev, "Could not allocate intr buf?\n");
+ if (!mux->shared_intr_buf)
goto exit;
- }
mutex_init(&mux->shared_int_lock);
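
The put_rxbuf_data() rework above relies on the tty_port based flip-buffer API, which can queue received bytes even when no tty is currently attached to the port. A small sketch of that API, assuming port is an initialised struct tty_port and buf/len is the received payload (helper name is illustrative, <linux/tty_flip.h> assumed):

static int push_rx_to_port(struct tty_port *port, const unsigned char *buf,
			   size_t len)
{
	/* may copy less than len when the flip buffers are full */
	int copied = tty_insert_flip_string(port, buf, len);

	tty_flip_buffer_push(port);
	return copied;
}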
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 92c49e0a59ec..0192073e53a3 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -159,7 +159,6 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
}
memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
- memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN);
return status;
}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index a0b5807b30d4..73051d10ead2 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -149,11 +149,9 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
DECLARE_WAITQUEUE(wait, current);
buffer = kmalloc(size, GFP_KERNEL);
- if (!buffer) {
- netif_warn(pegasus, drv, pegasus->net,
- "out of memory in %s\n", __func__);
+ if (!buffer)
return -ENOMEM;
- }
+
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
while (pegasus->flags & ETH_REGS_CHANGED)
@@ -1074,8 +1072,9 @@ static void pegasus_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
pegasus_t *pegasus = netdev_priv(dev);
- strncpy(info->driver, driver_name, sizeof(info->driver) - 1);
- strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
+
+ strlcpy(info->driver, driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
}
@@ -1096,6 +1095,7 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
pegasus_t *pegasus = netdev_priv(dev);
u8 reg78 = 0x04;
+ int ret;
if (wol->wolopts & ~WOL_SUPPORTED)
return -EINVAL;
@@ -1110,7 +1110,12 @@ pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
else
pegasus->eth_regs[0] &= ~0x10;
pegasus->wolopts = wol->wolopts;
- return set_register(pegasus, WakeupControl, reg78);
+
+ ret = set_register(pegasus, WakeupControl, reg78);
+ if (!ret)
+ ret = device_set_wakeup_enable(&pegasus->usb->dev,
+ wol->wolopts);
+ return ret;
}
static inline void pegasus_reset_wol(struct net_device *dev)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6a1ca500e612..efb5c7c33a28 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
/* 2. Combined interface devices matching on class+protocol */
{ /* Huawei E367 and possibly others in "Windows mode" */
@@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+ { /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* Pantech UML290, P4200 and more */
USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
.driver_info = (unsigned long)&qmi_wwan_info,
@@ -397,8 +409,16 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* ADU960S */
+ USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
/* 3. Combined interface devices matching on interface number */
+ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
@@ -433,6 +453,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
{QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */
+ {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
{QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
{QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
{QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
@@ -459,6 +480,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 4a4335833c36..cc49aac70224 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -431,7 +431,6 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
goto halt_fail_and_release;
}
memcpy(net->dev_addr, bp, ETH_ALEN);
- memcpy(net->perm_addr, bp, ETH_ALEN);
/* set a nonzero filter to enable data transfers */
memset(u.set, 0, sizeof *u.set);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 5f39a3b225ef..a491d3a95393 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -776,9 +776,9 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf
{
rtl8150_t *dev = netdev_priv(netdev);
- strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN);
- usb_make_path(dev->udev, info->bus_info, sizeof info->bus_info);
+ strlcpy(info->driver, driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 18dd4257ab17..79ab2435d9d3 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -459,11 +459,9 @@ static void sierra_net_kevent(struct work_struct *work)
/* Query the modem for the LSI message */
buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL);
- if (!buf) {
- netdev_err(dev->net,
- "failed to allocate buf for LS msg\n");
+ if (!buf)
return;
- }
+
ifnum = priv->ifnum;
len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
USB_CDC_GET_ENCAPSULATED_RESPONSE,
@@ -598,8 +596,8 @@ static void sierra_net_get_drvinfo(struct net_device *net,
{
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strncpy(info->driver, driver_name, sizeof info->driver);
- strncpy(info->version, DRIVER_VERSION, sizeof info->version);
+ strlcpy(info->driver, driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
static u32 sierra_net_get_link(struct net_device *net)
@@ -686,10 +684,8 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
}
/* Initialize sierra private data */
priv = kzalloc(sizeof *priv, GFP_KERNEL);
- if (!priv) {
- dev_err(&dev->udev->dev, "No memory");
+ if (!priv)
return -ENOMEM;
- }
priv->usbnet = dev;
priv->ifnum = ifacenum;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 251a3354a4b0..9abe51710f22 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1393,13 +1393,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
}
dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv),
- GFP_KERNEL);
+ GFP_KERNEL);
pdata = (struct smsc75xx_priv *)(dev->data[0]);
- if (!pdata) {
- netdev_warn(dev->net, "Unable to allocate smsc75xx_priv\n");
+ if (!pdata)
return -ENOMEM;
- }
pdata->dev = dev;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 9b736701f854..ff4fa37dfd1d 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -55,6 +55,13 @@
#define FEATURE_PHY_NLP_CROSSOVER (0x02)
#define FEATURE_AUTOSUSPEND (0x04)
+#define SUSPEND_SUSPEND0 (0x01)
+#define SUSPEND_SUSPEND1 (0x02)
+#define SUSPEND_SUSPEND2 (0x04)
+#define SUSPEND_SUSPEND3 (0x08)
+#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
+ SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
+
struct smsc95xx_priv {
u32 mac_cr;
u32 hash_hi;
@@ -62,6 +69,7 @@ struct smsc95xx_priv {
u32 wolopts;
spinlock_t mac_cr_lock;
u8 features;
+ u8 suspend_flags;
};
static bool turbo_mode = true;
@@ -513,10 +521,8 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
u32 flow, afc_cfg = 0;
int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading AFC_CFG\n");
+ if (ret < 0)
return ret;
- }
if (duplex == DUPLEX_FULL) {
u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -541,16 +547,10 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
}
ret = smsc95xx_write_reg(dev, FLOW, flow);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing FLOW\n");
- return ret;
- }
-
- ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
if (ret < 0)
- netdev_warn(dev->net, "Error writing AFC_CFG\n");
+ return ret;
- return ret;
+ return smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
}
static int smsc95xx_link_reset(struct usbnet *dev)
@@ -564,16 +564,12 @@ static int smsc95xx_link_reset(struct usbnet *dev)
/* clear interrupt status */
ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing INT_STS\n");
+ if (ret < 0)
return ret;
- }
mii_check_media(mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
@@ -595,10 +591,8 @@ static int smsc95xx_link_reset(struct usbnet *dev)
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing MAC_CR\n");
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
if (ret < 0)
@@ -638,10 +632,8 @@ static int smsc95xx_set_features(struct net_device *netdev,
int ret;
ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
+ if (ret < 0)
return ret;
- }
if (features & NETIF_F_HW_CSUM)
read_buf |= Tx_COE_EN_;
@@ -654,10 +646,8 @@ static int smsc95xx_set_features(struct net_device *netdev,
read_buf &= ~Rx_COE_EN_;
ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
return 0;
@@ -800,16 +790,10 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
int ret;
ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
- return ret;
- }
-
- ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
if (ret < 0)
- netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
+ return ret;
- return ret;
+ return smsc95xx_write_reg(dev, ADDRH, addr_hi);
}
/* starts the TX path */
@@ -825,17 +809,11 @@ static int smsc95xx_start_tx_path(struct usbnet *dev)
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Enable Tx at SCSRs */
- ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_);
- if (ret < 0)
- netdev_warn(dev->net, "Failed to write TX_CFG: %d\n", ret);
-
- return ret;
+ return smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_);
}
/* Starts the Receive path */
@@ -843,17 +821,12 @@ static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
unsigned long flags;
- int ret;
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
pdata->mac_cr |= MAC_CR_RXEN_;
spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
- ret = __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
- if (ret < 0)
- netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret);
-
- return ret;
+ return __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
}
static int smsc95xx_phy_initialize(struct usbnet *dev)
@@ -910,19 +883,15 @@ static int smsc95xx_reset(struct usbnet *dev)
netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n");
+ if (ret < 0)
return ret;
- }
timeout = 0;
do {
msleep(10);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
timeout++;
} while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
@@ -932,19 +901,15 @@ static int smsc95xx_reset(struct usbnet *dev)
}
ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
+ if (ret < 0)
return ret;
- }
timeout = 0;
do {
msleep(10);
ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
+ if (ret < 0)
return ret;
- }
timeout++;
} while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
@@ -961,10 +926,8 @@ static int smsc95xx_reset(struct usbnet *dev)
dev->net->dev_addr);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n",
read_buf);
@@ -972,16 +935,12 @@ static int smsc95xx_reset(struct usbnet *dev)
read_buf |= HW_CFG_BIR_;
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n");
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net,
"Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
@@ -1002,42 +961,32 @@ static int smsc95xx_reset(struct usbnet *dev)
(ulong)dev->rx_urb_size);
ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net,
"Read Value from BURST_CAP after writing: 0x%08x\n",
read_buf);
ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net,
"Read Value from BULK_IN_DLY after writing: 0x%08x\n",
read_buf);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG: 0x%08x\n",
read_buf);
@@ -1051,69 +1000,51 @@ static int smsc95xx_reset(struct usbnet *dev)
read_buf |= NET_IP_ALIGN << 9;
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net,
"Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
+ if (ret < 0)
return ret;
- }
netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
/* Configure GPIO pins as LED outputs */
write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
LED_GPIO_CFG_FDX_LED;
ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Init Tx */
ret = smsc95xx_write_reg(dev, FLOW, 0);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Don't need mac_cr_lock during initialisation */
ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Init Rx */
/* Set Vlan */
ret = smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write VLAN1: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Enable or disable checksum offload engines */
ret = smsc95xx_set_features(dev->net, dev->net->features);
@@ -1131,19 +1062,15 @@ static int smsc95xx_reset(struct usbnet *dev)
}
ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* enable PHY interrupts */
read_buf |= INT_EP_CTL_PHY_INT_;
ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_start_tx_path(dev);
if (ret < 0) {
@@ -1189,13 +1116,11 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
}
dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
- GFP_KERNEL);
+ GFP_KERNEL);
pdata = (struct smsc95xx_priv *)(dev->data[0]);
- if (!pdata) {
- netdev_warn(dev->net, "Unable to allocate struct smsc95xx_priv\n");
+ if (!pdata)
return -ENOMEM;
- }
spin_lock_init(&pdata->mac_cr_lock);
@@ -1213,10 +1138,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
/* detect device revision as different features may be available */
ret = smsc95xx_read_reg(dev, ID_REV, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
+ if (ret < 0)
return ret;
- }
val >>= 16;
if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9530_) ||
@@ -1261,17 +1184,13 @@ static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
/* read to clear */
ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
+ if (ret < 0)
return ret;
- }
/* enable interrupt source */
ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PHY_INT_MASK\n");
+ if (ret < 0)
return ret;
- }
ret |= mask;
@@ -1287,16 +1206,12 @@ static int smsc95xx_link_ok_nopm(struct usbnet *dev)
/* first, a dummy read, needed to latch some MII phys */
ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading MII_BMSR\n");
+ if (ret < 0)
return ret;
- }
ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading MII_BMSR\n");
+ if (ret < 0)
return ret;
- }
return !!(ret & BMSR_LSTATUS);
}
@@ -1308,19 +1223,15 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
int ret;
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
val |= PM_CTL_SUS_MODE_0;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
/* clear wol status */
val &= ~PM_CTL_WUPS_;
@@ -1331,15 +1242,13 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
val |= PM_CTL_WUPS_ED_;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
/* read back PM_CTRL */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0)
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND0;
return ret;
}
@@ -1360,10 +1269,8 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
/* enable energy detect power-down mode */
ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n");
+ if (ret < 0)
return ret;
- }
ret |= MODE_CTRL_STS_EDPWRDOWN_;
@@ -1371,52 +1278,133 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
/* enter SUSPEND1 mode */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_1;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
/* clear wol status, enable energy detection */
val &= ~PM_CTL_WUPS_;
val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_);
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0)
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND1;
return ret;
}
static int smsc95xx_enter_suspend2(struct usbnet *dev)
{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
u32 val;
int ret;
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
val |= PM_CTL_SUS_MODE_2;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0)
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND2;
return ret;
}
+static int smsc95xx_enter_suspend3(struct usbnet *dev)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ u32 val;
+ int ret;
+
+ ret = smsc95xx_read_reg_nopm(dev, RX_FIFO_INF, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val & 0xFFFF) {
+ netdev_info(dev->net, "rx fifo not empty in autosuspend\n");
+ return -EBUSY;
+ }
+
+ ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
+ val |= PM_CTL_SUS_MODE_3 | PM_CTL_RES_CLR_WKP_STS;
+
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0)
+ return ret;
+
+ /* clear wol status */
+ val &= ~PM_CTL_WUPS_;
+ val |= PM_CTL_WUPS_WOL_;
+
+ ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
+ if (ret < 0)
+ return ret;
+
+ pdata->suspend_flags |= SUSPEND_SUSPEND3;
+
+ return 0;
+}
+
+static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ int ret;
+
+ if (!netif_running(dev->net)) {
+ /* interface is ifconfig down so fully power down hw */
+ netdev_dbg(dev->net, "autosuspend entering SUSPEND2\n");
+ return smsc95xx_enter_suspend2(dev);
+ }
+
+ if (!link_up) {
+ /* link is down so enter EDPD mode, but only if device can
+ * reliably resume from it. This check should be redundant
+ * as current FEATURE_AUTOSUSPEND parts also support
+ * FEATURE_PHY_NLP_CROSSOVER but it's included for clarity */
+ if (!(pdata->features & FEATURE_PHY_NLP_CROSSOVER)) {
+ netdev_warn(dev->net, "EDPD not supported\n");
+ return -EBUSY;
+ }
+
+ netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n");
+
+ /* enable PHY wakeup events so we wake if a cable is attached */
+ ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
+ PHY_INT_MASK_ANEG_COMP_);
+ if (ret < 0) {
+ netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
+ return ret;
+ }
+
+ netdev_info(dev->net, "entering SUSPEND1 mode\n");
+ return smsc95xx_enter_suspend1(dev);
+ }
+
+ /* enable PHY wakeup events so we can remote wake if the cable is pulled */
+ ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
+ PHY_INT_MASK_LINK_DOWN_);
+ if (ret < 0) {
+ netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
+ return ret;
+ }
+
+ netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n");
+ return smsc95xx_enter_suspend3(dev);
+}
+
static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -1424,15 +1412,35 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
u32 val, link_up;
int ret;
+ /* TODO: don't indicate this feature to usb framework if
+ * our current hardware doesn't have the capability
+ */
+ if ((message.event == PM_EVENT_AUTO_SUSPEND) &&
+ (!(pdata->features & FEATURE_AUTOSUSPEND))) {
+ netdev_warn(dev->net, "autosuspend not supported\n");
+ return -EBUSY;
+ }
+
ret = usbnet_suspend(intf, message);
if (ret < 0) {
netdev_warn(dev->net, "usbnet_suspend error\n");
return ret;
}
+ if (pdata->suspend_flags) {
+ netdev_warn(dev->net, "error during last resume\n");
+ pdata->suspend_flags = 0;
+ }
+
/* determine if link is up using only _nopm functions */
link_up = smsc95xx_link_ok_nopm(dev);
+ if (message.event == PM_EVENT_AUTO_SUSPEND) {
+ ret = smsc95xx_autosuspend(dev, link_up);
+ goto done;
+ }
+
+ /* if we get this far we're not autosuspending */
/* if no wol options set, or if link is down and we're not waking on
* PHY activity, enter lowest power SUSPEND2 mode
*/
@@ -1442,32 +1450,24 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
/* disable energy detect (link up) & wake up events */
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading WUCSR\n");
+ if (ret < 0)
goto done;
- }
val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUCSR\n");
+ if (ret < 0)
goto done;
- }
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
goto done;
- }
val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
goto done;
- }
ret = smsc95xx_enter_suspend2(dev);
goto done;
@@ -1565,7 +1565,6 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
for (i = 0; i < (wuff_filter_count * 4); i++) {
ret = smsc95xx_write_reg_nopm(dev, WUFF, filter_mask[i]);
if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUFF\n");
kfree(filter_mask);
goto done;
}
@@ -1574,67 +1573,51 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
for (i = 0; i < (wuff_filter_count / 4); i++) {
ret = smsc95xx_write_reg_nopm(dev, WUFF, command[i]);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUFF\n");
+ if (ret < 0)
goto done;
- }
}
for (i = 0; i < (wuff_filter_count / 4); i++) {
ret = smsc95xx_write_reg_nopm(dev, WUFF, offset[i]);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUFF\n");
+ if (ret < 0)
goto done;
- }
}
for (i = 0; i < (wuff_filter_count / 2); i++) {
ret = smsc95xx_write_reg_nopm(dev, WUFF, crc[i]);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUFF\n");
+ if (ret < 0)
goto done;
- }
}
/* clear any pending pattern match packet status */
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading WUCSR\n");
+ if (ret < 0)
goto done;
- }
val |= WUCSR_WUFR_;
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUCSR\n");
+ if (ret < 0)
goto done;
- }
}
if (pdata->wolopts & WAKE_MAGIC) {
/* clear any pending magic packet status */
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading WUCSR\n");
+ if (ret < 0)
goto done;
- }
val |= WUCSR_MPR_;
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUCSR\n");
+ if (ret < 0)
goto done;
- }
}
/* enable/disable wakeup sources */
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading WUCSR\n");
+ if (ret < 0)
goto done;
- }
if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) {
netdev_info(dev->net, "enabling pattern match wakeup\n");
@@ -1653,17 +1636,13 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
}
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUCSR\n");
+ if (ret < 0)
goto done;
- }
/* enable wol wakeup source */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
goto done;
- }
val |= PM_CTL_WOL_EN_;
@@ -1672,10 +1651,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
val |= PM_CTL_ED_EN_;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
goto done;
- }
/* enable receiver to enable frame reception */
smsc95xx_start_rx_path(dev, 1);
@@ -1694,42 +1671,40 @@ static int smsc95xx_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ u8 suspend_flags = pdata->suspend_flags;
int ret;
u32 val;
BUG_ON(!dev);
- if (pdata->wolopts) {
+ netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags);
+
+ /* do this first to ensure it's cleared even in error case */
+ pdata->suspend_flags = 0;
+
+ if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading WUCSR\n");
+ if (ret < 0)
return ret;
- }
val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing WUCSR\n");
+ if (ret < 0)
return ret;
- }
/* clear wake-up status */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error reading PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
val &= ~PM_CTL_WOL_EN_;
val |= PM_CTL_WUPS_;
ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
- if (ret < 0) {
- netdev_warn(dev->net, "Error writing PM_CTRL\n");
+ if (ret < 0)
return ret;
- }
}
ret = usbnet_resume(intf);
@@ -1891,6 +1866,26 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
return skb;
}
+static int smsc95xx_manage_power(struct usbnet *dev, int on)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+ dev->intf->needs_remote_wakeup = on;
+
+ if (pdata->features & FEATURE_AUTOSUSPEND)
+ return 0;
+
+ /* this chip revision doesn't support autosuspend */
+ netdev_info(dev->net, "hardware doesn't support USB autosuspend\n");
+
+ if (on)
+ usb_autopm_get_interface_no_resume(dev->intf);
+ else
+ usb_autopm_put_interface(dev->intf);
+
+ return 0;
+}
+
static const struct driver_info smsc95xx_info = {
.description = "smsc95xx USB 2.0 Ethernet",
.bind = smsc95xx_bind,
@@ -1900,6 +1895,7 @@ static const struct driver_info smsc95xx_info = {
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
.status = smsc95xx_status,
+ .manage_power = smsc95xx_manage_power,
.flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
};
@@ -2007,6 +2003,7 @@ static struct usb_driver smsc95xx_driver = {
.reset_resume = smsc95xx_resume,
.disconnect = usbnet_disconnect,
.disable_hub_initiated_lpm = 1,
+ .supports_autosuspend = 1,
};
module_usb_driver(smsc95xx_driver);
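
Autosuspend for a usbnet minidriver needs three cooperating pieces, all added above for smsc95xx: the usb_driver advertises supports_autosuspend, the driver_info provides a manage_power() hook, and that hook tells the USB core whether remote wakeup must stay armed. A stripped-down sketch of the wiring; the names are illustrative and the probe/disconnect/id_table fields a real usb_driver needs are omitted:

static int example_manage_power(struct usbnet *dev, int on)
{
	/* usbnet calls this with on=1 when the interface is opened */
	dev->intf->needs_remote_wakeup = on;
	return 0;
}

static const struct driver_info example_info = {
	.description	= "example usbnet minidriver",
	.manage_power	= example_manage_power,
};

static struct usb_driver example_driver = {
	.name			= "example",
	.supports_autosuspend	= 1,
};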
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3d4bf01641b4..51f3192f3931 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
unsigned long lockflags;
size_t size = dev->rx_urb_size;
+ /* prevent rx skb allocation when error ratio is high */
+ if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+ usb_free_urb(urb);
+ return -ENOLINK;
+ }
+
skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
if (!skb) {
netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@@ -539,6 +545,17 @@ block:
break;
}
+ /* stop rx if packet error rate is high */
+ if (++dev->pkt_cnt > 30) {
+ dev->pkt_cnt = 0;
+ dev->pkt_err = 0;
+ } else {
+ if (state == rx_cleanup)
+ dev->pkt_err++;
+ if (dev->pkt_err > 20)
+ set_bit(EVENT_RX_KILL, &dev->flags);
+ }
+
state = defer_bh(dev, skb, &dev->rxq, state);
if (urb) {
@@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
(dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
"simple");
+ /* reset rx error state */
+ dev->pkt_cnt = 0;
+ dev->pkt_err = 0;
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
// delay posting reads until we're fully open
tasklet_schedule (&dev->bh);
if (info->manage_power) {
@@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
if (info->tx_fixup) {
skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
if (!skb) {
- if (netif_msg_tx_err(dev)) {
- netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
- goto drop;
- } else {
- /* cdc_ncm collected packet; waits for more */
+ /* packet collected; minidriver waiting for more */
+ if (info->flags & FLAG_MULTI_PACKET)
goto not_drop;
- }
+ netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+ goto drop;
}
}
length = skb->len;
@@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
}
}
+ /* restart RX again after disabling due to high error rate */
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
// waiting for all pending urbs to complete?
if (dev->wait) {
if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
@@ -1448,6 +1471,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if ((dev->driver_info->flags & FLAG_WWAN) != 0)
strcpy(net->name, "wwan%d");
+ /* devices that cannot do ARP */
+ if ((dev->driver_info->flags & FLAG_NOARP) != 0)
+ net->flags |= IFF_NOARP;
+
/* maybe the remote can't receive an Ethernet MTU */
if (net->mtu > (dev->hard_mtu - net->hard_header_len))
net->mtu = dev->hard_mtu - net->hard_header_len;
@@ -1786,11 +1813,8 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
}
req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!req) {
- netdev_err(dev->net, "Failed to allocate memory for %s\n",
- __func__);
+ if (!req)
goto fail_free_buf;
- }
req->bRequestType = reqtype;
req->bRequest = cmd;
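
The usbnet hunks above add a simple breaker for pathological links: RX completions are counted in windows of 30 and, when more than 20 of a window end in the rx_cleanup error state, EVENT_RX_KILL stops new RX URB submission until the next bh run clears the bit. The heuristic in isolation, as a sketch with illustrative names:

/* returns true when RX submission should stop for the rest of this window */
static bool rx_error_rate_too_high(unsigned int *pkt_cnt, unsigned int *pkt_err,
				   bool completed_with_error)
{
	if (++(*pkt_cnt) > 30) {
		/* start a fresh 30-packet window */
		*pkt_cnt = 0;
		*pkt_err = 0;
		return false;
	}
	if (completed_with_error)
		(*pkt_err)++;
	return *pkt_err > 20;
}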
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 95814d9747ef..07a4af0aa3dc 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -25,18 +25,15 @@
#define MIN_MTU 68 /* Min L3 MTU */
#define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */
-struct veth_net_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- u64 rx_dropped;
+struct pcpu_vstats {
+ u64 packets;
+ u64 bytes;
struct u64_stats_sync syncp;
};
struct veth_priv {
- struct net_device *peer;
- struct veth_net_stats __percpu *stats;
+ struct net_device __rcu *peer;
+ atomic64_t dropped;
};
/*
@@ -92,10 +89,10 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
static void veth_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct veth_priv *priv;
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer = rtnl_dereference(priv->peer);
- priv = netdev_priv(dev);
- data[0] = priv->peer->ifindex;
+ data[0] = peer ? peer->ifindex : 0;
}
static const struct ethtool_ops veth_ethtool_ops = {
@@ -107,50 +104,37 @@ static const struct ethtool_ops veth_ethtool_ops = {
.get_ethtool_stats = veth_get_ethtool_stats,
};
-/*
- * xmit
- */
-
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct net_device *rcv = NULL;
- struct veth_priv *priv, *rcv_priv;
- struct veth_net_stats *stats, *rcv_stats;
- int length;
-
- priv = netdev_priv(dev);
- rcv = priv->peer;
- rcv_priv = netdev_priv(rcv);
-
- stats = this_cpu_ptr(priv->stats);
- rcv_stats = this_cpu_ptr(rcv_priv->stats);
-
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *rcv;
+ int length = skb->len;
+
+ rcu_read_lock();
+ rcv = rcu_dereference(priv->peer);
+ if (unlikely(!rcv)) {
+ kfree_skb(skb);
+ goto drop;
+ }
/* don't change ip_summed == CHECKSUM_PARTIAL, as that
- will cause bad checksum on forwarded packets */
+ * will cause bad checksum on forwarded packets
+ */
if (skb->ip_summed == CHECKSUM_NONE &&
rcv->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- length = skb->len;
- if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
- goto rx_drop;
+ if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
+ struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_bytes += length;
- stats->tx_packets++;
- u64_stats_update_end(&stats->syncp);
-
- u64_stats_update_begin(&rcv_stats->syncp);
- rcv_stats->rx_bytes += length;
- rcv_stats->rx_packets++;
- u64_stats_update_end(&rcv_stats->syncp);
-
- return NETDEV_TX_OK;
-
-rx_drop:
- u64_stats_update_begin(&rcv_stats->syncp);
- rcv_stats->rx_dropped++;
- u64_stats_update_end(&rcv_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ stats->bytes += length;
+ stats->packets++;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+drop:
+ atomic64_inc(&priv->dropped);
+ }
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
@@ -158,47 +142,63 @@ rx_drop:
* general routines
*/
-static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *tot)
+static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
int cpu;
+ result->packets = 0;
+ result->bytes = 0;
for_each_possible_cpu(cpu) {
- struct veth_net_stats *stats = per_cpu_ptr(priv->stats, cpu);
- u64 rx_packets, rx_bytes, rx_dropped;
- u64 tx_packets, tx_bytes;
+ struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
+ u64 packets, bytes;
unsigned int start;
do {
start = u64_stats_fetch_begin_bh(&stats->syncp);
- rx_packets = stats->rx_packets;
- tx_packets = stats->tx_packets;
- rx_bytes = stats->rx_bytes;
- tx_bytes = stats->tx_bytes;
- rx_dropped = stats->rx_dropped;
+ packets = stats->packets;
+ bytes = stats->bytes;
} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
- tot->rx_packets += rx_packets;
- tot->tx_packets += tx_packets;
- tot->rx_bytes += rx_bytes;
- tot->tx_bytes += tx_bytes;
- tot->rx_dropped += rx_dropped;
+ result->packets += packets;
+ result->bytes += bytes;
}
+ return atomic64_read(&priv->dropped);
+}
+
+static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer;
+ struct pcpu_vstats one;
+
+ tot->tx_dropped = veth_stats_one(&one, dev);
+ tot->tx_bytes = one.bytes;
+ tot->tx_packets = one.packets;
+
+ rcu_read_lock();
+ peer = rcu_dereference(priv->peer);
+ if (peer) {
+ tot->rx_dropped = veth_stats_one(&one, peer);
+ tot->rx_bytes = one.bytes;
+ tot->rx_packets = one.packets;
+ }
+ rcu_read_unlock();
return tot;
}
static int veth_open(struct net_device *dev)
{
- struct veth_priv *priv;
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer = rtnl_dereference(priv->peer);
- priv = netdev_priv(dev);
- if (priv->peer == NULL)
+ if (!peer)
return -ENOTCONN;
- if (priv->peer->flags & IFF_UP) {
+ if (peer->flags & IFF_UP) {
netif_carrier_on(dev);
- netif_carrier_on(priv->peer);
+ netif_carrier_on(peer);
}
return 0;
}
@@ -206,9 +206,11 @@ static int veth_open(struct net_device *dev)
static int veth_close(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
+ struct net_device *peer = rtnl_dereference(priv->peer);
netif_carrier_off(dev);
- netif_carrier_off(priv->peer);
+ if (peer)
+ netif_carrier_off(peer);
return 0;
}
@@ -228,24 +230,16 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
static int veth_dev_init(struct net_device *dev)
{
- struct veth_net_stats __percpu *stats;
- struct veth_priv *priv;
-
- stats = alloc_percpu(struct veth_net_stats);
- if (stats == NULL)
+ dev->vstats = alloc_percpu(struct pcpu_vstats);
+ if (!dev->vstats)
return -ENOMEM;
- priv = netdev_priv(dev);
- priv->stats = stats;
return 0;
}
static void veth_dev_free(struct net_device *dev)
{
- struct veth_priv *priv;
-
- priv = netdev_priv(dev);
- free_percpu(priv->stats);
+ free_percpu(dev->vstats);
free_netdev(dev);
}
@@ -259,6 +253,10 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
+#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
+
static void veth_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -269,9 +267,10 @@ static void veth_setup(struct net_device *dev)
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
+ dev->features |= VETH_FEATURES;
dev->destructor = veth_dev_free;
- dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
+ dev->hw_features = VETH_FEATURES;
}
/*
@@ -396,10 +395,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
*/
priv = netdev_priv(dev);
- priv->peer = peer;
+ rcu_assign_pointer(priv->peer, peer);
priv = netdev_priv(peer);
- priv->peer = dev;
+ rcu_assign_pointer(priv->peer, dev);
return 0;
err_register_dev:
@@ -420,10 +419,20 @@ static void veth_dellink(struct net_device *dev, struct list_head *head)
struct net_device *peer;
priv = netdev_priv(dev);
- peer = priv->peer;
+ peer = rtnl_dereference(priv->peer);
+ /* Note : dellink() is called from default_device_exit_batch(),
+ * before a rcu_synchronize() point. The devices are guaranteed
+ * not to be freed before one RCU grace period.
+ */
+ RCU_INIT_POINTER(priv->peer, NULL);
unregister_netdevice_queue(dev, head);
- unregister_netdevice_queue(peer, head);
+
+ if (peer) {
+ priv = netdev_priv(peer);
+ RCU_INIT_POINTER(priv->peer, NULL);
+ unregister_netdevice_queue(peer, head);
+ }
}
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
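The dellink/open/close hunks above convert priv->peer to an RCU-protected pointer: writers publish it with rcu_assign_pointer() or clear it with RCU_INIT_POINTER() under RTNL, and lockless readers dereference it inside rcu_read_lock(). A hedged sketch of that publish/read pattern, with example names standing in for the veth structures:

/* RCU-protected peer pointer: publish under RTNL, read locklessly. */
struct demo_priv {
	struct net_device __rcu	*peer;
};

static void demo_set_peer(struct demo_priv *priv, struct net_device *peer)
{
	rcu_assign_pointer(priv->peer, peer);	/* pairs with rcu_dereference() */
}

static bool demo_peer_is_up(struct demo_priv *priv)
{
	struct net_device *peer;
	bool up = false;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer)
		up = netif_running(peer);
	rcu_read_unlock();
	return up;
}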
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a6fcf15adc4f..192c91c8e799 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -26,6 +26,7 @@
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
@@ -123,6 +124,12 @@ struct virtnet_info {
/* Is the affinity hint set for virtqueues? */
bool affinity_hint_set;
+
+ /* Per-cpu variable to show the mapping from CPU to virtqueue */
+ int __percpu *vq_index;
+
+ /* CPU hot plug notifier */
+ struct notifier_block nb;
};
struct skb_vnet_hdr {
@@ -220,6 +227,7 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page,
skb->len += size;
skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
*len -= size;
}
@@ -753,19 +761,77 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+/*
+ * Send command via the control virtqueue and check status. Commands
+ * supported by the hypervisor, as indicated by feature bits, should
+ * never fail unless improperly formatted.
+ */
+static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
+ struct scatterlist *data, int out, int in)
+{
+ struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
+ struct virtio_net_ctrl_hdr ctrl;
+ virtio_net_ctrl_ack status = ~0;
+ unsigned int tmp;
+ int i;
+
+ /* Caller should know better */
+ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
+ (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
+
+ out++; /* Add header */
+ in++; /* Add return status */
+
+ ctrl.class = class;
+ ctrl.cmd = cmd;
+
+ sg_init_table(sg, out + in);
+
+ sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
+ for_each_sg(data, s, out + in - 2, i)
+ sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
+ sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
+
+ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
+
+ virtqueue_kick(vi->cvq);
+
+ /* Spin for a response, the kick causes an ioport write, trapping
+ * into the hypervisor, so the request should be handled immediately.
+ */
+ while (!virtqueue_get_buf(vi->cvq, &tmp))
+ cpu_relax();
+
+ return status == VIRTIO_NET_OK;
+}
+
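virtnet_send_command() is only moved up in the file so the MAC path below can use it; callers hand it a scatterlist holding the command payload and check the boolean result. A hedged example of a caller, using the real VIRTIO_NET_CTRL_RX class but an illustrative wrapper name:

/* Toggle promiscuous mode through the control virtqueue (illustrative). */
static int demo_set_promisc(struct virtnet_info *vi, bool on)
{
	struct scatterlist sg;
	u8 promisc = on ? 1 : 0;

	sg_init_one(&sg, &promisc, sizeof(promisc));
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, &sg, 1, 0))
		return -EINVAL;
	return 0;
}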
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_device *vdev = vi->vdev;
int ret;
+ struct sockaddr *addr = p;
+ struct scatterlist sg;
- ret = eth_mac_addr(dev, p);
+ ret = eth_prepare_mac_addr_change(dev, p);
if (ret)
return ret;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ sg_init_one(&sg, addr->sa_data, dev->addr_len);
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
+ VIRTIO_NET_CTRL_MAC_ADDR_SET,
+ &sg, 1, 0)) {
+ dev_warn(&vdev->dev,
+ "Failed to set mac address by vq command.\n");
+ return -EINVAL;
+ }
+ } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
- dev->dev_addr, dev->addr_len);
+ addr->sa_data, dev->addr_len);
+ }
+
+ eth_commit_mac_addr_change(dev, p);
return 0;
}
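The new MAC path splits the change into eth_prepare_mac_addr_change(), which only validates the request, and eth_commit_mac_addr_change(), which updates dev->dev_addr once the device has accepted the address, so a rejected control command leaves the old address in place. A sketch of that pattern in a generic ndo_set_mac_address, where demo_push_mac_to_hw() is a hypothetical stand-in for the device write:

/* Stand-in for a device-specific MAC register write; not a real API. */
static int demo_push_mac_to_hw(struct net_device *dev, const u8 *mac)
{
	return 0;
}

static int demo_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(dev, p);	/* validate only */
	if (err)
		return err;

	err = demo_push_mac_to_hw(dev, addr->sa_data);	/* may fail safely */
	if (err)
		return err;

	eth_commit_mac_addr_change(dev, p);		/* now update dev_addr */
	return 0;
}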
@@ -819,51 +885,6 @@ static void virtnet_netpoll(struct net_device *dev)
}
#endif
-/*
- * Send command via the control virtqueue and check status. Commands
- * supported by the hypervisor, as indicated by feature bits, should
- * never fail unless improperly formated.
- */
-static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
- struct scatterlist *data, int out, int in)
-{
- struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
- struct virtio_net_ctrl_hdr ctrl;
- virtio_net_ctrl_ack status = ~0;
- unsigned int tmp;
- int i;
-
- /* Caller should know better */
- BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
- (out + in > VIRTNET_SEND_COMMAND_SG_MAX));
-
- out++; /* Add header */
- in++; /* Add return status */
-
- ctrl.class = class;
- ctrl.cmd = cmd;
-
- sg_init_table(sg, out + in);
-
- sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
- for_each_sg(data, s, out + in - 2, i)
- sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
- sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
-
- BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
-
- virtqueue_kick(vi->cvq);
-
- /*
- * Spin for a response, the kick causes an ioport write, trapping
- * into the hypervisor, so the request should be handled immediately.
- */
- while (!virtqueue_get_buf(vi->cvq, &tmp))
- cpu_relax();
-
- return status == VIRTIO_NET_OK;
-}
-
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();
@@ -952,10 +973,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
(2 * sizeof(mac_data->entries)), GFP_ATOMIC);
mac_data = buf;
- if (!buf) {
- dev_warn(&dev->dev, "No memory for MAC address buffer\n");
+ if (!buf)
return;
- }
sg_init_table(sg, 2);
@@ -1013,32 +1032,75 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
return 0;
}
-static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
+static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
int i;
+ int cpu;
+
+ if (vi->affinity_hint_set) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ virtqueue_set_affinity(vi->rq[i].vq, -1);
+ virtqueue_set_affinity(vi->sq[i].vq, -1);
+ }
+
+ vi->affinity_hint_set = false;
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ if (cpu == hcpu) {
+ *per_cpu_ptr(vi->vq_index, cpu) = -1;
+ } else {
+ *per_cpu_ptr(vi->vq_index, cpu) =
+ ++i % vi->curr_queue_pairs;
+ }
+ }
+}
+
+static void virtnet_set_affinity(struct virtnet_info *vi)
+{
+ int i;
+ int cpu;
/* In multiqueue mode, when the number of CPUs equals the number of
* queue pairs, we let each queue pair be private to one CPU by
* setting the affinity hint to eliminate contention.
*/
- if ((vi->curr_queue_pairs == 1 ||
- vi->max_queue_pairs != num_online_cpus()) && set) {
- if (vi->affinity_hint_set)
- set = false;
- else
- return;
+ if (vi->curr_queue_pairs == 1 ||
+ vi->max_queue_pairs != num_online_cpus()) {
+ virtnet_clean_affinity(vi, -1);
+ return;
}
- for (i = 0; i < vi->max_queue_pairs; i++) {
- int cpu = set ? i : -1;
+ i = 0;
+ for_each_online_cpu(cpu) {
virtqueue_set_affinity(vi->rq[i].vq, cpu);
virtqueue_set_affinity(vi->sq[i].vq, cpu);
+ *per_cpu_ptr(vi->vq_index, cpu) = i;
+ i++;
}
- if (set)
- vi->affinity_hint_set = true;
- else
- vi->affinity_hint_set = false;
+ vi->affinity_hint_set = true;
+}
+
+static int virtnet_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+
+ switch(action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ case CPU_DEAD:
+ virtnet_set_affinity(vi);
+ break;
+ case CPU_DOWN_PREPARE:
+ virtnet_clean_affinity(vi, (long)hcpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
}
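virtnet_cpu_callback() uses the hotcpu notifier interface of this era (register_hotcpu_notifier(), since replaced by the cpuhp state machine) to rebalance virtqueue affinity as CPUs come and go. A minimal sketch of the same registration pattern, with demo_rebalance() as a hypothetical driver hook:

#include <linux/cpu.h>
#include <linux/notifier.h>

static void demo_rebalance(void)
{
	/* driver-specific rebalancing would go here */
}

static int demo_cpu_callback(struct notifier_block *nfb,
			     unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_DEAD:
		demo_rebalance();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_cpu_callback,
};

/* probe:  register_hotcpu_notifier(&demo_nb);   */
/* remove: unregister_hotcpu_notifier(&demo_nb); */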
static void virtnet_get_ringparam(struct net_device *dev,
@@ -1082,13 +1144,15 @@ static int virtnet_set_channels(struct net_device *dev,
if (queue_pairs > vi->max_queue_pairs)
return -EINVAL;
+ get_online_cpus();
err = virtnet_set_queues(vi, queue_pairs);
if (!err) {
netif_set_real_num_tx_queues(dev, queue_pairs);
netif_set_real_num_rx_queues(dev, queue_pairs);
- virtnet_set_affinity(vi, true);
+ virtnet_set_affinity(vi);
}
+ put_online_cpus();
return err;
}
@@ -1127,12 +1191,19 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
/* To avoid contending a lock held by a vcpu that would exit to the host, select the
* txq based on the processor id.
- * TODO: handle cpu hotplug.
*/
static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
{
- int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
- smp_processor_id();
+ int txq;
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ if (skb_rx_queue_recorded(skb)) {
+ txq = skb_get_rx_queue(skb);
+ } else {
+ txq = *__this_cpu_ptr(vi->vq_index);
+ if (txq == -1)
+ txq = 0;
+ }
while (unlikely(txq >= dev->real_num_tx_queues))
txq -= dev->real_num_tx_queues;
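The queue selection above consults a per-cpu int that maps the current CPU to a virtqueue, with -1 meaning "no private queue, fall back to 0". A hedged sketch of building and consulting such a map with the percpu helpers the patch uses (all other names are examples):

static int __percpu *demo_vq_index;

static int demo_map_init(int nr_queues)
{
	int cpu, i = 0;

	demo_vq_index = alloc_percpu(int);
	if (!demo_vq_index)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(demo_vq_index, cpu) = i++ % nr_queues;
	return 0;
}

static u16 demo_pick_txq(void)
{
	int txq = *__this_cpu_ptr(demo_vq_index);	/* this CPU's slot */

	return txq < 0 ? 0 : txq;
}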
@@ -1248,7 +1319,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
{
struct virtio_device *vdev = vi->vdev;
- virtnet_set_affinity(vi, false);
+ virtnet_clean_affinity(vi, -1);
vdev->config->del_vqs(vdev);
@@ -1371,7 +1442,10 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
- virtnet_set_affinity(vi, true);
+ get_online_cpus();
+ virtnet_set_affinity(vi);
+ put_online_cpus();
+
return 0;
err_free:
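The get_online_cpus()/put_online_cpus() pair added around virtnet_set_affinity() holds the CPU-hotplug read lock so the set of online CPUs cannot change while the affinity hints are being written. A minimal sketch of that bracketing, assuming nothing beyond the hotplug lock API of this era:

static void demo_walk_online_cpus(void (*fn)(int cpu))
{
	int cpu;

	get_online_cpus();		/* block CPU hotplug */
	for_each_online_cpu(cpu)
		fn(cpu);
	put_online_cpus();
}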
@@ -1453,6 +1527,10 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->stats == NULL)
goto free;
+ vi->vq_index = alloc_percpu(int);
+ if (vi->vq_index == NULL)
+ goto free_stats;
+
mutex_init(&vi->config_lock);
vi->config_enable = true;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -1476,7 +1554,7 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
err = init_vqs(vi);
if (err)
- goto free_stats;
+ goto free_index;
netif_set_real_num_tx_queues(dev, 1);
netif_set_real_num_rx_queues(dev, 1);
@@ -1499,6 +1577,13 @@ static int virtnet_probe(struct virtio_device *vdev)
}
}
+ vi->nb.notifier_call = &virtnet_cpu_callback;
+ err = register_hotcpu_notifier(&vi->nb);
+ if (err) {
+ pr_debug("virtio_net: registering cpu notifier failed\n");
+ goto free_recv_bufs;
+ }
+
/* Assume link up if device can't report link status,
otherwise get link status from config. */
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
@@ -1520,6 +1605,8 @@ free_recv_bufs:
free_vqs:
cancel_delayed_work_sync(&vi->refill);
virtnet_del_vqs(vi);
+free_index:
+ free_percpu(vi->vq_index);
free_stats:
free_percpu(vi->stats);
free:
@@ -1543,6 +1630,8 @@ static void virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
+ unregister_hotcpu_notifier(&vi->nb);
+
/* Prevent config work handler from accessing the device. */
mutex_lock(&vi->config_lock);
vi->config_enable = false;
@@ -1554,6 +1643,7 @@ static void virtnet_remove(struct virtio_device *vdev)
flush_work(&vi->config_work);
+ free_percpu(vi->vq_index);
free_percpu(vi->stats);
free_netdev(vi->dev);
}
@@ -1628,6 +1718,7 @@ static unsigned int features[] = {
VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
+ VIRTIO_NET_F_CTRL_MAC_ADDR,
};
static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index dc8913c6238c..ffb97b2a15a0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -43,11 +43,7 @@ static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
-static atomic_t devices_found;
-
-#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
-static int irq_share_mode;
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
@@ -152,10 +148,9 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
adapter->link_speed = ret >> 16;
if (ret & 1) { /* Link is up. */
- printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
- adapter->netdev->name, adapter->link_speed);
- if (!netif_carrier_ok(adapter->netdev))
- netif_carrier_on(adapter->netdev);
+ netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
+ adapter->link_speed);
+ netif_carrier_on(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -163,10 +158,8 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
adapter);
}
} else {
- printk(KERN_INFO "%s: NIC Link is Down\n",
- adapter->netdev->name);
- if (netif_carrier_ok(adapter->netdev))
- netif_carrier_off(adapter->netdev);
+ netdev_info(adapter->netdev, "NIC Link is Down\n");
+ netif_carrier_off(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -510,8 +503,7 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
* sizeof(struct Vmxnet3_TxDesc),
&tq->tx_ring.basePA);
if (!tq->tx_ring.base) {
- printk(KERN_ERR "%s: failed to allocate tx ring\n",
- adapter->netdev->name);
+ netdev_err(adapter->netdev, "failed to allocate tx ring\n");
goto err;
}
@@ -520,8 +512,7 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
sizeof(struct Vmxnet3_TxDataDesc),
&tq->data_ring.basePA);
if (!tq->data_ring.base) {
- printk(KERN_ERR "%s: failed to allocate data ring\n",
- adapter->netdev->name);
+ netdev_err(adapter->netdev, "failed to allocate data ring\n");
goto err;
}
@@ -530,8 +521,7 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
sizeof(struct Vmxnet3_TxCompDesc),
&tq->comp_ring.basePA);
if (!tq->comp_ring.base) {
- printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
- adapter->netdev->name);
+ netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
goto err;
}
@@ -580,15 +570,14 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
if (rbi->skb == NULL) {
- rbi->skb = dev_alloc_skb(rbi->len +
- NET_IP_ALIGN);
+ rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
+ rbi->len,
+ GFP_KERNEL);
if (unlikely(rbi->skb == NULL)) {
rq->stats.rx_buf_alloc_failure++;
break;
}
- rbi->skb->dev = adapter->netdev;
- skb_reserve(rbi->skb, NET_IP_ALIGN);
rbi->dma_addr = pci_map_single(adapter->pdev,
rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE);
@@ -629,12 +618,10 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
num_allocated++;
vmxnet3_cmd_ring_adv_next2fill(ring);
}
- rq->uncommitted[ring_idx] += num_allocated;
- dev_dbg(&adapter->netdev->dev,
- "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
- "%u, uncommitted %u\n", num_allocated, ring->next2fill,
- ring->next2comp, rq->uncommitted[ring_idx]);
+ netdev_dbg(adapter->netdev,
+ "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
+ num_allocated, ring->next2fill, ring->next2comp);
/* so that the device can distinguish a full ring and an empty ring */
BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
@@ -691,7 +678,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_NONE;
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill,
le64_to_cpu(ctx->sop_txd->txd.addr),
@@ -731,7 +718,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
gdesc->dword[2] = cpu_to_le32(dw2);
gdesc->dword[3] = 0;
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
@@ -771,7 +758,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
gdesc->dword[2] = cpu_to_le32(dw2);
gdesc->dword[3] = 0;
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"txd[%u]: 0x%llu %u %u\n",
tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
@@ -871,7 +858,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
tdd = tq->data_ring.base + tq->tx_ring.next2fill;
memcpy(tdd->data, skb->data, ctx->copy_size);
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"copy %u bytes to dataRing[%u]\n",
ctx->copy_size, tq->tx_ring.next2fill);
return 1;
@@ -977,7 +964,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
tq->stats.tx_ring_full++;
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"tx queue stopped on %s, next2comp %u"
" next2fill %u\n", adapter->netdev->name,
tq->tx_ring.next2comp, tq->tx_ring.next2fill);
@@ -1060,7 +1047,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
(struct Vmxnet3_TxDesc *)ctx.sop_txd);
gdesc = ctx.sop_txd;
#endif
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
(u32)(ctx.sop_txd -
tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
@@ -1213,7 +1200,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
if (unlikely(rcd->len == 0)) {
/* Pretend the rx buffer is skipped. */
BUG_ON(!(rcd->sop && rcd->eop));
- dev_dbg(&adapter->netdev->dev,
+ netdev_dbg(adapter->netdev,
"rxRing[%u][%u] 0 length\n",
ring_idx, idx);
goto rcd_done;
@@ -1221,7 +1208,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skip_page_frags = false;
ctx->skb = rbi->skb;
- new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
+ new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
+ rbi->len);
if (new_skb == NULL) {
/* Skb allocation failed, do not handover this
* skb to stack. Reuse it. Drop the existing pkt
@@ -1236,11 +1224,14 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
+#ifdef VMXNET3_RSS
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
+ (adapter->netdev->features & NETIF_F_RXHASH))
+ ctx->skb->rxhash = le32_to_cpu(rcd->rssHash);
+#endif
skb_put(ctx->skb, rcd->len);
/* Immediate refill */
- new_skb->dev = adapter->netdev;
- skb_reserve(new_skb, NET_IP_ALIGN);
rbi->skb = new_skb;
rbi->dma_addr = pci_map_single(adapter->pdev,
rbi->skb->data, rbi->len,
@@ -1333,7 +1324,6 @@ rcd_done:
VMXNET3_WRITE_BAR0_REG(adapter,
rxprod_reg[ring_idx] + rq->qid * 8,
ring->next2fill);
- rq->uncommitted[ring_idx] = 0;
}
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
@@ -1378,7 +1368,6 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
rq->rx_ring[ring_idx].next2fill =
rq->rx_ring[ring_idx].next2comp = 0;
- rq->uncommitted[ring_idx] = 0;
}
rq->comp_ring.gen = VMXNET3_INIT_GEN;
@@ -1459,7 +1448,6 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
/* reset internal state and allocate buffers for both rings */
for (i = 0; i < 2; i++) {
rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
- rq->uncommitted[i] = 0;
memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
sizeof(struct Vmxnet3_RxDesc));
@@ -1518,8 +1506,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
&rq->rx_ring[i].basePA);
if (!rq->rx_ring[i].base) {
- printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
- adapter->netdev->name, i);
+ netdev_err(adapter->netdev,
+ "failed to allocate rx ring %d\n", i);
goto err;
}
}
@@ -1528,8 +1516,7 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
&rq->comp_ring.basePA);
if (!rq->comp_ring.base) {
- printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
- adapter->netdev->name);
+ netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
goto err;
}
@@ -1821,9 +1808,10 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
adapter->rx_queue[i].name,
&(adapter->rx_queue[i]));
if (err) {
- printk(KERN_ERR "Failed to request irq for MSIX"
- ", %s, error %d\n",
- adapter->rx_queue[i].name, err);
+ netdev_err(adapter->netdev,
+ "Failed to request irq for MSIX, "
+ "%s, error %d\n",
+ adapter->rx_queue[i].name, err);
return err;
}
@@ -1852,8 +1840,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
#endif
intr->num_intrs = vector + 1;
if (err) {
- printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
- ":%d\n", adapter->netdev->name, intr->type, err);
+ netdev_err(adapter->netdev,
+ "Failed to request irq (intr type:%d), error %d\n",
+ intr->type, err);
} else {
/* Number of rx queues will not change after this */
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -1874,9 +1863,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
adapter->rx_queue[0].comp_ring.intr_idx = 0;
}
- printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
- "allocated\n", adapter->netdev->name, intr->type,
- intr->mask_mode, intr->num_intrs);
+ netdev_info(adapter->netdev,
+ "intr type %u, mode %u, %u vectors allocated\n",
+ intr->type, intr->mask_mode, intr->num_intrs);
}
return err;
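Most of the vmxnet3 hunks convert hand-rolled printk(KERN_ERR "%s: ...", netdev->name, ...) calls to the netdev_err()/netdev_info()/netdev_dbg() helpers, which prefix the driver and device name automatically. A before/after sketch of the conversion (the function itself is an example, not from the patch):

static void demo_report_link(struct net_device *netdev, int speed)
{
	/* old style: caller formats the device name by hand */
	/* printk(KERN_INFO "%s: link up, %d Mbps\n", netdev->name, speed); */

	/* new style: helper adds the "driver device:" prefix */
	netdev_info(netdev, "link up, %d Mbps\n", speed);
}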
@@ -2042,8 +2031,8 @@ vmxnet3_set_mc(struct net_device *netdev)
rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
new_table));
} else {
- printk(KERN_INFO "%s: failed to copy mcast list"
- ", setting ALL_MULTI\n", netdev->name);
+ netdev_info(netdev, "failed to copy mcast list"
+ ", setting ALL_MULTI\n");
new_mode |= VMXNET3_RXM_ALL_MULTI;
}
}
@@ -2171,6 +2160,14 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
if (adapter->rss) {
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+ static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
+ 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
+ 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
+ 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
+ 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
+ 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
+ };
+
devRead->misc.uptFeatures |= UPT1_F_RSS;
devRead->misc.numRxQueues = adapter->num_rx_queues;
rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
@@ -2180,7 +2177,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
- get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
+ memcpy(rssConf->hashKey, rss_key, sizeof(rss_key));
+
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = ethtool_rxfh_indir_default(
i, adapter->num_rx_queues);
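The RSS hunk switches from a per-boot random hash key to a fixed Toeplitz key and still spreads the indirection table over the rx queues with ethtool_rxfh_indir_default(). A small sketch of that table fill, assuming only the ethtool helper (other names are illustrative):

static void demo_fill_rss_indir(u32 *indir, unsigned int size,
				unsigned int nr_rx_queues)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		indir[i] = ethtool_rxfh_indir_default(i, nr_rx_queues);
}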
@@ -2218,7 +2216,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
u32 ret;
unsigned long flags;
- dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
+ netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
" ring sizes %u %u %u\n", adapter->netdev->name,
adapter->skb_buf_size, adapter->rx_buf_per_pkt,
adapter->tx_queue[0].tx_ring.size,
@@ -2228,15 +2226,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
vmxnet3_tq_init_all(adapter);
err = vmxnet3_rq_init_all(adapter);
if (err) {
- printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
- adapter->netdev->name, err);
+ netdev_err(adapter->netdev,
+ "Failed to init rx queue error %d\n", err);
goto rq_err;
}
err = vmxnet3_request_irqs(adapter);
if (err) {
- printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
- adapter->netdev->name, err);
+ netdev_err(adapter->netdev,
+ "Failed to setup irq for error %d\n", err);
goto irq_err;
}
@@ -2253,8 +2251,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (ret != 0) {
- printk(KERN_ERR "Failed to activate dev %s: error %u\n",
- adapter->netdev->name, ret);
+ netdev_err(adapter->netdev,
+ "Failed to activate dev: error %u\n", ret);
err = -EINVAL;
goto activate_err;
}
@@ -2369,23 +2367,22 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
- pci_name(pdev), err);
+ dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
return err;
}
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
- printk(KERN_ERR "pci_set_consistent_dma_mask failed "
- "for adapter %s\n", pci_name(pdev));
+ dev_err(&pdev->dev,
+ "pci_set_consistent_dma_mask failed\n");
err = -EIO;
goto err_set_mask;
}
*dma64 = true;
} else {
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- printk(KERN_ERR "pci_set_dma_mask failed for adapter "
- "%s\n", pci_name(pdev));
+ dev_err(&pdev->dev,
+ "pci_set_dma_mask failed\n");
err = -EIO;
goto err_set_mask;
}
@@ -2395,8 +2392,8 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
err = pci_request_selected_regions(pdev, (1 << 2) - 1,
vmxnet3_driver_name);
if (err) {
- printk(KERN_ERR "Failed to request region for adapter %s: "
- "error %d\n", pci_name(pdev), err);
+ dev_err(&pdev->dev,
+ "Failed to request region for adapter: error %d\n", err);
goto err_set_mask;
}
@@ -2406,8 +2403,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
mmio_len = pci_resource_len(pdev, 0);
adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
if (!adapter->hw_addr0) {
- printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Failed to map bar0\n");
err = -EIO;
goto err_ioremap;
}
@@ -2416,8 +2412,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
mmio_len = pci_resource_len(pdev, 1);
adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
if (!adapter->hw_addr1) {
- printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Failed to map bar1\n");
err = -EIO;
goto err_bar1;
}
@@ -2524,12 +2519,14 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
err = vmxnet3_rq_create(rq, adapter);
if (err) {
if (i == 0) {
- printk(KERN_ERR "Could not allocate any rx"
- "queues. Aborting.\n");
+ netdev_err(adapter->netdev,
+ "Could not allocate any rx queues. "
+ "Aborting.\n");
goto queue_err;
} else {
- printk(KERN_INFO "Number of rx queues changed "
- "to : %d.\n", i);
+ netdev_info(adapter->netdev,
+ "Number of rx queues changed "
+ "to : %d.\n", i);
adapter->num_rx_queues = i;
err = 0;
break;
@@ -2642,15 +2639,17 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
vmxnet3_adjust_rx_ring_size(adapter);
err = vmxnet3_rq_create_all(adapter);
if (err) {
- printk(KERN_ERR "%s: failed to re-create rx queues,"
- " error %d. Closing it.\n", netdev->name, err);
+ netdev_err(netdev,
+ "failed to re-create rx queues, "
+ " error %d. Closing it.\n", err);
goto out;
}
err = vmxnet3_activate_dev(adapter);
if (err) {
- printk(KERN_ERR "%s: failed to re-activate, error %d. "
- "Closing it\n", netdev->name, err);
+ netdev_err(netdev,
+ "failed to re-activate, error %d. "
+ "Closing it\n", err);
goto out;
}
}
@@ -2678,10 +2677,6 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
netdev->vlan_features = netdev->hw_features &
~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
-
- netdev_info(adapter->netdev,
- "features: sg csum vlan jf tso tsoIPv6 lro%s\n",
- dma64 ? " highDMA" : "");
}
@@ -2724,7 +2719,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
adapter->intr.num_intrs = vectors;
return 0;
} else if (err < 0) {
- netdev_err(adapter->netdev,
+ dev_err(&adapter->netdev->dev,
"Failed to enable MSI-X, error: %d\n", err);
vectors = 0;
} else if (err < vector_threshold) {
@@ -2733,15 +2728,16 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
/* If fails to enable required number of MSI-x vectors
* try enabling minimum number of vectors required.
*/
- netdev_err(adapter->netdev,
- "Failed to enable %d MSI-X, trying %d instead\n",
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable %d MSI-X, trying %d instead\n",
vectors, vector_threshold);
vectors = vector_threshold;
}
}
- netdev_info(adapter->netdev,
- "Number of MSI-X interrupts which can be allocated are lower than min threshold required.\n");
+ dev_info(&adapter->pdev->dev,
+ "Number of MSI-X interrupts which can be allocated "
+ "is lower than min threshold required.\n");
return err;
}
@@ -2796,7 +2792,8 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|| adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
- printk(KERN_ERR "Number of rx queues : 1\n");
+ netdev_err(adapter->netdev,
+ "Number of rx queues : 1\n");
adapter->num_rx_queues = 1;
adapter->intr.num_intrs =
VMXNET3_LINUX_MIN_MSIX_VECT;
@@ -2807,9 +2804,9 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
return;
/* If we cannot allocate MSIx vectors use only one rx queue */
- netdev_info(adapter->netdev,
- "Failed to enable MSI-X, error %d . Limiting #rx queues to 1, try MSI.\n",
- err);
+ dev_info(&adapter->pdev->dev,
+ "Failed to enable MSI-X, error %d. "
+ "Limiting #rx queues to 1, try MSI.\n", err);
adapter->intr.type = VMXNET3_IT_MSI;
}
@@ -2826,7 +2823,8 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
#endif /* CONFIG_PCI_MSI */
adapter->num_rx_queues = 1;
- printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
+ dev_info(&adapter->netdev->dev,
+ "Using INTx interrupt, #Rx queues: 1.\n");
adapter->intr.type = VMXNET3_IT_INTX;
/* INT-X related setting */
@@ -2852,7 +2850,7 @@ vmxnet3_tx_timeout(struct net_device *netdev)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
- printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
+ netdev_err(adapter->netdev, "tx hang\n");
schedule_work(&adapter->work);
netif_wake_queue(adapter->netdev);
}
@@ -2872,12 +2870,12 @@ vmxnet3_reset_work(struct work_struct *data)
/* if the device is closed, we must leave it alone */
rtnl_lock();
if (netif_running(adapter->netdev)) {
- printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
+ netdev_notice(adapter->netdev, "resetting\n");
vmxnet3_quiesce_dev(adapter);
vmxnet3_reset_dev(adapter);
vmxnet3_activate_dev(adapter);
} else {
- printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
+ netdev_info(adapter->netdev, "already closed\n");
}
rtnl_unlock();
@@ -2936,8 +2934,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
num_tx_queues = rounddown_pow_of_two(num_tx_queues);
netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
max(num_tx_queues, num_rx_queues));
- printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
- num_tx_queues, num_rx_queues);
+ dev_info(&pdev->dev,
+ "# of Tx queues : %d, # of Rx queues : %d\n",
+ num_tx_queues, num_rx_queues);
if (!netdev)
return -ENOMEM;
@@ -2952,8 +2951,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
sizeof(struct Vmxnet3_DriverShared),
&adapter->shared_pa);
if (!adapter->shared) {
- printk(KERN_ERR "Failed to allocate memory for %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
err = -ENOMEM;
goto err_alloc_shared;
}
@@ -2967,8 +2965,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
&adapter->queue_desc_pa);
if (!adapter->tqd_start) {
- printk(KERN_ERR "Failed to allocate memory for %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Failed to allocate memory\n");
err = -ENOMEM;
goto err_alloc_queue_desc;
}
@@ -2998,8 +2995,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
if (ver & 1) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
} else {
- printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
- " %s\n", ver, pci_name(pdev));
+ dev_err(&pdev->dev,
+ "Incompatible h/w version (0x%x) for adapter\n", ver);
err = -EBUSY;
goto err_ver;
}
@@ -3008,8 +3005,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
if (ver & 1) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
} else {
- printk(KERN_ERR "Incompatible upt version (0x%x) for "
- "adapter %s\n", ver, pci_name(pdev));
+ dev_err(&pdev->dev,
+ "Incompatible upt version (0x%x) for adapter\n", ver);
err = -EBUSY;
goto err_ver;
}
@@ -3017,11 +3014,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
vmxnet3_declare_features(adapter, dma64);
- adapter->dev_number = atomic_read(&devices_found);
-
- adapter->share_intr = irq_share_mode;
- if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
- adapter->num_tx_queues != adapter->num_rx_queues)
+ if (adapter->num_tx_queues == adapter->num_rx_queues)
+ adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
+ else
adapter->share_intr = VMXNET3_INTR_DONTSHARE;
vmxnet3_alloc_intr_resources(adapter);
@@ -3030,7 +3025,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
if (adapter->num_rx_queues > 1 &&
adapter->intr.type == VMXNET3_IT_MSIX) {
adapter->rss = true;
- printk(KERN_INFO "RSS is enabled.\n");
+ netdev->hw_features |= NETIF_F_RXHASH;
+ netdev->features |= NETIF_F_RXHASH;
+ dev_dbg(&pdev->dev, "RSS is enabled.\n");
} else {
adapter->rss = false;
}
@@ -3061,16 +3058,15 @@ vmxnet3_probe_device(struct pci_dev *pdev,
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
+ netif_carrier_off(netdev);
err = register_netdev(netdev);
if (err) {
- printk(KERN_ERR "Failed to register adapter %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Failed to register adapter\n");
goto err_register;
}
vmxnet3_check_link(adapter, false);
- atomic_inc(&devices_found);
return 0;
err_register:
@@ -3312,7 +3308,7 @@ static struct pci_driver vmxnet3_driver = {
static int __init
vmxnet3_init_module(void)
{
- printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
+ pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
VMXNET3_DRIVER_VERSION_REPORT);
return pci_register_driver(&vmxnet3_driver);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 587a218b2345..9bc542be2937 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -207,7 +207,7 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
- ETHTOOL_BUSINFO_LEN);
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
drvinfo->testinfo_len = 0;
drvinfo->eedump_len = 0;
@@ -522,24 +522,23 @@ vmxnet3_set_ringparam(struct net_device *netdev,
if (err) {
/* failed, most likely because of OOM, try default
* size */
- printk(KERN_ERR "%s: failed to apply new sizes, try the"
- " default ones\n", netdev->name);
+ netdev_err(netdev, "failed to apply new sizes, "
+ "try the default ones\n");
err = vmxnet3_create_queues(adapter,
VMXNET3_DEF_TX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE);
if (err) {
- printk(KERN_ERR "%s: failed to create queues "
- "with default sizes. Closing it\n",
- netdev->name);
+ netdev_err(netdev, "failed to create queues "
+ "with default sizes. Closing it\n");
goto out;
}
}
err = vmxnet3_activate_dev(adapter);
if (err)
- printk(KERN_ERR "%s: failed to re-activate, error %d."
- " Closing it\n", netdev->name, err);
+ netdev_err(netdev, "failed to re-activate, error %d."
+ " Closing it\n", err);
}
out:
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index fc46a81ad538..3198384689d9 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -276,8 +276,6 @@ struct vmxnet3_rx_queue {
struct vmxnet3_rx_ctx rx_ctx;
u32 qid; /* rqID in RCD for buffer from 1st ring */
u32 qid2; /* rqID in RCD for buffer from 2nd ring */
- u32 uncommitted[2]; /* # of buffers allocated since last RXPROD
- * update */
struct vmxnet3_rx_buf_info *buf_info[2];
struct Vmxnet3_RxQueueCtrl *shared;
struct vmxnet3_rq_driver_stats stats;
@@ -354,7 +352,6 @@ struct vmxnet3_adapter {
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
- int dev_number;
int share_intr;
};
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 656230e0d18c..9d70421cf3a0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -29,6 +29,7 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
+#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
@@ -392,7 +393,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
}
/* Delete entry (via netlink) */
-static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
+static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
const unsigned char *addr)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -1271,6 +1273,18 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
+static void vxlan_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
+}
+
+static const struct ethtool_ops vxlan_ethtool_ops = {
+ .get_drvinfo = vxlan_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
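vxlan gains a minimal ethtool_ops so `ethtool` can report the driver name, version, and link state; SET_ETHTOOL_OPS() (a macro of this era, later replaced by plain assignment) wires it up before the netdevice is registered. A hedged sketch of the same minimal ops for a generic virtual device:

static void demo_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "demo", sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_drvinfo	= demo_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

/* in the device setup path: */
/*	SET_ETHTOOL_OPS(dev, &demo_ethtool_ops); */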
static int vxlan_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
@@ -1348,6 +1362,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
vxlan->port_max = ntohs(p->high);
}
+ SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
+
err = register_netdevice(dev);
if (!err)
hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index d58431e99f73..94e234975c61 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -356,63 +356,9 @@ config SDLA
To compile this driver as a module, choose M here: the
module will be called sdla.
-# Wan router core.
-config WAN_ROUTER_DRIVERS
- tristate "WAN router drivers"
- depends on WAN_ROUTER
- ---help---
- Connect LAN to WAN via Linux box.
-
- Select driver your card and remember to say Y to "Wan Router."
- You will need the wan-tools package which is available from
- <ftp://ftp.sangoma.com/>.
-
- Note that the answer to this question won't directly affect the
- kernel except for how subordinate drivers may be built:
- saying N will just cause the configurator to skip all
- the questions about WAN router drivers.
-
- If unsure, say N.
-
-config CYCLADES_SYNC
- tristate "Cyclom 2X(tm) cards (EXPERIMENTAL)"
- depends on WAN_ROUTER_DRIVERS && (PCI || ISA)
- ---help---
- Cyclom 2X from Cyclades Corporation <http://www.avocent.com/> is an
- intelligent multiprotocol WAN adapter with data transfer rates up to
- 512 Kbps. These cards support the X.25 and SNA related protocols.
-
- While no documentation is available at this time please grab the
- wanconfig tarball in
- <http://www.conectiva.com.br/~acme/cycsyn-devel/> (with minor changes
- to make it compile with the current wanrouter include files; efforts
- are being made to use the original package available at
- <ftp://ftp.sangoma.com/>).
-
- Feel free to contact me or the cycsyn-devel mailing list at
- <acme@conectiva.com.br> and <cycsyn-devel@bazar.conectiva.com.br> for
- additional details, I hope to have documentation available as soon as
- possible. (Cyclades Brazil is writing the Documentation).
-
- The next questions will ask you about the protocols you want the
- driver to support (for now only X.25 is supported).
-
- If you have one or more of these cards, say Y to this option.
-
- To compile this driver as a module, choose M here: the
- module will be called cyclomx.
-
-config CYCLOMX_X25
- bool "Cyclom 2X X.25 support (EXPERIMENTAL)"
- depends on CYCLADES_SYNC
- help
- Connect a Cyclom 2X card to an X.25 network.
-
- Enabling X.25 support will enlarge your kernel by about 11 kB.
-
# X.25 network drivers
config LAPBETHER
- tristate "LAPB over Ethernet driver (EXPERIMENTAL)"
+ tristate "LAPB over Ethernet driver"
depends on LAPB && X25
---help---
Driver for a pseudo device (typically called /dev/lapb0) which allows
@@ -428,8 +374,8 @@ config LAPBETHER
If unsure, say N.
config X25_ASY
- tristate "X.25 async driver (EXPERIMENTAL)"
- depends on LAPB && X25
+ tristate "X.25 async driver"
+ depends on LAPB && X25 && TTY
---help---
Send and receive X.25 frames over regular asynchronous serial
lines such as telephone lines equipped with ordinary modems.
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index df70248e2fda..c135ef47cbca 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -5,10 +5,6 @@
# Rewritten to use lists instead of if-statements.
#
-cyclomx-y := cycx_main.o
-cyclomx-$(CONFIG_CYCLOMX_X25) += cycx_x25.o
-cyclomx-objs := $(cyclomx-y)
-
obj-$(CONFIG_HDLC) += hdlc.o
obj-$(CONFIG_HDLC_RAW) += hdlc_raw.o
obj-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o
@@ -28,7 +24,6 @@ obj-$(CONFIG_LANMEDIA) += lmc/
obj-$(CONFIG_DLCI) += dlci.o
obj-$(CONFIG_SDLA) += sdla.o
-obj-$(CONFIG_CYCLADES_SYNC) += cycx_drv.o cyclomx.o
obj-$(CONFIG_LAPBETHER) += lapbether.o
obj-$(CONFIG_SBNI) += sbni.o
obj-$(CONFIG_N2) += n2.o
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 6aed238e573e..0179cefae438 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -795,8 +795,8 @@ static ssize_t cosa_read(struct file *file,
if (mutex_lock_interruptible(&chan->rlock))
return -ERESTARTSYS;
- if ((chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL)) == NULL) {
- pr_info("%s: cosa_read() - OOM\n", cosa->name);
+ chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL);
+ if (chan->rxdata == NULL) {
mutex_unlock(&chan->rlock);
return -ENOMEM;
}
@@ -874,9 +874,8 @@ static ssize_t cosa_write(struct file *file,
count = COSA_MTU;
/* Allocate the buffer */
- if ((kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA)) == NULL) {
- pr_notice("%s: cosa_write() OOM - dropping packet\n",
- cosa->name);
+ kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA);
+ if (kbuf == NULL) {
up(&chan->wsem);
return -ENOMEM;
}
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
deleted file mode 100644
index 2a3ecae67a90..000000000000
--- a/drivers/net/wan/cycx_drv.c
+++ /dev/null
@@ -1,569 +0,0 @@
-/*
-* cycx_drv.c Cyclom 2X Support Module.
-*
-* This module is a library of common hardware specific
-* functions used by the Cyclades Cyclom 2X sync card.
-*
-* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-*
-* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
-*
-* Based on sdladrv.c by Gene Kozin <genek@compuserve.com>
-*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License
-* as published by the Free Software Foundation; either version
-* 2 of the License, or (at your option) any later version.
-* ============================================================================
-* 1999/11/11 acme set_current_state(TASK_INTERRUPTIBLE), code
-* cleanup
-* 1999/11/08 acme init_cyc2x deleted, doing nothing
-* 1999/11/06 acme back to read[bw], write[bw] and memcpy_to and
-* fromio to use dpmbase ioremaped
-* 1999/10/26 acme use isa_read[bw], isa_write[bw] & isa_memcpy_to
-* & fromio
-* 1999/10/23 acme cleanup to only supports cyclom2x: all the other
-* boards are no longer manufactured by cyclades,
-* if someone wants to support them... be my guest!
-* 1999/05/28 acme cycx_intack & cycx_intde gone for good
-* 1999/05/18 acme lots of unlogged work, submitting to Linus...
-* 1999/01/03 acme more judicious use of data types
-* 1999/01/03 acme judicious use of data types :>
-* cycx_inten trying to reset pending interrupts
-* from cyclom 2x - I think this isn't the way to
-* go, but for now...
-* 1999/01/02 acme cycx_intack ok, I think there's nothing to do
-* to ack an int in cycx_drv.c, only handle it in
-* cyx_isr (or in the other protocols: cyp_isr,
-* cyf_isr, when they get implemented.
-* Dec 31, 1998 acme cycx_data_boot & cycx_code_boot fixed, crossing
-* fingers to see x25_configure in cycx_x25.c
-* work... :)
-* Dec 26, 1998 acme load implementation fixed, seems to work! :)
-* cycx_2x_dpmbase_options with all the possible
-* DPM addresses (20).
-* cycx_intr implemented (test this!)
-* general code cleanup
-* Dec 8, 1998 Ivan Passos Cyclom-2X firmware load implementation.
-* Aug 8, 1998 acme Initial version.
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h> /* __init */
-#include <linux/module.h>
-#include <linux/kernel.h> /* printk(), and other useful stuff */
-#include <linux/stddef.h> /* offsetof(), etc. */
-#include <linux/errno.h> /* return codes */
-#include <linux/cycx_drv.h> /* API definitions */
-#include <linux/cycx_cfm.h> /* CYCX firmware module definitions */
-#include <linux/delay.h> /* udelay, msleep_interruptible */
-#include <asm/io.h> /* read[wl], write[wl], ioremap, iounmap */
-
-#define MOD_VERSION 0
-#define MOD_RELEASE 6
-
-MODULE_AUTHOR("Arnaldo Carvalho de Melo");
-MODULE_DESCRIPTION("Cyclom 2x Sync Card Driver");
-MODULE_LICENSE("GPL");
-
-/* Hardware-specific functions */
-static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len);
-static void cycx_bootcfg(struct cycx_hw *hw);
-
-static int reset_cyc2x(void __iomem *addr);
-static int detect_cyc2x(void __iomem *addr);
-
-/* Miscellaneous functions */
-static int get_option_index(const long *optlist, long optval);
-static u16 checksum(u8 *buf, u32 len);
-
-#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET)
-
-/* Global Data */
-
-/* private data */
-static const char fullname[] = "Cyclom 2X Support Module";
-static const char copyright[] =
- "(c) 1998-2003 Arnaldo Carvalho de Melo <acme@conectiva.com.br>";
-
-/* Hardware configuration options.
- * These are arrays of configuration options used by verification routines.
- * The first element of each array is its size (i.e. number of options).
- */
-static const long cyc2x_dpmbase_options[] = {
- 20,
- 0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000,
- 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000,
- 0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000
-};
-
-static const long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
-
-/* Kernel Loadable Module Entry Points */
-/* Module 'insert' entry point.
- * o print announcement
- * o initialize static data
- *
- * Return: 0 Ok
- * < 0 error.
- * Context: process */
-
-static int __init cycx_drv_init(void)
-{
- pr_info("%s v%u.%u %s\n",
- fullname, MOD_VERSION, MOD_RELEASE, copyright);
-
- return 0;
-}
-
-/* Module 'remove' entry point.
- * o release all remaining system resources */
-static void cycx_drv_cleanup(void)
-{
-}
-
-/* Kernel APIs */
-/* Set up adapter.
- * o detect adapter type
- * o verify hardware configuration options
- * o check for hardware conflicts
- * o set up adapter shared memory
- * o test adapter memory
- * o load firmware
- * Return: 0 ok.
- * < 0 error */
-EXPORT_SYMBOL(cycx_setup);
-int cycx_setup(struct cycx_hw *hw, void *cfm, u32 len, unsigned long dpmbase)
-{
- int err;
-
- /* Verify IRQ configuration options */
- if (!get_option_index(cycx_2x_irq_options, hw->irq)) {
- pr_err("IRQ %d is invalid!\n", hw->irq);
- return -EINVAL;
- }
-
- /* Setup adapter dual-port memory window and test memory */
- if (!dpmbase) {
- pr_err("you must specify the dpm address!\n");
- return -EINVAL;
- } else if (!get_option_index(cyc2x_dpmbase_options, dpmbase)) {
- pr_err("memory address 0x%lX is invalid!\n", dpmbase);
- return -EINVAL;
- }
-
- hw->dpmbase = ioremap(dpmbase, CYCX_WINDOWSIZE);
- hw->dpmsize = CYCX_WINDOWSIZE;
-
- if (!detect_cyc2x(hw->dpmbase)) {
- pr_err("adapter Cyclom 2X not found at address 0x%lX!\n",
- dpmbase);
- return -EINVAL;
- }
-
- pr_info("found Cyclom 2X card at address 0x%lX\n", dpmbase);
-
- /* Load firmware. If loader fails then shut down adapter */
- err = load_cyc2x(hw, cfm, len);
-
- if (err)
- cycx_down(hw); /* shutdown adapter */
-
- return err;
-}
-
-EXPORT_SYMBOL(cycx_down);
-int cycx_down(struct cycx_hw *hw)
-{
- iounmap(hw->dpmbase);
- return 0;
-}
-
-/* Enable interrupt generation. */
-static void cycx_inten(struct cycx_hw *hw)
-{
- writeb(0, hw->dpmbase);
-}
-
-/* Generate an interrupt to adapter's CPU. */
-EXPORT_SYMBOL(cycx_intr);
-void cycx_intr(struct cycx_hw *hw)
-{
- writew(0, hw->dpmbase + GEN_CYCX_INTR);
-}
-
-/* Execute Adapter Command.
- * o Set exec flag.
- * o Busy-wait until flag is reset. */
-EXPORT_SYMBOL(cycx_exec);
-int cycx_exec(void __iomem *addr)
-{
- u16 i = 0;
- /* wait till addr content is zeroed */
-
- while (readw(addr)) {
- udelay(1000);
-
- if (++i > 50)
- return -1;
- }
-
- return 0;
-}
-
-/* Read absolute adapter memory.
- * Transfer data from adapter's memory to data buffer. */
-EXPORT_SYMBOL(cycx_peek);
-int cycx_peek(struct cycx_hw *hw, u32 addr, void *buf, u32 len)
-{
- if (len == 1)
- *(u8*)buf = readb(hw->dpmbase + addr);
- else
- memcpy_fromio(buf, hw->dpmbase + addr, len);
-
- return 0;
-}
-
-/* Write Absolute Adapter Memory.
- * Transfer data from data buffer to adapter's memory. */
-EXPORT_SYMBOL(cycx_poke);
-int cycx_poke(struct cycx_hw *hw, u32 addr, void *buf, u32 len)
-{
- if (len == 1)
- writeb(*(u8*)buf, hw->dpmbase + addr);
- else
- memcpy_toio(hw->dpmbase + addr, buf, len);
-
- return 0;
-}
-
-/* Hardware-Specific Functions */
-
-/* Load Aux Routines */
-/* Reset board hardware.
- return 1 if memory exists at addr and 0 if not. */
-static int memory_exists(void __iomem *addr)
-{
- int tries = 0;
-
- for (; tries < 3 ; tries++) {
- writew(TEST_PATTERN, addr + 0x10);
-
- if (readw(addr + 0x10) == TEST_PATTERN)
- if (readw(addr + 0x10) == TEST_PATTERN)
- return 1;
-
- msleep_interruptible(1 * 1000);
- }
-
- return 0;
-}
-
-/* Load reset code. */
-static void reset_load(void __iomem *addr, u8 *buffer, u32 cnt)
-{
- void __iomem *pt_code = addr + RESET_OFFSET;
- u16 i; /*, j; */
-
- for (i = 0 ; i < cnt ; i++) {
-/* for (j = 0 ; j < 50 ; j++); Delay - FIXME busy waiting... */
- writeb(*buffer++, pt_code++);
- }
-}
-
-/* Load buffer using boot interface.
- * o copy data from buffer to Cyclom-X memory
- * o wait for reset code to copy it to right portion of memory */
-static int buffer_load(void __iomem *addr, u8 *buffer, u32 cnt)
-{
- memcpy_toio(addr + DATA_OFFSET, buffer, cnt);
- writew(GEN_BOOT_DAT, addr + CMD_OFFSET);
-
- return wait_cyc(addr);
-}
-
-/* Set up entry point and kick start Cyclom-X CPU. */
-static void cycx_start(void __iomem *addr)
-{
- /* put in 0x30 offset the jump instruction to the code entry point */
- writeb(0xea, addr + 0x30);
- writeb(0x00, addr + 0x31);
- writeb(0xc4, addr + 0x32);
- writeb(0x00, addr + 0x33);
- writeb(0x00, addr + 0x34);
-
- /* cmd to start executing code */
- writew(GEN_START, addr + CMD_OFFSET);
-}
-
-/* Load and boot reset code. */
-static void cycx_reset_boot(void __iomem *addr, u8 *code, u32 len)
-{
- void __iomem *pt_start = addr + START_OFFSET;
-
- writeb(0xea, pt_start++); /* jmp to f000:3f00 */
- writeb(0x00, pt_start++);
- writeb(0xfc, pt_start++);
- writeb(0x00, pt_start++);
- writeb(0xf0, pt_start);
- reset_load(addr, code, len);
-
- /* 80186 was in hold, go */
- writeb(0, addr + START_CPU);
- msleep_interruptible(1 * 1000);
-}
-
-/* Load data.bin file through boot (reset) interface. */
-static int cycx_data_boot(void __iomem *addr, u8 *code, u32 len)
-{
- void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
- u32 i;
-
- /* boot buffer length */
- writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
- writew(GEN_DEFPAR, pt_boot_cmd);
-
- if (wait_cyc(addr) < 0)
- return -1;
-
- writew(0, pt_boot_cmd + sizeof(u16));
- writew(0x4000, pt_boot_cmd + 2 * sizeof(u16));
- writew(GEN_SET_SEG, pt_boot_cmd);
-
- if (wait_cyc(addr) < 0)
- return -1;
-
- for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
- if (buffer_load(addr, code + i,
- min_t(u32, CFM_LOAD_BUFSZ, (len - i))) < 0) {
- pr_err("Error !!\n");
- return -1;
- }
-
- return 0;
-}
-
-
-/* Load code.bin file through boot (reset) interface. */
-static int cycx_code_boot(void __iomem *addr, u8 *code, u32 len)
-{
- void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
- u32 i;
-
- /* boot buffer length */
- writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
- writew(GEN_DEFPAR, pt_boot_cmd);
-
- if (wait_cyc(addr) < 0)
- return -1;
-
- writew(0x0000, pt_boot_cmd + sizeof(u16));
- writew(0xc400, pt_boot_cmd + 2 * sizeof(u16));
- writew(GEN_SET_SEG, pt_boot_cmd);
-
- if (wait_cyc(addr) < 0)
- return -1;
-
- for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
- if (buffer_load(addr, code + i,
- min_t(u32, CFM_LOAD_BUFSZ, (len - i)))) {
- pr_err("Error !!\n");
- return -1;
- }
-
- return 0;
-}
-
-/* Load adapter from the memory image of the CYCX firmware module.
- * o verify firmware integrity and compatibility
- * o start adapter up */
-static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
-{
- int i, j;
- struct cycx_fw_header *img_hdr;
- u8 *reset_image,
- *data_image,
- *code_image;
- void __iomem *pt_cycld = hw->dpmbase + 0x400;
- u16 cksum;
-
- /* Announce */
- pr_info("firmware signature=\"%s\"\n", cfm->signature);
-
- /* Verify firmware signature */
- if (strcmp(cfm->signature, CFM_SIGNATURE)) {
- pr_err("load_cyc2x: not Cyclom-2X firmware!\n");
- return -EINVAL;
- }
-
- pr_info("firmware version=%u\n", cfm->version);
-
- /* Verify firmware module format version */
- if (cfm->version != CFM_VERSION) {
- pr_err("%s: firmware format %u rejected! Expecting %u.\n",
- __func__, cfm->version, CFM_VERSION);
- return -EINVAL;
- }
-
- /* Verify firmware module length and checksum */
- cksum = checksum((u8*)&cfm->info, sizeof(struct cycx_fw_info) +
- cfm->info.codesize);
-/*
- FIXME cfm->info.codesize is off by 2
- if (((len - sizeof(struct cycx_firmware) - 1) != cfm->info.codesize) ||
-*/
- if (cksum != cfm->checksum) {
- pr_err("%s: firmware corrupted!\n", __func__);
- pr_err(" cdsize = 0x%x (expected 0x%lx)\n",
- len - (int)sizeof(struct cycx_firmware) - 1,
- cfm->info.codesize);
- pr_err(" chksum = 0x%x (expected 0x%x)\n",
- cksum, cfm->checksum);
- return -EINVAL;
- }
-
- /* If everything is ok, set reset, data and code pointers */
- img_hdr = (struct cycx_fw_header *)&cfm->image;
-#ifdef FIRMWARE_DEBUG
- pr_info("%s: image sizes\n", __func__);
- pr_info(" reset=%lu\n", img_hdr->reset_size);
- pr_info(" data=%lu\n", img_hdr->data_size);
- pr_info(" code=%lu\n", img_hdr->code_size);
-#endif
- reset_image = ((u8 *)img_hdr) + sizeof(struct cycx_fw_header);
- data_image = reset_image + img_hdr->reset_size;
- code_image = data_image + img_hdr->data_size;
-
- /*---- Start load ----*/
- /* Announce */
- pr_info("loading firmware %s (ID=%u)...\n",
- cfm->descr[0] ? cfm->descr : "unknown firmware",
- cfm->info.codeid);
-
- for (i = 0 ; i < 5 ; i++) {
- /* Reset Cyclom hardware */
- if (!reset_cyc2x(hw->dpmbase)) {
- pr_err("dpm problem or board not found\n");
- return -EINVAL;
- }
-
- /* Load reset.bin */
- cycx_reset_boot(hw->dpmbase, reset_image, img_hdr->reset_size);
- /* reset is waiting for boot */
- writew(GEN_POWER_ON, pt_cycld);
- msleep_interruptible(1 * 1000);
-
- for (j = 0 ; j < 3 ; j++)
- if (!readw(pt_cycld))
- goto reset_loaded;
- else
- msleep_interruptible(1 * 1000);
- }
-
- pr_err("reset not started\n");
- return -EINVAL;
-
-reset_loaded:
- /* Load data.bin */
- if (cycx_data_boot(hw->dpmbase, data_image, img_hdr->data_size)) {
- pr_err("cannot load data file\n");
- return -EINVAL;
- }
-
- /* Load code.bin */
- if (cycx_code_boot(hw->dpmbase, code_image, img_hdr->code_size)) {
- pr_err("cannot load code file\n");
- return -EINVAL;
- }
-
- /* Prepare boot-time configuration data */
- cycx_bootcfg(hw);
-
- /* kick-off CPU */
- cycx_start(hw->dpmbase);
-
- /* Arthur Ganzert's tip: wait a while after the firmware loading...
- seg abr 26 17:17:12 EST 1999 - acme */
- msleep_interruptible(7 * 1000);
- pr_info("firmware loaded!\n");
-
- /* enable interrupts */
- cycx_inten(hw);
-
- return 0;
-}
-
-/* Prepare boot-time firmware configuration data.
- * o initialize configuration data area
- From async.doc - V_3.4.0 - 07/18/1994
- - As of now, only static buffers are available to the user.
- So, the bit VD_RXDIRC must be set in 'valid'. That means that user
- wants to use the static transmission and reception buffers. */
-static void cycx_bootcfg(struct cycx_hw *hw)
-{
- /* use fixed buffers */
- writeb(FIXED_BUFFERS, hw->dpmbase + CONF_OFFSET);
-}
-
-/* Detect Cyclom 2x adapter.
- * Following tests are used to detect Cyclom 2x adapter:
- * to be completed based on the tests done below
- * Return 1 if detected o.k. or 0 if failed.
- * Note: This test is destructive! Adapter will be left in shutdown
- * state after the test. */
-static int detect_cyc2x(void __iomem *addr)
-{
- reset_cyc2x(addr);
-
- return memory_exists(addr);
-}
-
-/* Miscellaneous */
-/* Get option's index into the options list.
- * Return option's index (1 .. N) or zero if option is invalid. */
-static int get_option_index(const long *optlist, long optval)
-{
- int i = 1;
-
- for (; i <= optlist[0]; ++i)
- if (optlist[i] == optval)
- return i;
-
- return 0;
-}
-
-/* Reset adapter's CPU. */
-static int reset_cyc2x(void __iomem *addr)
-{
- writeb(0, addr + RST_ENABLE);
- msleep_interruptible(2 * 1000);
- writeb(0, addr + RST_DISABLE);
- msleep_interruptible(2 * 1000);
-
- return memory_exists(addr);
-}
-
-/* Calculate 16-bit CRC using CCITT polynomial. */
-static u16 checksum(u8 *buf, u32 len)
-{
- u16 crc = 0;
- u16 mask, flag;
-
- for (; len; --len, ++buf)
- for (mask = 0x80; mask; mask >>= 1) {
- flag = (crc & 0x8000);
- crc <<= 1;
- crc |= ((*buf & mask) ? 1 : 0);
-
- if (flag)
- crc ^= 0x1021;
- }
-
- return crc;
-}
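
The checksum() above is the bit-serial CRC-CCITT loop: data bits are shifted in MSB first, the register starts at zero and no augmentation bits are appended, so the result differs from the common CRC-16/CCITT-FALSE (0xFFFF init) variant. A stand-alone user-space sketch of the same loop, with made-up sample data:

#include <stdint.h>
#include <stdio.h>

static uint16_t ccitt_bitwise_crc(const uint8_t *buf, uint32_t len)
{
    uint16_t crc = 0;
    uint16_t mask, flag;

    for (; len; --len, ++buf)
        for (mask = 0x80; mask; mask >>= 1) {
            flag = crc & 0x8000;
            crc <<= 1;
            crc |= (*buf & mask) ? 1 : 0;
            if (flag)
                crc ^= 0x1021;    /* x^16 + x^12 + x^5 + 1 */
        }
    return crc;
}

int main(void)
{
    const uint8_t sample[] = { 0xde, 0xad, 0xbe, 0xef };

    printf("crc = 0x%04x\n", ccitt_bitwise_crc(sample, sizeof(sample)));
    return 0;
}
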
-
-module_init(cycx_drv_init);
-module_exit(cycx_drv_cleanup);
-
-/* End */
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
deleted file mode 100644
index 81fbbad406be..000000000000
--- a/drivers/net/wan/cycx_main.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
-* cycx_main.c Cyclades Cyclom 2X WAN Link Driver. Main module.
-*
-* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-*
-* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
-*
-* Based on sdlamain.c by Gene Kozin <genek@compuserve.com> &
-* Jaspreet Singh <jaspreet@sangoma.com>
-*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License
-* as published by the Free Software Foundation; either version
-* 2 of the License, or (at your option) any later version.
-* ============================================================================
-* Please look at the bitkeeper changelog (or any other scm tool that ends up
-* importing bitkeeper changelog or that replaces bitkeeper in the future as
-* main tool for linux development).
-*
-* 2001/05/09 acme Fix MODULE_DESC for debug, .bss nitpicks,
-* some cleanups
-* 2000/07/13 acme remove useless #ifdef MODULE and crap
-* #if KERNEL_VERSION > blah
-* 2000/07/06 acme __exit at cyclomx_cleanup
-* 2000/04/02 acme dprintk and cycx_debug
-* module_init/module_exit
-* 2000/01/21 acme rename cyclomx_open to cyclomx_mod_inc_use_count
-* and cyclomx_close to cyclomx_mod_dec_use_count
-* 2000/01/08 acme cleanup
-* 1999/11/06 acme cycx_down back to life (it needs to be
-* called to iounmap the dpmbase)
-* 1999/08/09 acme removed references to enable_tx_int
-* use spinlocks instead of cli/sti in
-* cyclomx_set_state
-* 1999/05/19 acme works directly linked into the kernel
-* init_waitqueue_head for 2.3.* kernel
-* 1999/05/18 acme major cleanup (polling not needed), etc
-* 1998/08/28 acme minor cleanup (ioctls for firmware deleted)
-* queue_task activated
-* 1998/08/08 acme Initial version.
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/stddef.h> /* offsetof(), etc. */
-#include <linux/errno.h> /* return codes */
-#include <linux/string.h> /* inline memset(), etc. */
-#include <linux/slab.h> /* kmalloc(), kfree() */
-#include <linux/kernel.h> /* printk(), and other useful stuff */
-#include <linux/module.h> /* support for loadable modules */
-#include <linux/ioport.h> /* request_region(), release_region() */
-#include <linux/wanrouter.h> /* WAN router definitions */
-#include <linux/cyclomx.h> /* cyclomx common user API definitions */
-#include <linux/init.h> /* __init (when not using as a module) */
-#include <linux/interrupt.h>
-
-unsigned int cycx_debug;
-
-MODULE_AUTHOR("Arnaldo Carvalho de Melo");
-MODULE_DESCRIPTION("Cyclom 2X Sync Card Driver.");
-MODULE_LICENSE("GPL");
-module_param(cycx_debug, int, 0);
-MODULE_PARM_DESC(cycx_debug, "cyclomx debug level");
-
-/* Defines & Macros */
-
-#define CYCX_DRV_VERSION 0 /* version number */
-#define CYCX_DRV_RELEASE 11 /* release (minor version) number */
-#define CYCX_MAX_CARDS 1 /* max number of adapters */
-
-#define CONFIG_CYCX_CARDS 1
-
-/* Function Prototypes */
-
-/* WAN link driver entry points */
-static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf);
-static int cycx_wan_shutdown(struct wan_device *wandev);
-
-/* Miscellaneous functions */
-static irqreturn_t cycx_isr(int irq, void *dev_id);
-
-/* Global Data
- * Note: All data must be explicitly initialized!!!
- */
-
-/* private data */
-static const char cycx_drvname[] = "cyclomx";
-static const char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
-static const char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
- "<acme@conectiva.com.br>";
-static int cycx_ncards = CONFIG_CYCX_CARDS;
-static struct cycx_device *cycx_card_array; /* adapter data space */
-
-/* Kernel Loadable Module Entry Points */
-
-/*
- * Module 'insert' entry point.
- * o print announcement
- * o allocate adapter data space
- * o initialize static data
- * o register all cards with WAN router
- * o calibrate Cyclom 2X shared memory access delay.
- *
- * Return: 0 Ok
- * < 0 error.
- * Context: process
- */
-static int __init cycx_init(void)
-{
- int cnt, err = -ENOMEM;
-
- pr_info("%s v%u.%u %s\n",
- cycx_fullname, CYCX_DRV_VERSION, CYCX_DRV_RELEASE,
- cycx_copyright);
-
- /* Verify number of cards and allocate adapter data space */
- cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS);
- cycx_ncards = max_t(int, cycx_ncards, 1);
- cycx_card_array = kcalloc(cycx_ncards, sizeof(struct cycx_device), GFP_KERNEL);
- if (!cycx_card_array)
- goto out;
-
-
- /* Register adapters with WAN router */
- for (cnt = 0; cnt < cycx_ncards; ++cnt) {
- struct cycx_device *card = &cycx_card_array[cnt];
- struct wan_device *wandev = &card->wandev;
-
- sprintf(card->devname, "%s%d", cycx_drvname, cnt + 1);
- wandev->magic = ROUTER_MAGIC;
- wandev->name = card->devname;
- wandev->private = card;
- wandev->setup = cycx_wan_setup;
- wandev->shutdown = cycx_wan_shutdown;
- err = register_wan_device(wandev);
-
- if (err) {
- pr_err("%s registration failed with error %d!\n",
- card->devname, err);
- break;
- }
- }
-
- err = -ENODEV;
- if (!cnt) {
- kfree(cycx_card_array);
- goto out;
- }
- err = 0;
- cycx_ncards = cnt; /* adjust actual number of cards */
-out: return err;
-}
-
-/*
- * Module 'remove' entry point.
- * o unregister all adapters from the WAN router
- * o release all remaining system resources
- */
-static void __exit cycx_exit(void)
-{
- int i = 0;
-
- for (; i < cycx_ncards; ++i) {
- struct cycx_device *card = &cycx_card_array[i];
- unregister_wan_device(card->devname);
- }
-
- kfree(cycx_card_array);
-}
-
-/* WAN Device Driver Entry Points */
-/*
- * Setup/configure WAN link driver.
- * o check adapter state
- * o make sure firmware is present in configuration
- * o allocate interrupt vector
- * o setup Cyclom 2X hardware
- * o call appropriate routine to perform protocol-specific initialization
- *
- * This function is called when router handles ROUTER_SETUP IOCTL. The
- * configuration structure is in kernel memory (including extended data, if
- * any).
- */
-static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf)
-{
- int rc = -EFAULT;
- struct cycx_device *card;
- int irq;
-
- /* Sanity checks */
-
- if (!wandev || !wandev->private || !conf)
- goto out;
-
- card = wandev->private;
- rc = -EBUSY;
- if (wandev->state != WAN_UNCONFIGURED)
- goto out;
-
- rc = -EINVAL;
- if (!conf->data_size || !conf->data) {
- pr_err("%s: firmware not found in configuration data!\n",
- wandev->name);
- goto out;
- }
-
- if (conf->irq <= 0) {
- pr_err("%s: can't configure without IRQ!\n", wandev->name);
- goto out;
- }
-
- /* Allocate IRQ */
- irq = conf->irq == 2 ? 9 : conf->irq; /* IRQ2 -> IRQ9 */
-
- if (request_irq(irq, cycx_isr, 0, wandev->name, card)) {
- pr_err("%s: can't reserve IRQ %d!\n", wandev->name, irq);
- goto out;
- }
-
- /* Configure hardware, load firmware, etc. */
- memset(&card->hw, 0, sizeof(card->hw));
- card->hw.irq = irq;
- card->hw.dpmsize = CYCX_WINDOWSIZE;
- card->hw.fwid = CFID_X25_2X;
- spin_lock_init(&card->lock);
- init_waitqueue_head(&card->wait_stats);
-
- rc = cycx_setup(&card->hw, conf->data, conf->data_size, conf->maddr);
- if (rc)
- goto out_irq;
-
- /* Initialize WAN device data space */
- wandev->irq = irq;
- wandev->dma = wandev->ioport = 0;
- wandev->maddr = (unsigned long)card->hw.dpmbase;
- wandev->msize = card->hw.dpmsize;
- wandev->hw_opt[2] = 0;
- wandev->hw_opt[3] = card->hw.fwid;
-
- /* Protocol-specific initialization */
- switch (card->hw.fwid) {
-#ifdef CONFIG_CYCLOMX_X25
- case CFID_X25_2X:
- rc = cycx_x25_wan_init(card, conf);
- break;
-#endif
- default:
- pr_err("%s: this firmware is not supported!\n", wandev->name);
- rc = -EINVAL;
- }
-
- if (rc) {
- cycx_down(&card->hw);
- goto out_irq;
- }
-
- rc = 0;
-out:
- return rc;
-out_irq:
- free_irq(irq, card);
- goto out;
-}
-
-/*
- * Shut down WAN link driver.
- * o shut down adapter hardware
- * o release system resources.
- *
- * This function is called by the router when device is being unregistered or
- * when it handles ROUTER_DOWN IOCTL.
- */
-static int cycx_wan_shutdown(struct wan_device *wandev)
-{
- int ret = -EFAULT;
- struct cycx_device *card;
-
- /* sanity checks */
- if (!wandev || !wandev->private)
- goto out;
-
- ret = 0;
- if (wandev->state == WAN_UNCONFIGURED)
- goto out;
-
- card = wandev->private;
- wandev->state = WAN_UNCONFIGURED;
- cycx_down(&card->hw);
- pr_info("%s: irq %d being freed!\n", wandev->name, wandev->irq);
- free_irq(wandev->irq, card);
-out: return ret;
-}
-
-/* Miscellaneous */
-/*
- * Cyclom 2X Interrupt Service Routine.
- * o acknowledge Cyclom 2X hardware interrupt.
- * o call protocol-specific interrupt service routine, if any.
- */
-static irqreturn_t cycx_isr(int irq, void *dev_id)
-{
- struct cycx_device *card = dev_id;
-
- if (card->wandev.state == WAN_UNCONFIGURED)
- goto out;
-
- if (card->in_isr) {
- pr_warn("%s: interrupt re-entrancy on IRQ %d!\n",
- card->devname, card->wandev.irq);
- goto out;
- }
-
- if (card->isr)
- card->isr(card);
- return IRQ_HANDLED;
-out:
- return IRQ_NONE;
-}
-
-/* Set WAN device state. */
-void cycx_set_state(struct cycx_device *card, int state)
-{
- unsigned long flags;
- char *string_state = NULL;
-
- spin_lock_irqsave(&card->lock, flags);
-
- if (card->wandev.state != state) {
- switch (state) {
- case WAN_CONNECTED:
- string_state = "connected!";
- break;
- case WAN_DISCONNECTED:
- string_state = "disconnected!";
- break;
- }
- pr_info("%s: link %s\n", card->devname, string_state);
- card->wandev.state = state;
- }
-
- card->state_tick = jiffies;
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-module_init(cycx_init);
-module_exit(cycx_exit);
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
deleted file mode 100644
index 06f3f6309e4b..000000000000
--- a/drivers/net/wan/cycx_x25.c
+++ /dev/null
@@ -1,1602 +0,0 @@
-/*
-* cycx_x25.c Cyclom 2X WAN Link Driver. X.25 module.
-*
-* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
-*
-* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
-*
-* Based on sdla_x25.c by Gene Kozin <genek@compuserve.com>
-*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License
-* as published by the Free Software Foundation; either version
-* 2 of the License, or (at your option) any later version.
-* ============================================================================
-* 2001/01/12 acme use dev_kfree_skb_irq on interrupt context
-* 2000/04/02 acme dprintk, cycx_debug
-* fixed the bug introduced in get_dev_by_lcn and
-* get_dev_by_dte_addr by the anonymous hacker
-* that converted this driver to softnet
-* 2000/01/08 acme cleanup
-* 1999/10/27 acme use ARPHRD_HWX25 so that the X.25 stack knows
-* that we have an X.25 stack implemented in
-* firmware onboard
-* 1999/10/18 acme support for X.25 sockets in if_send,
-* beware: socket(AF_X25...) IS WORK IN PROGRESS,
-* TCP/IP over X.25 via wanrouter not affected,
-* working.
-* 1999/10/09 acme chan_disc renamed to chan_disconnect,
-* began adding support for X.25 sockets:
-* conf->protocol in new_if
-* 1999/10/05 acme fixed return E... to return -E...
-* 1999/08/10 acme serialized access to the card thru a spinlock
-* in x25_exec
-* 1999/08/09 acme removed per channel spinlocks
-* removed references to enable_tx_int
-* 1999/05/28 acme fixed nibble_to_byte, ackvc now properly treated
-* if_send simplified
-* 1999/05/25 acme fixed t1, t2, t21 & t23 configuration
-* use spinlocks instead of cli/sti in some points
-* 1999/05/24 acme finished the x25_get_stat function
-* 1999/05/23 acme dev->type = ARPHRD_X25 (tcpdump only works,
-* AFAIT, with ARPHRD_ETHER). This seems to be
-* needed to use socket(AF_X25)...
-* Now the config file must specify a peer media
-* address for svc channels over a crossover cable.
-* Removed hold_timeout from x25_channel_t,
-* not used.
-* A little enhancement in the DEBUG processing
-* 1999/05/22 acme go to DISCONNECTED in disconnect_confirm_intr,
-* instead of chan_disc.
-* 1999/05/16 marcelo fixed timer initialization in SVCs
-* 1999/01/05 acme x25_configure now get (most of) all
-* parameters...
-* 1999/01/05 acme pktlen now (correctly) uses log2 (value
-* configured)
-* 1999/01/03 acme judicious use of data types (u8, u16, u32, etc)
-* 1999/01/03 acme cyx_isr: reset dpmbase to acknowledge
-* indication (interrupt from cyclom 2x)
-* 1999/01/02 acme cyx_isr: first hackings...
-* 1999/01/0203 acme when initializing an array don't give less
-* elements than declared...
-* example: char send_cmd[6] = "?\xFF\x10";
-* you'll gonna lose a couple hours, 'cause your
-* brain won't admit that there's an error in the
-* above declaration... the side effect is that
-* memset is put into the unresolved symbols
-* instead of using the inline memset functions...
-* 1999/01/02 acme began chan_connect, chan_send, x25_send
-* 1998/12/31 acme x25_configure
-* this code can be compiled as non module
-* 1998/12/27 acme code cleanup
-* IPX code wiped out! let's decrease code
-* complexity for now, remember: I'm learning! :)
-* bps_to_speed_code OK
-* 1998/12/26 acme Minimal debug code cleanup
-* 1998/08/08 acme Initial version.
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#define CYCLOMX_X25_DEBUG 1
-
-#include <linux/ctype.h> /* isdigit() */
-#include <linux/errno.h> /* return codes */
-#include <linux/if_arp.h> /* ARPHRD_HWX25 */
-#include <linux/kernel.h> /* printk(), and other useful stuff */
-#include <linux/module.h>
-#include <linux/string.h> /* inline memset(), etc. */
-#include <linux/sched.h>
-#include <linux/slab.h> /* kmalloc(), kfree() */
-#include <linux/stddef.h> /* offsetof(), etc. */
-#include <linux/wanrouter.h> /* WAN router definitions */
-
-#include <asm/byteorder.h> /* htons(), etc. */
-
-#include <linux/cyclomx.h> /* Cyclom 2X common user API definitions */
-#include <linux/cycx_x25.h> /* X.25 firmware API definitions */
-
-#include <net/x25device.h>
-
-/* Defines & Macros */
-#define CYCX_X25_MAX_CMD_RETRY 5
-#define CYCX_X25_CHAN_MTU 2048 /* unfragmented logical channel MTU */
-
-/* Data Structures */
-/* This is an extension of the 'struct net_device' we create for each network
- interface to keep the rest of X.25 channel-specific data. */
-struct cycx_x25_channel {
- /* This member must be first. */
- struct net_device *slave; /* WAN slave */
-
- char name[WAN_IFNAME_SZ+1]; /* interface name, ASCIIZ */
- char addr[WAN_ADDRESS_SZ+1]; /* media address, ASCIIZ */
- char *local_addr; /* local media address, ASCIIZ -
- svc thru crossover cable */
- s16 lcn; /* logical channel number/conn.req.key*/
- u8 link;
- struct timer_list timer; /* timer used for svc channel disc. */
- u16 protocol; /* ethertype, 0 - multiplexed */
- u8 svc; /* 0 - permanent, 1 - switched */
- u8 state; /* channel state */
- u8 drop_sequence; /* mark sequence for dropping */
- u32 idle_tmout; /* sec, before disconnecting */
- struct sk_buff *rx_skb; /* receive socket buffer */
- struct cycx_device *card; /* -> owner */
- struct net_device_stats ifstats;/* interface statistics */
-};
-
-/* Function Prototypes */
-/* WAN link driver entry points. These are called by the WAN router module. */
-static int cycx_wan_update(struct wan_device *wandev),
- cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
- wanif_conf_t *conf),
- cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev);
-
-/* Network device interface */
-static int cycx_netdevice_init(struct net_device *dev);
-static int cycx_netdevice_open(struct net_device *dev);
-static int cycx_netdevice_stop(struct net_device *dev);
-static int cycx_netdevice_hard_header(struct sk_buff *skb,
- struct net_device *dev, u16 type,
- const void *daddr, const void *saddr,
- unsigned len);
-static int cycx_netdevice_rebuild_header(struct sk_buff *skb);
-static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-
-static struct net_device_stats *
- cycx_netdevice_get_stats(struct net_device *dev);
-
-/* Interrupt handlers */
-static void cycx_x25_irq_handler(struct cycx_device *card),
- cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd),
- cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd),
- cycx_x25_irq_log(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_stat(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_connect_confirm(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_connect(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_disconnect(struct cycx_device *card,
- struct cycx_x25_cmd *cmd),
- cycx_x25_irq_spurious(struct cycx_device *card,
- struct cycx_x25_cmd *cmd);
-
-/* X.25 firmware interface functions */
-static int cycx_x25_configure(struct cycx_device *card,
- struct cycx_x25_config *conf),
- cycx_x25_get_stats(struct cycx_device *card),
- cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm,
- int len, void *buf),
- cycx_x25_connect_response(struct cycx_device *card,
- struct cycx_x25_channel *chan),
- cycx_x25_disconnect_response(struct cycx_device *card, u8 link,
- u8 lcn);
-
-/* channel functions */
-static int cycx_x25_chan_connect(struct net_device *dev),
- cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb);
-
-static void cycx_x25_chan_disconnect(struct net_device *dev),
- cycx_x25_chan_send_event(struct net_device *dev, u8 event);
-
-/* Miscellaneous functions */
-static void cycx_x25_set_chan_state(struct net_device *dev, u8 state),
- cycx_x25_chan_timer(unsigned long d);
-
-static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble),
- reset_timer(struct net_device *dev);
-
-static u8 bps_to_speed_code(u32 bps);
-static u8 cycx_log2(u32 n);
-
-static unsigned dec_to_uint(u8 *str, int len);
-
-static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
- s16 lcn);
-static struct net_device *
- cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte);
-
-static void cycx_x25_chan_setup(struct net_device *dev);
-
-#ifdef CYCLOMX_X25_DEBUG
-static void hex_dump(char *msg, unsigned char *p, int len);
-static void cycx_x25_dump_config(struct cycx_x25_config *conf);
-static void cycx_x25_dump_stats(struct cycx_x25_stats *stats);
-static void cycx_x25_dump_devs(struct wan_device *wandev);
-#else
-#define hex_dump(msg, p, len)
-#define cycx_x25_dump_config(conf)
-#define cycx_x25_dump_stats(stats)
-#define cycx_x25_dump_devs(wandev)
-#endif
-/* Public Functions */
-
-/* X.25 Protocol Initialization routine.
- *
- * This routine is called by the main Cyclom 2X module during setup. At this
- * point adapter is completely initialized and X.25 firmware is running.
- * o configure adapter
- * o initialize protocol-specific fields of the adapter data space.
- *
- * Return: 0 o.k.
- * < 0 failure. */
-int cycx_x25_wan_init(struct cycx_device *card, wandev_conf_t *conf)
-{
- struct cycx_x25_config cfg;
-
- /* Verify configuration ID */
- if (conf->config_id != WANCONFIG_X25) {
- pr_info("%s: invalid configuration ID %u!\n",
- card->devname, conf->config_id);
- return -EINVAL;
- }
-
- /* Initialize protocol-specific fields */
- card->mbox = card->hw.dpmbase + X25_MBOX_OFFS;
- card->u.x.connection_keys = 0;
- spin_lock_init(&card->u.x.lock);
-
- /* Configure adapter. Here we set reasonable defaults, then parse
- * device configuration structure and set configuration options.
- * Most configuration options are verified and corrected (if
- * necessary) since we can't rely on the adapter to do so and don't
- * want it to fail either. */
- memset(&cfg, 0, sizeof(cfg));
- cfg.link = 0;
- cfg.clock = conf->clocking == WANOPT_EXTERNAL ? 8 : 55;
- cfg.speed = bps_to_speed_code(conf->bps);
- cfg.n3win = 7;
- cfg.n2win = 2;
- cfg.n2 = 5;
- cfg.nvc = 1;
- cfg.npvc = 1;
- cfg.flags = 0x02; /* default = V35 */
- cfg.t1 = 10; /* line carrier timeout */
- cfg.t2 = 29; /* tx timeout */
- cfg.t21 = 180; /* CALL timeout */
- cfg.t23 = 180; /* CLEAR timeout */
-
- /* adjust MTU */
- if (!conf->mtu || conf->mtu >= 512)
- card->wandev.mtu = 512;
- else if (conf->mtu >= 256)
- card->wandev.mtu = 256;
- else if (conf->mtu >= 128)
- card->wandev.mtu = 128;
- else
- card->wandev.mtu = 64;
-
- cfg.pktlen = cycx_log2(card->wandev.mtu);
-
- if (conf->station == WANOPT_DTE) {
- cfg.locaddr = 3; /* DTE */
- cfg.remaddr = 1; /* DCE */
- } else {
- cfg.locaddr = 1; /* DCE */
- cfg.remaddr = 3; /* DTE */
- }
-
- if (conf->interface == WANOPT_RS232)
- cfg.flags = 0; /* FIXME just reset the 2nd bit */
-
- if (conf->u.x25.hi_pvc) {
- card->u.x.hi_pvc = min_t(unsigned int, conf->u.x25.hi_pvc, 4095);
- card->u.x.lo_pvc = min_t(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc);
- }
-
- if (conf->u.x25.hi_svc) {
- card->u.x.hi_svc = min_t(unsigned int, conf->u.x25.hi_svc, 4095);
- card->u.x.lo_svc = min_t(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc);
- }
-
- if (card->u.x.lo_pvc == 255)
- cfg.npvc = 0;
- else
- cfg.npvc = card->u.x.hi_pvc - card->u.x.lo_pvc + 1;
-
- cfg.nvc = card->u.x.hi_svc - card->u.x.lo_svc + 1 + cfg.npvc;
-
- if (conf->u.x25.hdlc_window)
- cfg.n2win = min_t(unsigned int, conf->u.x25.hdlc_window, 7);
-
- if (conf->u.x25.pkt_window)
- cfg.n3win = min_t(unsigned int, conf->u.x25.pkt_window, 7);
-
- if (conf->u.x25.t1)
- cfg.t1 = min_t(unsigned int, conf->u.x25.t1, 30);
-
- if (conf->u.x25.t2)
- cfg.t2 = min_t(unsigned int, conf->u.x25.t2, 30);
-
- if (conf->u.x25.t11_t21)
- cfg.t21 = min_t(unsigned int, conf->u.x25.t11_t21, 30);
-
- if (conf->u.x25.t13_t23)
- cfg.t23 = min_t(unsigned int, conf->u.x25.t13_t23, 30);
-
- if (conf->u.x25.n2)
- cfg.n2 = min_t(unsigned int, conf->u.x25.n2, 30);
-
- /* initialize adapter */
- if (cycx_x25_configure(card, &cfg))
- return -EIO;
-
- /* Initialize protocol-specific fields of adapter data space */
- card->wandev.bps = conf->bps;
- card->wandev.interface = conf->interface;
- card->wandev.clocking = conf->clocking;
- card->wandev.station = conf->station;
- card->isr = cycx_x25_irq_handler;
- card->exec = NULL;
- card->wandev.update = cycx_wan_update;
- card->wandev.new_if = cycx_wan_new_if;
- card->wandev.del_if = cycx_wan_del_if;
- card->wandev.state = WAN_DISCONNECTED;
-
- return 0;
-}
-
-/* WAN Device Driver Entry Points */
-/* Update device status & statistics. */
-static int cycx_wan_update(struct wan_device *wandev)
-{
- /* sanity checks */
- if (!wandev || !wandev->private)
- return -EFAULT;
-
- if (wandev->state == WAN_UNCONFIGURED)
- return -ENODEV;
-
- cycx_x25_get_stats(wandev->private);
-
- return 0;
-}
-
-/* Create new logical channel.
- * This routine is called by the router when ROUTER_IFNEW IOCTL is being
- * handled.
- * o parse media- and hardware-specific configuration
- * o make sure that a new channel can be created
- * o allocate resources, if necessary
- * o prepare network device structure for registration.
- *
- * Return: 0 o.k.
- * < 0 failure (channel will not be created) */
-static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
- wanif_conf_t *conf)
-{
- struct cycx_device *card = wandev->private;
- struct cycx_x25_channel *chan;
- int err = 0;
-
- if (!conf->name[0] || strlen(conf->name) > WAN_IFNAME_SZ) {
- pr_info("%s: invalid interface name!\n", card->devname);
- return -EINVAL;
- }
-
- dev = alloc_netdev(sizeof(struct cycx_x25_channel), conf->name,
- cycx_x25_chan_setup);
- if (!dev)
- return -ENOMEM;
-
- chan = netdev_priv(dev);
- strcpy(chan->name, conf->name);
- chan->card = card;
- chan->link = conf->port;
- chan->protocol = conf->protocol ? ETH_P_X25 : ETH_P_IP;
- chan->rx_skb = NULL;
- /* only used in svc connected thru crossover cable */
- chan->local_addr = NULL;
-
- if (conf->addr[0] == '@') { /* SVC */
- int len = strlen(conf->local_addr);
-
- if (len) {
- if (len > WAN_ADDRESS_SZ) {
- pr_err("%s: %s local addr too long!\n",
- wandev->name, chan->name);
- err = -EINVAL;
- goto error;
- } else {
- chan->local_addr = kmalloc(len + 1, GFP_KERNEL);
-
- if (!chan->local_addr) {
- err = -ENOMEM;
- goto error;
- }
- }
-
- strncpy(chan->local_addr, conf->local_addr,
- WAN_ADDRESS_SZ);
- }
-
- chan->svc = 1;
- strncpy(chan->addr, &conf->addr[1], WAN_ADDRESS_SZ);
- init_timer(&chan->timer);
- chan->timer.function = cycx_x25_chan_timer;
- chan->timer.data = (unsigned long)dev;
-
- /* Set channel timeouts (default if not specified) */
- chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90;
- } else if (isdigit(conf->addr[0])) { /* PVC */
- s16 lcn = dec_to_uint(conf->addr, 0);
-
- if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc)
- chan->lcn = lcn;
- else {
- pr_err("%s: PVC %u is out of range on interface %s!\n",
- wandev->name, lcn, chan->name);
- err = -EINVAL;
- goto error;
- }
- } else {
- pr_err("%s: invalid media address on interface %s!\n",
- wandev->name, chan->name);
- err = -EINVAL;
- goto error;
- }
-
- return 0;
-
-error:
- free_netdev(dev);
- return err;
-}
-
-/* Delete logical channel. */
-static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- if (chan->svc) {
- kfree(chan->local_addr);
- if (chan->state == WAN_CONNECTED)
- del_timer(&chan->timer);
- }
-
- return 0;
-}
-
-
-/* Network Device Interface */
-
-static const struct header_ops cycx_header_ops = {
- .create = cycx_netdevice_hard_header,
- .rebuild = cycx_netdevice_rebuild_header,
-};
-
-static const struct net_device_ops cycx_netdev_ops = {
- .ndo_init = cycx_netdevice_init,
- .ndo_open = cycx_netdevice_open,
- .ndo_stop = cycx_netdevice_stop,
- .ndo_start_xmit = cycx_netdevice_hard_start_xmit,
- .ndo_get_stats = cycx_netdevice_get_stats,
-};
-
-static void cycx_x25_chan_setup(struct net_device *dev)
-{
- /* Initialize device driver entry points */
- dev->netdev_ops = &cycx_netdev_ops;
- dev->header_ops = &cycx_header_ops;
-
- /* Initialize media-specific parameters */
- dev->mtu = CYCX_X25_CHAN_MTU;
- dev->type = ARPHRD_HWX25; /* ARP h/w type */
- dev->hard_header_len = 0; /* media header length */
- dev->addr_len = 0; /* hardware address length */
-}
-
-/* Initialize Linux network interface.
- *
- * This routine is called only once for each interface, during Linux network
- * interface registration. Returning anything but zero will fail interface
- * registration. */
-static int cycx_netdevice_init(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
- struct cycx_device *card = chan->card;
- struct wan_device *wandev = &card->wandev;
-
- if (!chan->svc)
- *(__be16*)dev->dev_addr = htons(chan->lcn);
-
- /* Initialize hardware parameters (just for reference) */
- dev->irq = wandev->irq;
- dev->dma = wandev->dma;
- dev->base_addr = wandev->ioport;
- dev->mem_start = (unsigned long)wandev->maddr;
- dev->mem_end = (unsigned long)(wandev->maddr +
- wandev->msize - 1);
- dev->flags |= IFF_NOARP;
-
- /* Set transmit buffer queue length */
- dev->tx_queue_len = 10;
-
- /* Initialize socket buffers */
- cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
-
- return 0;
-}
-
-/* Open network interface.
- * o prevent module from unloading by incrementing use count
- * o if link is disconnected then initiate connection
- *
- * Return 0 if O.k. or errno. */
-static int cycx_netdevice_open(struct net_device *dev)
-{
- if (netif_running(dev))
- return -EBUSY; /* only one open is allowed */
-
- netif_start_queue(dev);
- return 0;
-}
-
-/* Close network interface.
- * o reset flags.
- * o if there's no more open channels then disconnect physical link. */
-static int cycx_netdevice_stop(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- netif_stop_queue(dev);
-
- if (chan->state == WAN_CONNECTED || chan->state == WAN_CONNECTING)
- cycx_x25_chan_disconnect(dev);
-
- return 0;
-}
-
-/* Build media header.
- * o encapsulate packet according to encapsulation type.
- *
- * The trick here is to put packet type (Ethertype) into 'protocol' field of
- * the socket buffer, so that we don't forget it. If encapsulation fails,
- * set skb->protocol to 0 and discard packet later.
- *
- * Return: media header length. */
-static int cycx_netdevice_hard_header(struct sk_buff *skb,
- struct net_device *dev, u16 type,
- const void *daddr, const void *saddr,
- unsigned len)
-{
- skb->protocol = htons(type);
-
- return dev->hard_header_len;
-}
-
-/* Re-build media header.
- * Return: 1 physical address resolved.
- * 0 physical address not resolved */
-static int cycx_netdevice_rebuild_header(struct sk_buff *skb)
-{
- return 1;
-}
-
-/* Send a packet on a network interface.
- * o set busy flag (marks start of the transmission).
- * o check link state. If link is not up, then drop the packet.
- * o check channel status. If it's down then initiate a call.
- * o pass a packet to corresponding WAN device.
- * o free socket buffer
- *
- * Return: 0 complete (socket buffer must be freed)
- * non-0 packet may be re-transmitted (tbusy must be set)
- *
- * Notes:
- * 1. This routine is called either by the protocol stack or by the "net
- * bottom half" (with interrupts enabled).
- * 2. Setting tbusy flag will inhibit further transmit requests from the
- * protocol stack and can be used for flow control with protocol layer. */
-static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
- struct cycx_device *card = chan->card;
-
- if (!chan->svc)
- chan->protocol = ntohs(skb->protocol);
-
- if (card->wandev.state != WAN_CONNECTED)
- ++chan->ifstats.tx_dropped;
- else if (chan->svc && chan->protocol &&
- chan->protocol != ntohs(skb->protocol)) {
- pr_info("%s: unsupported Ethertype 0x%04X on interface %s!\n",
- card->devname, ntohs(skb->protocol), dev->name);
- ++chan->ifstats.tx_errors;
- } else if (chan->protocol == ETH_P_IP) {
- switch (chan->state) {
- case WAN_DISCONNECTED:
- if (cycx_x25_chan_connect(dev)) {
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- }
- /* fall thru */
- case WAN_CONNECTED:
- reset_timer(dev);
- dev->trans_start = jiffies;
- netif_stop_queue(dev);
-
- if (cycx_x25_chan_send(dev, skb))
- return NETDEV_TX_BUSY;
-
- break;
- default:
- ++chan->ifstats.tx_dropped;
- ++card->wandev.stats.tx_dropped;
- }
- } else { /* chan->protocol == ETH_P_X25 */
- switch (skb->data[0]) {
- case X25_IFACE_DATA:
- break;
- case X25_IFACE_CONNECT:
- cycx_x25_chan_connect(dev);
- goto free_packet;
- case X25_IFACE_DISCONNECT:
- cycx_x25_chan_disconnect(dev);
- goto free_packet;
- default:
- pr_info("%s: unknown %d x25-iface request on %s!\n",
- card->devname, skb->data[0], dev->name);
- ++chan->ifstats.tx_errors;
- goto free_packet;
- }
-
- skb_pull(skb, 1); /* Remove control byte */
- reset_timer(dev);
- dev->trans_start = jiffies;
- netif_stop_queue(dev);
-
- if (cycx_x25_chan_send(dev, skb)) {
- /* prepare for future retransmissions */
- skb_push(skb, 1);
- return NETDEV_TX_BUSY;
- }
- }
-
-free_packet:
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-/* Get Ethernet-style interface statistics.
- * Return a pointer to struct net_device_stats */
-static struct net_device_stats *cycx_netdevice_get_stats(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- return chan ? &chan->ifstats : NULL;
-}
-
-/* Interrupt Handlers */
-/* X.25 Interrupt Service Routine. */
-static void cycx_x25_irq_handler(struct cycx_device *card)
-{
- struct cycx_x25_cmd cmd;
- u16 z = 0;
-
- card->in_isr = 1;
- card->buff_int_mode_unbusy = 0;
- cycx_peek(&card->hw, X25_RXMBOX_OFFS, &cmd, sizeof(cmd));
-
- switch (cmd.command) {
- case X25_DATA_INDICATION:
- cycx_x25_irq_rx(card, &cmd);
- break;
- case X25_ACK_FROM_VC:
- cycx_x25_irq_tx(card, &cmd);
- break;
- case X25_LOG:
- cycx_x25_irq_log(card, &cmd);
- break;
- case X25_STATISTIC:
- cycx_x25_irq_stat(card, &cmd);
- break;
- case X25_CONNECT_CONFIRM:
- cycx_x25_irq_connect_confirm(card, &cmd);
- break;
- case X25_CONNECT_INDICATION:
- cycx_x25_irq_connect(card, &cmd);
- break;
- case X25_DISCONNECT_INDICATION:
- cycx_x25_irq_disconnect(card, &cmd);
- break;
- case X25_DISCONNECT_CONFIRM:
- cycx_x25_irq_disconnect_confirm(card, &cmd);
- break;
- case X25_LINE_ON:
- cycx_set_state(card, WAN_CONNECTED);
- break;
- case X25_LINE_OFF:
- cycx_set_state(card, WAN_DISCONNECTED);
- break;
- default:
- cycx_x25_irq_spurious(card, &cmd);
- break;
- }
-
- cycx_poke(&card->hw, 0, &z, sizeof(z));
- cycx_poke(&card->hw, X25_RXMBOX_OFFS, &z, sizeof(z));
- card->in_isr = 0;
-}
-
-/* Transmit interrupt handler.
- * o Release socket buffer
- * o Clear 'tbusy' flag */
-static void cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
-{
- struct net_device *dev;
- struct wan_device *wandev = &card->wandev;
- u8 lcn;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
-
- /* unbusy device and then dev_tint(); */
- dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
- if (dev) {
- card->buff_int_mode_unbusy = 1;
- netif_wake_queue(dev);
- } else
- pr_err("%s:ackvc for inexistent lcn %d\n", card->devname, lcn);
-}
-
-/* Receive interrupt handler.
- * This routine handles fragmented IP packets using M-bit according to the
- * RFC1356.
- * o map logical channel number to network interface.
- * o allocate socket buffer or append received packet to the existing one.
- * o if M-bit is reset (i.e. it's the last packet in a sequence) then
- * decapsulate packet and pass socket buffer to the protocol stack.
- *
- * Notes:
- * 1. When allocating a socket buffer, if M-bit is set then more data is
- * coming and we have to allocate buffer for the maximum IP packet size
- * expected on this channel.
- * 2. If something goes wrong and X.25 packet has to be dropped (e.g. no
- * socket buffers available) the whole packet sequence must be discarded. */
-static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
-{
- struct wan_device *wandev = &card->wandev;
- struct net_device *dev;
- struct cycx_x25_channel *chan;
- struct sk_buff *skb;
- u8 bitm, lcn;
- int pktlen = cmd->len - 5;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- cycx_peek(&card->hw, cmd->buf + 4, &bitm, sizeof(bitm));
- bitm &= 0x10;
-
- dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
- if (!dev) {
- /* Invalid channel, discard packet */
- pr_info("%s: receiving on orphaned LCN %d!\n",
- card->devname, lcn);
- return;
- }
-
- chan = netdev_priv(dev);
- reset_timer(dev);
-
- if (chan->drop_sequence) {
- if (!bitm)
- chan->drop_sequence = 0;
- else
- return;
- }
-
- if ((skb = chan->rx_skb) == NULL) {
- /* Allocate new socket buffer */
- int bufsize = bitm ? dev->mtu : pktlen;
-
- if ((skb = dev_alloc_skb((chan->protocol == ETH_P_X25 ? 1 : 0) +
- bufsize +
- dev->hard_header_len)) == NULL) {
- pr_info("%s: no socket buffers available!\n",
- card->devname);
- chan->drop_sequence = 1;
- ++chan->ifstats.rx_dropped;
- return;
- }
-
- if (chan->protocol == ETH_P_X25) /* X.25 socket layer control */
- /* 0 = data packet (dev_alloc_skb zeroed skb->data) */
- skb_put(skb, 1);
-
- skb->dev = dev;
- skb->protocol = htons(chan->protocol);
- chan->rx_skb = skb;
- }
-
- if (skb_tailroom(skb) < pktlen) {
- /* No room for the packet. Call off the whole thing! */
- dev_kfree_skb_irq(skb);
- chan->rx_skb = NULL;
-
- if (bitm)
- chan->drop_sequence = 1;
-
- pr_info("%s: unexpectedly long packet sequence on interface %s!\n",
- card->devname, dev->name);
- ++chan->ifstats.rx_length_errors;
- return;
- }
-
- /* Append packet to the socket buffer */
- cycx_peek(&card->hw, cmd->buf + 5, skb_put(skb, pktlen), pktlen);
-
- if (bitm)
- return; /* more data is coming */
-
- chan->rx_skb = NULL; /* dequeue packet */
-
- ++chan->ifstats.rx_packets;
- chan->ifstats.rx_bytes += pktlen;
-
- skb_reset_mac_header(skb);
- netif_rx(skb);
-}
-
-/* Connect interrupt handler. */
-static void cycx_x25_irq_connect(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- struct wan_device *wandev = &card->wandev;
- struct net_device *dev = NULL;
- struct cycx_x25_channel *chan;
- u8 d[32],
- loc[24],
- rem[24];
- u8 lcn, sizeloc, sizerem;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- cycx_peek(&card->hw, cmd->buf + 5, &sizeloc, sizeof(sizeloc));
- cycx_peek(&card->hw, cmd->buf + 6, d, cmd->len - 6);
-
- sizerem = sizeloc >> 4;
- sizeloc &= 0x0F;
-
- loc[0] = rem[0] = '\0';
-
- if (sizeloc)
- nibble_to_byte(d, loc, sizeloc, 0);
-
- if (sizerem)
- nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1);
-
- dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n",
- __func__, lcn, loc, rem);
-
- dev = cycx_x25_get_dev_by_dte_addr(wandev, rem);
- if (!dev) {
- /* Invalid channel, discard packet */
- pr_info("%s: connect not expected: remote %s!\n",
- card->devname, rem);
- return;
- }
-
- chan = netdev_priv(dev);
- chan->lcn = lcn;
- cycx_x25_connect_response(card, chan);
- cycx_x25_set_chan_state(dev, WAN_CONNECTED);
-}
-
-/* Connect confirm interrupt handler. */
-static void cycx_x25_irq_connect_confirm(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- struct wan_device *wandev = &card->wandev;
- struct net_device *dev;
- struct cycx_x25_channel *chan;
- u8 lcn, key;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
- dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n",
- card->devname, __func__, lcn, key);
-
- dev = cycx_x25_get_dev_by_lcn(wandev, -key);
- if (!dev) {
- /* Invalid channel, discard packet */
- clear_bit(--key, (void*)&card->u.x.connection_keys);
- pr_info("%s: connect confirm not expected: lcn %d, key=%d!\n",
- card->devname, lcn, key);
- return;
- }
-
- clear_bit(--key, (void*)&card->u.x.connection_keys);
- chan = netdev_priv(dev);
- chan->lcn = lcn;
- cycx_x25_set_chan_state(dev, WAN_CONNECTED);
-}
-
-/* Disconnect confirm interrupt handler. */
-static void cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- struct wan_device *wandev = &card->wandev;
- struct net_device *dev;
- u8 lcn;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- dprintk(1, KERN_INFO "%s: %s:lcn=%d\n",
- card->devname, __func__, lcn);
- dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
- if (!dev) {
- /* Invalid channel, discard packet */
- pr_info("%s:disconnect confirm not expected!:lcn %d\n",
- card->devname, lcn);
- return;
- }
-
- cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
-}
-
-/* disconnect interrupt handler. */
-static void cycx_x25_irq_disconnect(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- struct wan_device *wandev = &card->wandev;
- struct net_device *dev;
- u8 lcn;
-
- cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
- dprintk(1, KERN_INFO "%s:lcn=%d\n", __func__, lcn);
-
- dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
- if (dev) {
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- cycx_x25_disconnect_response(card, chan->link, lcn);
- cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
- } else
- cycx_x25_disconnect_response(card, 0, lcn);
-}
-
-/* LOG interrupt handler. */
-static void cycx_x25_irq_log(struct cycx_device *card, struct cycx_x25_cmd *cmd)
-{
-#if CYCLOMX_X25_DEBUG
- char bf[20];
- u16 size, toread, link, msg_code;
- u8 code, routine;
-
- cycx_peek(&card->hw, cmd->buf, &msg_code, sizeof(msg_code));
- cycx_peek(&card->hw, cmd->buf + 2, &link, sizeof(link));
- cycx_peek(&card->hw, cmd->buf + 4, &size, sizeof(size));
- /* at most 20 bytes are available... thanks to Daniela :) */
- toread = size < 20 ? size : 20;
- cycx_peek(&card->hw, cmd->buf + 10, &bf, toread);
- cycx_peek(&card->hw, cmd->buf + 10 + toread, &code, 1);
- cycx_peek(&card->hw, cmd->buf + 10 + toread + 1, &routine, 1);
-
- pr_info("cycx_x25_irq_handler: X25_LOG (0x4500) indic.:\n");
- pr_info("cmd->buf=0x%X\n", cmd->buf);
- pr_info("Log message code=0x%X\n", msg_code);
- pr_info("Link=%d\n", link);
- pr_info("log code=0x%X\n", code);
- pr_info("log routine=0x%X\n", routine);
- pr_info("Message size=%d\n", size);
- hex_dump("Message", bf, toread);
-#endif
-}
-
-/* STATISTIC interrupt handler. */
-static void cycx_x25_irq_stat(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- cycx_peek(&card->hw, cmd->buf, &card->u.x.stats,
- sizeof(card->u.x.stats));
- hex_dump("cycx_x25_irq_stat", (unsigned char*)&card->u.x.stats,
- sizeof(card->u.x.stats));
- cycx_x25_dump_stats(&card->u.x.stats);
- wake_up_interruptible(&card->wait_stats);
-}
-
-/* Spurious interrupt handler.
- * o print a warning
- * If number of spurious interrupts exceeded some limit, then ??? */
-static void cycx_x25_irq_spurious(struct cycx_device *card,
- struct cycx_x25_cmd *cmd)
-{
- pr_info("%s: spurious interrupt (0x%X)!\n",
- card->devname, cmd->command);
-}
-#ifdef CYCLOMX_X25_DEBUG
-static void hex_dump(char *msg, unsigned char *p, int len)
-{
- print_hex_dump(KERN_INFO, msg, DUMP_PREFIX_OFFSET, 16, 1,
- p, len, true);
-}
-#endif
-
-/* Cyclom 2X Firmware-Specific Functions */
-/* Exec X.25 command. */
-static int x25_exec(struct cycx_device *card, int command, int link,
- void *d1, int len1, void *d2, int len2)
-{
- struct cycx_x25_cmd c;
- unsigned long flags;
- u32 addr = 0x1200 + 0x2E0 * link + 0x1E2;
- u8 retry = CYCX_X25_MAX_CMD_RETRY;
- int err = 0;
-
- c.command = command;
- c.link = link;
- c.len = len1 + len2;
-
- spin_lock_irqsave(&card->u.x.lock, flags);
-
- /* write command */
- cycx_poke(&card->hw, X25_MBOX_OFFS, &c, sizeof(c) - sizeof(c.buf));
-
- /* write X.25 data */
- if (d1) {
- cycx_poke(&card->hw, addr, d1, len1);
-
- if (d2) {
- if (len2 > 254) {
- u32 addr1 = 0xA00 + 0x400 * link;
-
- cycx_poke(&card->hw, addr + len1, d2, 249);
- cycx_poke(&card->hw, addr1, ((u8*)d2) + 249,
- len2 - 249);
- } else
- cycx_poke(&card->hw, addr + len1, d2, len2);
- }
- }
-
- /* generate interruption, executing command */
- cycx_intr(&card->hw);
-
- /* wait till card->mbox == 0 */
- do {
- err = cycx_exec(card->mbox);
- } while (retry-- && err);
-
- spin_unlock_irqrestore(&card->u.x.lock, flags);
-
- return err;
-}
-
-/* Configure adapter. */
-static int cycx_x25_configure(struct cycx_device *card,
- struct cycx_x25_config *conf)
-{
- struct {
- u16 nlinks;
- struct cycx_x25_config conf[2];
- } x25_cmd_conf;
-
- memset(&x25_cmd_conf, 0, sizeof(x25_cmd_conf));
- x25_cmd_conf.nlinks = 2;
- x25_cmd_conf.conf[0] = *conf;
- /* FIXME: we need to find a way in the wanrouter framework
- to configure the second link; for now let's reuse the
- first link's config, forcing the interface type to RS232,
- the speed to 38400 and the clock to external */
- x25_cmd_conf.conf[1] = *conf;
- x25_cmd_conf.conf[1].link = 1;
- x25_cmd_conf.conf[1].speed = 5; /* 38400 */
- x25_cmd_conf.conf[1].clock = 8;
- x25_cmd_conf.conf[1].flags = 0; /* default = RS232 */
-
- cycx_x25_dump_config(&x25_cmd_conf.conf[0]);
- cycx_x25_dump_config(&x25_cmd_conf.conf[1]);
-
- return x25_exec(card, X25_CONFIG, 0,
- &x25_cmd_conf, sizeof(x25_cmd_conf), NULL, 0);
-}
-
-/* Get protocol statistics. */
-static int cycx_x25_get_stats(struct cycx_device *card)
-{
- /* the firmware expects 20 in the size field!!!
- thanks to Daniela */
- int err = x25_exec(card, X25_STATISTIC, 0, NULL, 20, NULL, 0);
-
- if (err)
- return err;
-
- interruptible_sleep_on(&card->wait_stats);
-
- if (signal_pending(current))
- return -EINTR;
-
- card->wandev.stats.rx_packets = card->u.x.stats.n2_rx_frames;
- card->wandev.stats.rx_over_errors = card->u.x.stats.rx_over_errors;
- card->wandev.stats.rx_crc_errors = card->u.x.stats.rx_crc_errors;
- card->wandev.stats.rx_length_errors = 0; /* not available from fw */
- card->wandev.stats.rx_frame_errors = 0; /* not available from fw */
- card->wandev.stats.rx_missed_errors = card->u.x.stats.rx_aborts;
- card->wandev.stats.rx_dropped = 0; /* not available from fw */
- card->wandev.stats.rx_errors = 0; /* not available from fw */
- card->wandev.stats.tx_packets = card->u.x.stats.n2_tx_frames;
- card->wandev.stats.tx_aborted_errors = card->u.x.stats.tx_aborts;
- card->wandev.stats.tx_dropped = 0; /* not available from fw */
- card->wandev.stats.collisions = 0; /* not available from fw */
- card->wandev.stats.tx_errors = 0; /* not available from fw */
-
- cycx_x25_dump_devs(&card->wandev);
-
- return 0;
-}
-
-/* Pack ASCII digits into BCD; return the number of complete bytes written. */
-static int byte_to_nibble(u8 *s, u8 *d, char *nibble)
-{
- int i = 0;
-
- if (*nibble && *s) {
- d[i] |= *s++ - '0';
- *nibble = 0;
- ++i;
- }
-
- while (*s) {
- d[i] = (*s - '0') << 4;
- if (*(s + 1))
- d[i] |= *(s + 1) - '0';
- else {
- *nibble = 1;
- break;
- }
- ++i;
- s += 2;
- }
-
- return i;
-}
-
-static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble)
-{
- if (nibble) {
- *d++ = '0' + (*s++ & 0x0F);
- --len;
- }
-
- while (len) {
- *d++ = '0' + (*s >> 4);
-
- if (--len) {
- *d++ = '0' + (*s & 0x0F);
- --len;
- } else break;
-
- ++s;
- }
-
- *d = '\0';
-}
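
byte_to_nibble() and nibble_to_byte() convert between an ASCIIZ X.121 address and the BCD form the firmware expects in call packets; the 'nibble' flag carries a leftover half byte from an odd-length address into the next call. A small user-space round trip using the same logic (function names and the address below are illustrative):

#include <stdio.h>

typedef unsigned char u8;

/* Pack ASCII digits into BCD; returns completed bytes, sets *nibble when an
 * odd trailing digit is left in the high half of the next byte. */
static int pack_digits(const u8 *s, u8 *d, char *nibble)
{
    int i = 0;

    if (*nibble && *s) {
        d[i] |= *s++ - '0';
        *nibble = 0;
        ++i;
    }
    while (*s) {
        d[i] = (u8)((*s - '0') << 4);
        if (*(s + 1))
            d[i] |= *(s + 1) - '0';
        else {
            *nibble = 1;
            break;
        }
        ++i;
        s += 2;
    }
    return i;
}

/* Unpack 'len' BCD digits back into an ASCIIZ string. */
static void unpack_digits(const u8 *s, u8 *d, u8 len, u8 nibble)
{
    if (nibble) {
        *d++ = '0' + (*s++ & 0x0F);
        --len;
    }
    while (len) {
        *d++ = '0' + (*s >> 4);
        if (--len) {
            *d++ = '0' + (*s & 0x0F);
            --len;
        } else
            break;
        ++s;
    }
    *d = '\0';
}

int main(void)
{
    u8 addr[] = "2630", packed[8] = { 0 }, text[16];
    char nibble = 0;
    int n = pack_digits(addr, packed, &nibble);

    /* "2630" packs into 0x26 0x30: two complete bytes, no leftover nibble */
    printf("bytes=%d packed=0x%02x 0x%02x nibble=%d\n",
           n, packed[0], packed[1], nibble);

    unpack_digits(packed, text, 4, 0);
    printf("unpacked=\"%s\"\n", (char *)text);    /* "2630" again */
    return 0;
}
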
-
-/* Place X.25 call. */
-static int x25_place_call(struct cycx_device *card,
- struct cycx_x25_channel *chan)
-{
- int err = 0,
- len;
- char d[64],
- nibble = 0,
- mylen = chan->local_addr ? strlen(chan->local_addr) : 0,
- remotelen = strlen(chan->addr);
- u8 key;
-
- if (card->u.x.connection_keys == ~0U) {
- pr_info("%s: too many simultaneous connection requests!\n",
- card->devname);
- return -EAGAIN;
- }
-
- key = ffz(card->u.x.connection_keys);
- set_bit(key, (void*)&card->u.x.connection_keys);
- ++key;
- dprintk(1, KERN_INFO "%s:x25_place_call:key=%d\n", card->devname, key);
- memset(d, 0, sizeof(d));
- d[1] = key; /* user key */
- d[2] = 0x10;
- d[4] = 0x0B;
-
- len = byte_to_nibble(chan->addr, d + 6, &nibble);
-
- if (chan->local_addr)
- len += byte_to_nibble(chan->local_addr, d + 6 + len, &nibble);
-
- if (nibble)
- ++len;
-
- d[5] = mylen << 4 | remotelen;
- d[6 + len + 1] = 0xCC; /* TCP/IP over X.25, thanks to Daniela :) */
-
- if ((err = x25_exec(card, X25_CONNECT_REQUEST, chan->link,
- &d, 7 + len + 1, NULL, 0)) != 0)
- clear_bit(--key, (void*)&card->u.x.connection_keys);
- else
- chan->lcn = -key;
-
- return err;
-}
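
x25_place_call() hands out connection keys from the connection_keys bitmap: the lowest clear bit (ffz) becomes the 1-based key sent to the firmware, and the channel parks -key in chan->lcn until the connect confirm arrives, where cycx_x25_irq_connect_confirm() looks the device up by -key, frees the bit and installs the real LCN. A user-space sketch of that bookkeeping, with ffz() replaced by a plain loop and an example LCN standing in for the value the board reports:

#include <stdio.h>

static unsigned int connection_keys;    /* one bit per outstanding CALL REQUEST */

/* find first zero bit, standing in for the kernel's ffz() */
static int first_free_key(unsigned int bits)
{
    int i;

    for (i = 0; i < 32; ++i)
        if (!(bits & (1u << i)))
            return i;
    return -1;
}

int main(void)
{
    int key = first_free_key(connection_keys);
    short lcn;

    connection_keys |= 1u << key;    /* set_bit(key, ...) */
    ++key;                           /* keys go to the firmware 1-based */
    lcn = -key;                      /* parked in chan->lcn until the confirm */
    printf("call placed: key=%d, provisional lcn=%d\n", key, lcn);

    /* connect confirm: the board echoes the key; the driver frees the bit
     * and installs the real logical channel number it was assigned */
    --key;
    connection_keys &= ~(1u << key); /* clear_bit(key, ...) */
    lcn = 5;                         /* example LCN reported by the firmware */
    printf("confirmed: lcn=%d, keys now 0x%x\n", lcn, connection_keys);
    return 0;
}
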
-
-/* Place X.25 CONNECT RESPONSE. */
-static int cycx_x25_connect_response(struct cycx_device *card,
- struct cycx_x25_channel *chan)
-{
- u8 d[8];
-
- memset(d, 0, sizeof(d));
- d[0] = d[3] = chan->lcn;
- d[2] = 0x10;
- d[4] = 0x0F;
- d[7] = 0xCC; /* TCP/IP over X.25, thanks Daniela */
-
- return x25_exec(card, X25_CONNECT_RESPONSE, chan->link, &d, 8, NULL, 0);
-}
-
-/* Place X.25 DISCONNECT RESPONSE. */
-static int cycx_x25_disconnect_response(struct cycx_device *card, u8 link,
- u8 lcn)
-{
- char d[5];
-
- memset(d, 0, sizeof(d));
- d[0] = d[3] = lcn;
- d[2] = 0x10;
- d[4] = 0x17;
-
- return x25_exec(card, X25_DISCONNECT_RESPONSE, link, &d, 5, NULL, 0);
-}
-
-/* Clear X.25 call. */
-static int x25_clear_call(struct cycx_device *card, u8 link, u8 lcn, u8 cause,
- u8 diagn)
-{
- u8 d[7];
-
- memset(d, 0, sizeof(d));
- d[0] = d[3] = lcn;
- d[2] = 0x10;
- d[4] = 0x13;
- d[5] = cause;
- d[6] = diagn;
-
- return x25_exec(card, X25_DISCONNECT_REQUEST, link, d, 7, NULL, 0);
-}
-
-/* Send X.25 data packet. */
-static int cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm,
- int len, void *buf)
-{
- u8 d[] = "?\xFF\x10??";
-
- d[0] = d[3] = lcn;
- d[4] = bitm;
-
- return x25_exec(card, X25_DATA_REQUEST, link, &d, 5, buf, len);
-}
-
-/* Miscellaneous */
-/* Find network device by its channel number. */
-static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
- s16 lcn)
-{
- struct net_device *dev = wandev->dev;
- struct cycx_x25_channel *chan;
-
- while (dev) {
- chan = netdev_priv(dev);
-
- if (chan->lcn == lcn)
- break;
- dev = chan->slave;
- }
- return dev;
-}
-
-/* Find network device by its remote dte address. */
-static struct net_device *
- cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte)
-{
- struct net_device *dev = wandev->dev;
- struct cycx_x25_channel *chan;
-
- while (dev) {
- chan = netdev_priv(dev);
-
- if (!strcmp(chan->addr, dte))
- break;
- dev = chan->slave;
- }
- return dev;
-}
-
-/* Initiate connection on the logical channel.
- * o for PVC we just get channel configuration
- * o for SVCs place an X.25 call
- *
- * Return: 0 connected
- * >0 connection in progress
- * <0 failure */
-static int cycx_x25_chan_connect(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
- struct cycx_device *card = chan->card;
-
- if (chan->svc) {
- if (!chan->addr[0])
- return -EINVAL; /* no destination address */
-
- dprintk(1, KERN_INFO "%s: placing X.25 call to %s...\n",
- card->devname, chan->addr);
-
- if (x25_place_call(card, chan))
- return -EIO;
-
- cycx_x25_set_chan_state(dev, WAN_CONNECTING);
- return 1;
- } else
- cycx_x25_set_chan_state(dev, WAN_CONNECTED);
-
- return 0;
-}
-
-/* Disconnect logical channel.
- * o if SVC then clear X.25 call */
-static void cycx_x25_chan_disconnect(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- if (chan->svc) {
- x25_clear_call(chan->card, chan->link, chan->lcn, 0, 0);
- cycx_x25_set_chan_state(dev, WAN_DISCONNECTING);
- } else
- cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
-}
-
-/* Called by kernel timer */
-static void cycx_x25_chan_timer(unsigned long d)
-{
- struct net_device *dev = (struct net_device *)d;
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- if (chan->state == WAN_CONNECTED)
- cycx_x25_chan_disconnect(dev);
- else
- pr_err("%s: %s for svc (%s) not connected!\n",
- chan->card->devname, __func__, dev->name);
-}
-
-/* Set logical channel state. */
-static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
- struct cycx_device *card = chan->card;
- unsigned long flags;
- char *string_state = NULL;
-
- spin_lock_irqsave(&card->lock, flags);
-
- if (chan->state != state) {
- if (chan->svc && chan->state == WAN_CONNECTED)
- del_timer(&chan->timer);
-
- switch (state) {
- case WAN_CONNECTED:
- string_state = "connected!";
- *(__be16*)dev->dev_addr = htons(chan->lcn);
- netif_wake_queue(dev);
- reset_timer(dev);
-
- if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev,
- X25_IFACE_CONNECT);
-
- break;
- case WAN_CONNECTING:
- string_state = "connecting...";
- break;
- case WAN_DISCONNECTING:
- string_state = "disconnecting...";
- break;
- case WAN_DISCONNECTED:
- string_state = "disconnected!";
-
- if (chan->svc) {
- *(unsigned short*)dev->dev_addr = 0;
- chan->lcn = 0;
- }
-
- if (chan->protocol == ETH_P_X25)
- cycx_x25_chan_send_event(dev,
- X25_IFACE_DISCONNECT);
-
- netif_wake_queue(dev);
- break;
- }
-
- pr_info("%s: interface %s %s\n",
- card->devname, dev->name, string_state);
- chan->state = state;
- }
-
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-/* Send packet on a logical channel.
- * When this function is called, tx_skb field of the channel data space
- * points to the transmit socket buffer. When transmission is complete,
- * release socket buffer and reset 'tbusy' flag.
- *
- * Return: 0 - transmission complete
- * 1 - busy
- *
- * Notes:
- * 1. If packet length is greater than MTU for this channel, we'll fragment
- * the packet into 'complete sequence' using M-bit.
- * 2. When transmission is complete, an event notification should be issued
- * to the router. */
-static int cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
- struct cycx_device *card = chan->card;
- int bitm = 0; /* final packet */
- unsigned len = skb->len;
-
- if (skb->len > card->wandev.mtu) {
- len = card->wandev.mtu;
- bitm = 0x10; /* set M-bit (more data) */
- }
-
- if (cycx_x25_send(card, chan->link, chan->lcn, bitm, len, skb->data))
- return 1;
-
- if (bitm) {
- skb_pull(skb, len);
- return 1;
- }
-
- ++chan->ifstats.tx_packets;
- chan->ifstats.tx_bytes += len;
-
- return 0;
-}
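
Together with the xmit routine, cycx_x25_chan_send() implements the M-bit fragmentation described in the comment above: while the skb is longer than the link MTU it sends one MTU-sized fragment with the M-bit (0x10) set, pulls the skb and returns busy so the stack requeues the remainder; the final fragment goes out with the M-bit clear. A user-space sketch of that flow, with the firmware call replaced by a stub and the sizes made up:

#include <stdio.h>

#define LINK_MTU 512

/* stand-in for cycx_x25_send(): just report what would hit the wire */
static int fake_send(int bitm, int len)
{
    printf("fragment: %3d bytes, M-bit %s\n", len, bitm ? "set" : "clear");
    return 0;    /* 0 = accepted by the board */
}

/* mirrors the driver's decision: fragment while the data exceeds the MTU */
static int chan_send(int *remaining)
{
    int bitm = 0, len = *remaining;

    if (len > LINK_MTU) {
        len = LINK_MTU;
        bitm = 0x10;         /* M-bit: more data follows */
    }
    if (fake_send(bitm, len))
        return 1;            /* board busy: retry the same fragment later */
    if (bitm) {
        *remaining -= len;   /* skb_pull() in the real driver */
        return 1;            /* NETDEV_TX_BUSY makes the stack requeue */
    }
    return 0;                /* complete sequence transmitted */
}

int main(void)
{
    int remaining = 1200;    /* pretend skb->len */

    while (chan_send(&remaining))
        ;    /* the network stack's requeue/retry loop stands in here */
    return 0;
}
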
-
-/* Send event (connection, disconnection, etc) to X.25 socket layer */
-
-static void cycx_x25_chan_send_event(struct net_device *dev, u8 event)
-{
- struct sk_buff *skb;
- unsigned char *ptr;
-
- if ((skb = dev_alloc_skb(1)) == NULL) {
- pr_err("%s: out of memory\n", __func__);
- return;
- }
-
- ptr = skb_put(skb, 1);
- *ptr = event;
-
- skb->protocol = x25_type_trans(skb, dev);
- netif_rx(skb);
-}
-
-/* Convert line speed in bps to a number used by cyclom 2x code. */
-static u8 bps_to_speed_code(u32 bps)
-{
- u8 number = 0; /* defaults to the lowest (1200) speed ;> */
-
- if (bps >= 512000) number = 8;
- else if (bps >= 256000) number = 7;
- else if (bps >= 64000) number = 6;
- else if (bps >= 38400) number = 5;
- else if (bps >= 19200) number = 4;
- else if (bps >= 9600) number = 3;
- else if (bps >= 4800) number = 2;
- else if (bps >= 2400) number = 1;
-
- return number;
-}
-
-/* log base 2 */
-static u8 cycx_log2(u32 n)
-{
- u8 log = 0;
-
- if (!n)
- return 0;
-
- while (n > 1) {
- n >>= 1;
- ++log;
- }
-
- return log;
-}
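
cycx_log2() only exists to turn the negotiated MTU into the X.25 packet-size code (cfg.pktlen), i.e. the base-2 logarithm of the packet length, so the MTUs the setup path allows (64/128/256/512) encode as 6/7/8/9. A quick stand-alone check of that mapping:

#include <stdio.h>

static unsigned int ilog2_floor(unsigned int n)
{
    unsigned int log = 0;

    if (!n)
        return 0;
    while (n > 1) {
        n >>= 1;
        ++log;
    }
    return log;
}

int main(void)
{
    const unsigned int mtus[] = { 64, 128, 256, 512 };
    unsigned int i;

    for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); ++i)
        printf("mtu %u -> pktlen code %u\n", mtus[i], ilog2_floor(mtus[i]));
    return 0;
}
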
-
-/* Convert decimal string to unsigned integer.
- * If len != 0 then only 'len' characters of the string are converted. */
-static unsigned dec_to_uint(u8 *str, int len)
-{
- unsigned val = 0;
-
- if (!len)
- len = strlen(str);
-
- for (; len && isdigit(*str); ++str, --len)
- val = (val * 10) + (*str - (unsigned) '0');
-
- return val;
-}
-
-static void reset_timer(struct net_device *dev)
-{
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- if (chan->svc)
- mod_timer(&chan->timer, jiffies+chan->idle_tmout*HZ);
-}
-#ifdef CYCLOMX_X25_DEBUG
-static void cycx_x25_dump_config(struct cycx_x25_config *conf)
-{
- pr_info("X.25 configuration\n");
- pr_info("-----------------\n");
- pr_info("link number=%d\n", conf->link);
- pr_info("line speed=%d\n", conf->speed);
- pr_info("clock=%sternal\n", conf->clock == 8 ? "Ex" : "In");
- pr_info("# level 2 retransm.=%d\n", conf->n2);
- pr_info("level 2 window=%d\n", conf->n2win);
- pr_info("level 3 window=%d\n", conf->n3win);
- pr_info("# logical channels=%d\n", conf->nvc);
- pr_info("level 3 pkt len=%d\n", conf->pktlen);
- pr_info("my address=%d\n", conf->locaddr);
- pr_info("remote address=%d\n", conf->remaddr);
- pr_info("t1=%d seconds\n", conf->t1);
- pr_info("t2=%d seconds\n", conf->t2);
- pr_info("t21=%d seconds\n", conf->t21);
- pr_info("# PVCs=%d\n", conf->npvc);
- pr_info("t23=%d seconds\n", conf->t23);
- pr_info("flags=0x%x\n", conf->flags);
-}
-
-static void cycx_x25_dump_stats(struct cycx_x25_stats *stats)
-{
- pr_info("X.25 statistics\n");
- pr_info("--------------\n");
- pr_info("rx_crc_errors=%d\n", stats->rx_crc_errors);
- pr_info("rx_over_errors=%d\n", stats->rx_over_errors);
- pr_info("n2_tx_frames=%d\n", stats->n2_tx_frames);
- pr_info("n2_rx_frames=%d\n", stats->n2_rx_frames);
- pr_info("tx_timeouts=%d\n", stats->tx_timeouts);
- pr_info("rx_timeouts=%d\n", stats->rx_timeouts);
- pr_info("n3_tx_packets=%d\n", stats->n3_tx_packets);
- pr_info("n3_rx_packets=%d\n", stats->n3_rx_packets);
- pr_info("tx_aborts=%d\n", stats->tx_aborts);
- pr_info("rx_aborts=%d\n", stats->rx_aborts);
-}
-
-static void cycx_x25_dump_devs(struct wan_device *wandev)
-{
- struct net_device *dev = wandev->dev;
-
- pr_info("X.25 dev states\n");
- pr_info("name: addr: txoff: protocol:\n");
- pr_info("---------------------------------------\n");
-
- while(dev) {
- struct cycx_x25_channel *chan = netdev_priv(dev);
-
- pr_info("%-5.5s %-15.15s %d ETH_P_%s\n",
- chan->name, chan->addr, netif_queue_stopped(dev),
- chan->protocol == ETH_P_IP ? "IP" : "X25");
- dev = chan->slave;
- }
-}
-
-#endif /* CYCLOMX_X25_DEBUG */
-/* End */
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 56941d6547eb..3f0c4f268751 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2448,11 +2448,9 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Allocate driver private data */
- card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL);
- if (card == NULL) {
- pr_err("FarSync card found but insufficient memory for driver storage\n");
+ card = kzalloc(sizeof(struct fst_card_info), GFP_KERNEL);
+ if (card == NULL)
return -ENOMEM;
- }
/* Try to enable the device */
if ((err = pci_enable_device(pdev)) != 0) {
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 10cc7df95498..a0a932c63d0a 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -280,14 +280,13 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
if (!try_module_get(proto->module))
return -ENOSYS;
- if (size)
- if ((dev_to_hdlc(dev)->state = kmalloc(size,
- GFP_KERNEL)) == NULL) {
- netdev_warn(dev,
- "Memory squeeze on hdlc_proto_attach()\n");
+ if (size) {
+ dev_to_hdlc(dev)->state = kmalloc(size, GFP_KERNEL);
+ if (dev_to_hdlc(dev)->state == NULL) {
module_put(proto->module);
return -ENOBUFS;
}
+ }
dev_to_hdlc(dev)->proto = proto;
return 0;
}
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 44db8b75a531..5895f1978691 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -128,7 +128,6 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
rbuff = kmalloc(len + 4, GFP_ATOMIC);
if (xbuff == NULL || rbuff == NULL) {
- netdev_warn(dev, "unable to grow X.25 buffers, MTU change cancelled\n");
kfree(xbuff);
kfree(rbuff);
return -ENOMEM;
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index def12b38cbf7..c9c711dcd0e6 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1055,7 +1055,6 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
result = 0;
}
net_dev->addr_len = ETH_ALEN;
- memcpy(net_dev->perm_addr, ack_buf.ack_pl, ETH_ALEN);
memcpy(net_dev->dev_addr, ack_buf.ack_pl, ETH_ALEN);
error_read_mac:
d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, result);
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 1d76ae855f07..48896138418f 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -156,7 +156,7 @@ void i2400m_wake_tx_work(struct work_struct *ws)
struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
struct device *dev = i2400m_dev(i2400m);
- struct sk_buff *skb = i2400m->wake_tx_skb;
+ struct sk_buff *skb;
unsigned long flags;
spin_lock_irqsave(&i2400m->tx_lock, flags);
@@ -236,23 +236,26 @@ void i2400m_tx_prep_header(struct sk_buff *skb)
void i2400m_net_wake_stop(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *wake_tx_skb;
+ unsigned long flags;
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
- /* See i2400m_hard_start_xmit(), references are taken there
- * and here we release them if the work was still
- * pending. Note we can't differentiate work not pending vs
- * never scheduled, so the NULL check does that. */
- if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
- && i2400m->wake_tx_skb != NULL) {
- unsigned long flags;
- struct sk_buff *wake_tx_skb;
- spin_lock_irqsave(&i2400m->tx_lock, flags);
- wake_tx_skb = i2400m->wake_tx_skb; /* compat help */
- i2400m->wake_tx_skb = NULL; /* compat help */
- spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ /*
+ * See i2400m_hard_start_xmit(), references are taken there and
+ * here we release them if the packet was still pending.
+ */
+ cancel_work_sync(&i2400m->wake_tx_ws);
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ wake_tx_skb = i2400m->wake_tx_skb;
+ i2400m->wake_tx_skb = NULL;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+ if (wake_tx_skb) {
i2400m_put(i2400m);
kfree_skb(wake_tx_skb);
}
+
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
@@ -288,7 +291,7 @@ int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
* and if pending, release those resources. */
result = 0;
spin_lock_irqsave(&i2400m->tx_lock, flags);
- if (!work_pending(&i2400m->wake_tx_ws)) {
+ if (!i2400m->wake_tx_skb) {
netif_stop_queue(net_dev);
i2400m_get(i2400m);
i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */
@@ -596,12 +599,12 @@ static void i2400m_get_drvinfo(struct net_device *net_dev,
{
struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
- strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
- strncpy(info->fw_version,
- i2400m->fw_name ? : "", sizeof(info->fw_version) - 1);
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->fw_version, i2400m->fw_name ? : "",
+ sizeof(info->fw_version));
if (net_dev->dev.parent)
- strncpy(info->bus_info, dev_name(net_dev->dev.parent),
- sizeof(info->bus_info) - 1);
+ strlcpy(info->bus_info, dev_name(net_dev->dev.parent),
+ sizeof(info->bus_info));
}
static const struct ethtool_ops i2400m_ethtool_ops = {
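The netdev.c rework above stops reading i2400m->wake_tx_skb before the work is cancelled: it cancels the work unconditionally, then takes the pending skb under tx_lock and releases the reference outside the lock. A minimal sketch of that teardown pattern, with hypothetical names (struct example, tx_work, obj_lock, pending_skb, example_put()) rather than the driver's real layout:

static void example_stop(struct example *ex)
{
	struct sk_buff *skb;
	unsigned long flags;

	/* Make sure the deferred work is neither queued nor still running. */
	cancel_work_sync(&ex->tx_work);

	/* Steal the pending pointer under the lock, free it outside. */
	spin_lock_irqsave(&ex->obj_lock, flags);
	skb = ex->pending_skb;
	ex->pending_skb = NULL;
	spin_unlock_irqrestore(&ex->obj_lock, flags);

	if (skb) {
		example_put(ex);	/* drop the reference taken when the skb was queued */
		kfree_skb(skb);
	}
}

Because the pointer is cleared under the lock, whichever path takes it is the only one that frees it.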
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 37becfcc98f2..0b602951ff6b 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -1346,29 +1346,22 @@ EXPORT_SYMBOL(i2400m_unknown_barker);
int i2400m_rx_setup(struct i2400m *i2400m)
{
int result = 0;
- struct device *dev = i2400m_dev(i2400m);
i2400m->rx_reorder = i2400m_rx_reorder_disabled? 0 : 1;
if (i2400m->rx_reorder) {
unsigned itr;
- size_t size;
struct i2400m_roq_log *rd;
result = -ENOMEM;
- size = sizeof(i2400m->rx_roq[0]) * (I2400M_RO_CIN + 1);
- i2400m->rx_roq = kzalloc(size, GFP_KERNEL);
- if (i2400m->rx_roq == NULL) {
- dev_err(dev, "RX: cannot allocate %zu bytes for "
- "reorder queues\n", size);
+ i2400m->rx_roq = kcalloc(I2400M_RO_CIN + 1,
+ sizeof(i2400m->rx_roq[0]), GFP_KERNEL);
+ if (i2400m->rx_roq == NULL)
goto error_roq_alloc;
- }
- size = sizeof(*i2400m->rx_roq[0].log) * (I2400M_RO_CIN + 1);
- rd = kzalloc(size, GFP_KERNEL);
+ rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
+ GFP_KERNEL);
if (rd == NULL) {
- dev_err(dev, "RX: cannot allocate %zu bytes for "
- "reorder queues log areas\n", size);
result = -ENOMEM;
goto error_roq_log_alloc;
}
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index d44b545f4082..fc1355d98bc6 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -199,7 +199,6 @@ int i2400mu_notification_setup(struct i2400mu *i2400mu)
d_fnstart(4, dev, "(i2400m %p)\n", i2400mu);
buf = kmalloc(I2400MU_MAX_NOTIFICATION_LEN, GFP_KERNEL | GFP_DMA);
if (buf == NULL) {
- dev_err(dev, "notification: buffer allocation failed\n");
ret = -ENOMEM;
goto error_buf_alloc;
}
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 080f36303a4f..cd15a93d9084 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -346,9 +346,9 @@ static void i2400mu_get_drvinfo(struct net_device *net_dev,
struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
struct usb_device *udev = i2400mu->usb_dev;
- strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
- strncpy(info->fw_version,
- i2400m->fw_name ? : "", sizeof(info->fw_version) - 1);
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->fw_version, i2400m->fw_name ? : "",
+ sizeof(info->fw_version));
usb_make_path(udev, info->bus_info, sizeof(info->bus_info));
}
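Both get_drvinfo hunks above replace strncpy() with strlcpy(). A small stand-alone illustration of the difference (buf and the literals are made up for the example):

	char buf[8];

	/* strncpy() copies at most 7 bytes here and does not add a '\0'
	 * when the source is longer, so termination depends on buf
	 * having been zeroed beforehand. */
	strncpy(buf, "0123456789", sizeof(buf) - 1);

	/* strlcpy() copies at most sizeof(buf) - 1 bytes and always
	 * terminates, leaving buf == "0123456". */
	strlcpy(buf, "0123456789", sizeof(buf));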
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 28aa05f60c26..f8f0156dff4e 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -138,7 +138,7 @@ config AIRO_CS
config PCMCIA_WL3501
tristate "Planet WL3501 PCMCIA cards"
- depends on EXPERIMENTAL && PCMCIA
+ depends on PCMCIA
select WIRELESS_EXT
select WEXT_SPY
help
@@ -148,7 +148,7 @@ config PCMCIA_WL3501
config PRISM54
tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)'
- depends on PCI && EXPERIMENTAL
+ depends on PCI
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
@@ -187,7 +187,7 @@ config USB_ZD1201
config USB_NET_RNDIS_WLAN
tristate "Wireless RNDIS USB support"
- depends on USB && EXPERIMENTAL
+ depends on USB
depends on CFG80211
select USB_USBNET
select USB_NET_CDCETHER
@@ -217,7 +217,7 @@ source "drivers/net/wireless/rtl818x/Kconfig"
config ADM8211
tristate "ADMtek ADM8211 support"
- depends on MAC80211 && PCI && EXPERIMENTAL
+ depends on MAC80211 && PCI
select CRC32
select EEPROM_93CX6
---help---
@@ -257,7 +257,7 @@ config MAC80211_HWSIM
config MWL8K
tristate "Marvell 88W8xxx PCI/PCIe Wireless support"
- depends on MAC80211 && PCI && EXPERIMENTAL
+ depends on MAC80211 && PCI
---help---
This driver supports Marvell TOPDOG 802.11 wireless cards.
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index 630577dd3a7a..956024a636e6 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -69,10 +69,9 @@ static int airo_probe(struct pcmcia_device *p_dev)
/* Allocate space for private device-specific data */
local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local) {
- printk(KERN_ERR "airo_cs: no memory for new device\n");
+ if (!local)
return -ENOMEM;
- }
+
p_dev->priv = local;
return airo_config(p_dev);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 77fa4286e5e9..5ac5f7ae2721 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2164,10 +2164,8 @@ static int at76_alloc_urbs(struct at76_priv *priv,
buffer_size = sizeof(struct at76_tx_buffer) + MAX_PADDING_SIZE;
priv->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!priv->bulk_out_buffer) {
- dev_err(&interface->dev, "cannot allocate output buffer\n");
+ if (!priv->bulk_out_buffer)
return -ENOMEM;
- }
at76_dbg(DBG_PROC_ENTRY, "%s: EXIT", __func__);
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 1a67a4f829fe..2c02b4e84094 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -30,5 +30,6 @@ source "drivers/net/wireless/ath/ath9k/Kconfig"
source "drivers/net/wireless/ath/carl9170/Kconfig"
source "drivers/net/wireless/ath/ath6kl/Kconfig"
source "drivers/net/wireless/ath/ar5523/Kconfig"
+source "drivers/net/wireless/ath/wil6210/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 1e18621326dc..97b964ded2be 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_ATH9K_HW) += ath9k/
obj-$(CONFIG_CARL9170) += carl9170/
obj-$(CONFIG_ATH6KL) += ath6kl/
obj-$(CONFIG_AR5523) += ar5523/
+obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 30ca0a60a64c..1d264c0f5a9b 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -240,13 +240,14 @@ static const struct ath_ops ath5k_common_ops = {
* Driver Initialization *
\***********************/
-static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+static void ath5k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath5k_hw *ah = hw->priv;
struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
- return ath_reg_notifier_apply(wiphy, request, regulatory);
+ ath_reg_notifier_apply(wiphy, request, regulatory);
}
/********************\
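ath5k's regulatory notifier now returns void (ath6kl's changes the same way further down), which suggests the cfg80211 reg_notifier callback no longer carries a return value, so failures have to be handled inside the notifier instead of being propagated. A sketch of the resulting shape, with a hypothetical example_apply() standing in for the driver-specific work:

static void example_reg_notifier(struct wiphy *wiphy,
				 struct regulatory_request *request)
{
	int ret = example_apply(wiphy, request);	/* hypothetical helper */

	/* Nothing to return any more: log the failure and carry on. */
	if (ret)
		wiphy_err(wiphy, "regulatory update failed: %d\n", ret);
}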
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index ab363f34b4df..a78afa98c650 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1613,6 +1613,10 @@ ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
+ if (WARN_ON(ee_mode < 0)) {
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+ return;
+ }
/* completed NF calibration, test threshold */
nf = ath5k_hw_read_measured_noise_floor(ah);
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 4084b1076286..e2d8b2cf19eb 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -985,6 +985,8 @@ ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
return;
ee_mode = ath5k_eeprom_mode_from_channel(channel);
+ if (WARN_ON(ee_mode < 0))
+ return;
/* Adjust power delta for channel 14 */
if (channel->center_freq == 2484)
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index 26c4b7220859..630c83db056e 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -18,7 +18,6 @@ config ATH6KL_USB
depends on ATH6KL
depends on USB
depends on CFG80211
- depends on EXPERIMENTAL
---help---
This module adds support for wireless adapters based on
Atheros AR6004 chipset running over USB. This is still under
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 5516a8ccc3c6..752ffc4f4166 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -427,6 +427,30 @@ static bool ath6kl_is_tx_pending(struct ath6kl *ar)
return ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0;
}
+static void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif,
+ bool enable)
+{
+ int err;
+
+ if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
+ return;
+
+ if (vif->nw_type != INFRA_NETWORK)
+ return;
+
+ if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
+ vif->ar->fw_capabilities))
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
+ enable ? "enable" : "disable");
+
+ err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, enable);
+ if (err)
+ ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
+ enable ? "enable" : "disable", err);
+}
static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
@@ -616,13 +640,13 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
vif->req_bssid, vif->ch_hint,
ar->connect_ctrl_flags, nw_subtype);
- /* disable background scan if period is 0 */
- if (sme->bg_scan_period == 0)
+ if (sme->bg_scan_period == 0) {
+ /* disable background scan if period is 0 */
sme->bg_scan_period = 0xffff;
-
- /* configure default value if not specified */
- if (sme->bg_scan_period == -1)
+ } else if (sme->bg_scan_period == -1) {
+ /* configure default value if not specified */
sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD;
+ }
ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0,
sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
@@ -767,7 +791,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
nw_type & ADHOC_CREATOR ? "creator" : "joiner");
cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(ar->wiphy, bss);
return;
}
@@ -778,7 +802,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len,
WLAN_STATUS_SUCCESS, GFP_KERNEL);
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(ar->wiphy, bss);
} else if (vif->sme_state == SME_CONNECTED) {
/* inform roam event to cfg80211 */
cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len,
@@ -1454,10 +1478,10 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return -EIO;
if (pmgmt) {
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
mode.pwr_mode = REC_POWER;
} else {
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
mode.pwr_mode = MAX_PERF_POWER;
}
@@ -1509,7 +1533,7 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
list_del(&vif->list);
spin_unlock_bh(&ar->list_lock);
- ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+ ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
ath6kl_cfg80211_vif_cleanup(vif);
@@ -1559,17 +1583,13 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
set_iface_type:
switch (type) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
vif->next_mode = INFRA_NETWORK;
break;
case NL80211_IFTYPE_ADHOC:
vif->next_mode = ADHOC_NETWORK;
break;
case NL80211_IFTYPE_AP:
- vif->next_mode = AP_NETWORK;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- vif->next_mode = INFRA_NETWORK;
- break;
case NL80211_IFTYPE_P2P_GO:
vif->next_mode = AP_NETWORK;
break;
@@ -1778,14 +1798,14 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
if (vif->target_stats.rx_byte) {
sinfo->rx_bytes = vif->target_stats.rx_byte;
- sinfo->filled |= STATION_INFO_RX_BYTES;
+ sinfo->filled |= STATION_INFO_RX_BYTES64;
sinfo->rx_packets = vif->target_stats.rx_pkt;
sinfo->filled |= STATION_INFO_RX_PACKETS;
}
if (vif->target_stats.tx_byte) {
sinfo->tx_bytes = vif->target_stats.tx_byte;
- sinfo->filled |= STATION_INFO_TX_BYTES;
+ sinfo->filled |= STATION_INFO_TX_BYTES64;
sinfo->tx_packets = vif->target_stats.tx_pkt;
sinfo->filled |= STATION_INFO_TX_PACKETS;
}
@@ -2673,30 +2693,6 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
return 0;
}
-void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable)
-{
- int err;
-
- if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
- return;
-
- if (vif->nw_type != INFRA_NETWORK)
- return;
-
- if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
- vif->ar->fw_capabilities))
- return;
-
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
- enable ? "enable" : "disable");
-
- err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
- vif->fw_vif_idx, enable);
- if (err)
- ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
- enable ? "enable" : "disable", err);
-}
-
static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
u8 *rsn_capab)
{
@@ -2776,9 +2772,11 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
ar->ap_mode_bkey.valid = false;
- /* TODO:
- * info->interval
- */
+ ret = ath6kl_wmi_ap_set_beacon_intvl_cmd(ar->wmi, vif->fw_vif_idx,
+ info->beacon_interval);
+
+ if (ret)
+ ath6kl_warn("Failed to set beacon interval: %d\n", ret);
ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx,
info->dtim_period);
@@ -3492,8 +3490,8 @@ void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
ath6kl_cfg80211_stop(vif);
}
-static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ath6kl *ar = wiphy_priv(wiphy);
u32 rates[IEEE80211_NUM_BANDS];
@@ -3506,17 +3504,13 @@ static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
request->processed ? " processed" : "",
request->initiator, request->user_reg_hint_type);
- /*
- * As firmware is not able intersect regdoms, we can only listen to
- * cellular hints.
- */
if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE)
- return -EOPNOTSUPP;
+ return;
ret = ath6kl_wmi_set_regdomain_cmd(ar->wmi, request->alpha2);
if (ret) {
ath6kl_err("failed to set regdomain: %d\n", ret);
- return ret;
+ return;
}
/*
@@ -3536,10 +3530,8 @@ static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
if (ret) {
ath6kl_err("failed to start scan for a regdomain change: %d\n",
ret);
- return ret;
+ return;
}
-
- return 0;
}
static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
@@ -3563,6 +3555,37 @@ static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
return 0;
}
+void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready)
+{
+ static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ bool discon_issued;
+
+ netif_stop_queue(vif->ndev);
+
+ clear_bit(WLAN_ENABLED, &vif->flags);
+
+ if (wmi_ready) {
+ discon_issued = test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags);
+ ath6kl_disconnect(vif);
+ del_timer(&vif->disconnect_timer);
+
+ if (discon_issued)
+ ath6kl_disconnect_event(vif, DISCONNECT_CMD,
+ (vif->nw_type & AP_NETWORK) ?
+ bcast_mac : vif->bssid,
+ 0, NULL, 0);
+ }
+
+ if (vif->scan_req) {
+ cfg80211_scan_done(vif->scan_req, true);
+ vif->scan_req = NULL;
+ }
+
+ /* need to clean up enhanced bmiss detection fw state */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
+}
+
void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
{
struct ath6kl *ar = vif->ar;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index e5e70f3a8ca8..b59becd91aea 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -61,7 +61,5 @@ void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
struct ath6kl *ath6kl_cfg80211_create(void);
void ath6kl_cfg80211_destroy(struct ath6kl *ar);
-/* TODO: remove this once ath6kl_vif_cleanup() is moved to cfg80211.c */
-void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable);
#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 189d8faf8c87..61b2f98b4e77 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -940,7 +940,7 @@ void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
bool wait_fot_compltn, bool cold_reset);
void ath6kl_init_control_info(struct ath6kl_vif *vif);
struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
-void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
+void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready);
int ath6kl_init_hw_start(struct ath6kl *ar);
int ath6kl_init_hw_stop(struct ath6kl *ar);
int ath6kl_init_fetch_firmwares(struct ath6kl *ar);
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index ba6bd497b787..281390178e3d 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -509,9 +509,7 @@ static void destroy_htc_txctrl_packet(struct htc_packet *packet)
{
struct sk_buff *skb;
skb = packet->skb;
- if (skb != NULL)
- dev_kfree_skb(skb);
-
+ dev_kfree_skb(skb);
kfree(packet);
}
@@ -969,6 +967,22 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
u16 payload_len;
int status = 0;
+ /*
+ * ar->htc_target can be NULL due to a race condition that can occur
+	 * during driver initialization (we do 'ath6kl_hif_power_on' before
+ * initializing 'ar->htc_target' via 'ath6kl_htc_create').
+ * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as
+ * usb_complete_t/callback function for 'usb_fill_bulk_urb'.
+	 * Thus ar->htc_target may still be NULL when we get here
+	 * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
+ */
+ if (WARN_ON_ONCE(!target)) {
+ ath6kl_err("Target not yet initialized\n");
+ status = -EINVAL;
+ goto free_skb;
+ }
+
netdata = skb->data;
netlen = skb->len;
@@ -1054,6 +1068,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
dev_kfree_skb(skb);
skb = NULL;
+
goto free_skb;
}
@@ -1089,8 +1104,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
skb = NULL;
free_skb:
- if (skb != NULL)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
return status;
@@ -1184,7 +1198,7 @@ static void reset_endpoint_states(struct htc_target *target)
INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
INIT_LIST_HEAD(&ep->rx_bufq);
ep->target = target;
- ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */
+ ep->pipe.tx_credit_flow_enabled = true;
}
}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index f21fa322e5ca..5d434cf88f35 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1715,38 +1715,6 @@ void ath6kl_init_hw_restart(struct ath6kl *ar)
}
}
-/* FIXME: move this to cfg80211.c and rename to ath6kl_cfg80211_vif_stop() */
-void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
-{
- static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- bool discon_issued;
-
- netif_stop_queue(vif->ndev);
-
- clear_bit(WLAN_ENABLED, &vif->flags);
-
- if (wmi_ready) {
- discon_issued = test_bit(CONNECTED, &vif->flags) ||
- test_bit(CONNECT_PEND, &vif->flags);
- ath6kl_disconnect(vif);
- del_timer(&vif->disconnect_timer);
-
- if (discon_issued)
- ath6kl_disconnect_event(vif, DISCONNECT_CMD,
- (vif->nw_type & AP_NETWORK) ?
- bcast_mac : vif->bssid,
- 0, NULL, 0);
- }
-
- if (vif->scan_req) {
- cfg80211_scan_done(vif->scan_req, true);
- vif->scan_req = NULL;
- }
-
- /* need to clean up enhanced bmiss detection fw state */
- ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
-}
-
void ath6kl_stop_txrx(struct ath6kl *ar)
{
struct ath6kl_vif *vif, *tmp_vif;
@@ -1766,7 +1734,7 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
list_del(&vif->list);
spin_unlock_bh(&ar->list_lock);
- ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+ ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
rtnl_lock();
ath6kl_cfg80211_vif_cleanup(vif);
rtnl_unlock();
@@ -1801,8 +1769,6 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
"attempting to reset target on instance destroy\n");
ath6kl_reset_device(ar, ar->target_type, true, true);
- clear_bit(WLAN_ENABLED, &ar->flag);
-
up(&ar->sem);
}
EXPORT_SYMBOL(ath6kl_stop_txrx);
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 62bcc0d5bc23..5fcd342762de 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -159,10 +159,8 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context)
{
- if (urb_context->skb != NULL) {
- dev_kfree_skb(urb_context->skb);
- urb_context->skb = NULL;
- }
+ dev_kfree_skb(urb_context->skb);
+ urb_context->skb = NULL;
ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 998f8b0f62fd..d76b5bd81a0d 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -751,6 +751,23 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
NO_SYNC_WMIFLAG);
}
+int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx,
+ u32 beacon_intvl)
+{
+ struct sk_buff *skb;
+ struct set_beacon_int_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct set_beacon_int_cmd *) skb->data;
+
+ cmd->beacon_intvl = cpu_to_le32(beacon_intvl);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_BEACON_INT_CMDID, NO_SYNC_WMIFLAG);
+}
+
int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period)
{
struct sk_buff *skb;
@@ -1108,7 +1125,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
kfree(mgmt);
if (bss == NULL)
return -ENOMEM;
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(ar->wiphy, bss);
/*
* Firmware doesn't return any event when scheduled scan has
@@ -2480,16 +2497,11 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
free_cmd_skb:
/* free up any resources left over (possibly due to an error) */
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
free_data_skb:
- for (index = 0; index < num_pri_streams; index++) {
- if (data_sync_bufs[index].skb != NULL) {
- dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].
- skb);
- }
- }
+ for (index = 0; index < num_pri_streams; index++)
+ dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].skb);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 98b1755e67f4..b5f226503baf 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -1660,6 +1660,10 @@ struct roam_ctrl_cmd {
u8 roam_ctrl;
} __packed;
+struct set_beacon_int_cmd {
+ __le32 beacon_intvl;
+} __packed;
+
struct set_dtim_cmd {
__le32 dtim_period;
} __packed;
@@ -2649,6 +2653,8 @@ int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi);
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
+int ath6kl_wmi_ap_set_beacon_intvl_cmd(struct wmi *wmi, u8 if_idx,
+ u32 beacon_interval);
int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7647ed6b73d7..17507dc8a1e7 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -58,6 +58,7 @@ config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
depends on ATH9K
select MAC80211_DEBUGFS
+ select RELAY
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 3a69804f4c16..d1ff3c246a12 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -86,29 +86,25 @@ static int ath_ahb_probe(struct platform_device *pdev)
if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "no platform data specified\n");
- ret = -EINVAL;
- goto err_out;
+ return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no memory resource found\n");
- ret = -ENXIO;
- goto err_out;
+ return -ENXIO;
}
- mem = ioremap_nocache(res->start, resource_size(res));
+ mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no IRQ resource found\n");
- ret = -ENXIO;
- goto err_iounmap;
+ return -ENXIO;
}
irq = res->start;
@@ -116,8 +112,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
- ret = -ENOMEM;
- goto err_iounmap;
+ return -ENOMEM;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -156,9 +151,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
err_free_hw:
ieee80211_free_hw(hw);
platform_set_drvdata(pdev, NULL);
- err_iounmap:
- iounmap(mem);
- err_out:
return ret;
}
@@ -168,12 +160,10 @@ static int ath_ahb_remove(struct platform_device *pdev)
if (hw) {
struct ath_softc *sc = hw->priv;
- void __iomem *mem = sc->mem;
ath9k_deinit_device(sc);
free_irq(sc->irq, sc);
ieee80211_free_hw(sc->hw);
- iounmap(mem);
platform_set_drvdata(pdev, NULL);
}
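The ahb.c conversion works because devm_ioremap_nocache() ties the mapping's lifetime to the device: the driver core releases it when probe fails later on or when the device is removed, which is why the explicit iounmap() calls and the err_iounmap/err_out labels can be dropped. A minimal sketch of the pattern (example_probe() and the omitted setup are placeholders):

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *mem;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	/* Managed mapping: released automatically on probe failure or
	 * removal, so no unwind label or iounmap() is needed. */
	mem = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res));
	if (!mem)
		return -ENOMEM;

	/* ...remaining setup can simply return on error... */
	return 0;
}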
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index e09ec40ce71a..7ecd40f07a74 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -152,7 +152,8 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->ofdmNoiseImmunityLevel,
immunityLevel, BEACON_RSSI(ah),
- aniState->rssiThrLow, aniState->rssiThrHigh);
+ ATH9K_ANI_RSSI_THR_LOW,
+ ATH9K_ANI_RSSI_THR_HIGH);
if (!scan)
aniState->ofdmNoiseImmunityLevel = immunityLevel;
@@ -173,7 +174,7 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
weak_sig = entry_ofdm->ofdm_weak_signal_on;
if (ah->opmode == NL80211_IFTYPE_STATION &&
- BEACON_RSSI(ah) <= aniState->rssiThrHigh)
+ BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH)
weak_sig = true;
if (aniState->ofdmWeakSigDetect != weak_sig)
@@ -216,11 +217,11 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->cckNoiseImmunityLevel, immunityLevel,
- BEACON_RSSI(ah), aniState->rssiThrLow,
- aniState->rssiThrHigh);
+ BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW,
+ ATH9K_ANI_RSSI_THR_HIGH);
if (ah->opmode == NL80211_IFTYPE_STATION &&
- BEACON_RSSI(ah) <= aniState->rssiThrLow &&
+ BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW &&
immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
@@ -418,9 +419,6 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
return;
aniState = &ah->curchan->ani;
- if (WARN_ON(!aniState))
- return;
-
if (!ath9k_hw_ani_read_counters(ah))
return;
@@ -489,23 +487,6 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
-void ath9k_hw_ani_setup(struct ath_hw *ah)
-{
- int i;
-
- static const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
- static const int coarseHigh[] = { -14, -14, -14, -14, -12 };
- static const int coarseLow[] = { -64, -64, -64, -64, -70 };
- static const int firpwr[] = { -78, -78, -78, -78, -80 };
-
- for (i = 0; i < 5; i++) {
- ah->totalSizeDesired[i] = totalSizeDesired[i];
- ah->coarse_high[i] = coarseHigh[i];
- ah->coarse_low[i] = coarseLow[i];
- ah->firpwr[i] = firpwr[i];
- }
-}
-
void ath9k_hw_ani_init(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -531,8 +512,6 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ani->ofdmsTurn = true;
- ani->rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
- ani->rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 1485bf5e3518..dddb1361039a 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -104,7 +104,6 @@ struct ath9k_ani_default {
};
struct ar5416AniState {
- struct ath9k_channel *c;
u8 noiseImmunityLevel;
u8 ofdmNoiseImmunityLevel;
u8 cckNoiseImmunityLevel;
@@ -113,15 +112,9 @@ struct ar5416AniState {
u8 spurImmunityLevel;
u8 firstepLevel;
u8 ofdmWeakSigDetect;
- u8 cckWeakSigThreshold;
u32 listenTime;
- int32_t rssiThrLow;
- int32_t rssiThrHigh;
u32 ofdmPhyErrCount;
u32 cckPhyErrCount;
- int16_t pktRssi[2];
- int16_t ofdmErrRssi[2];
- int16_t cckErrRssi[2];
struct ath9k_ani_default iniDef;
};
@@ -147,7 +140,6 @@ struct ar5416Stats {
void ath9k_enable_mib_counters(struct ath_hw *ah);
void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
-void ath9k_hw_ani_setup(struct ath_hw *ah);
void ath9k_hw_ani_init(struct ath_hw *ah);
#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
index f81e7fc60a36..467ccfae2cee 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -466,7 +466,7 @@ static const u32 ar5416Bank0[][2] = {
};
static const u32 ar5416BB_RfGain[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x00009a00, 0x00000000, 0x00000000},
{0x00009a04, 0x00000040, 0x00000040},
{0x00009a08, 0x00000080, 0x00000080},
@@ -546,12 +546,12 @@ static const u32 ar5416Bank2[][2] = {
};
static const u32 ar5416Bank3[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x000098f0, 0x01400018, 0x01c00018},
};
static const u32 ar5416Bank6[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
@@ -588,7 +588,7 @@ static const u32 ar5416Bank6[][3] = {
};
static const u32 ar5416Bank6TPC[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 874186bfda41..fd69376ecc83 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -470,16 +470,15 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
{
#define ATH_ALLOC_BANK(bank, size) do { \
- bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
- if (!bank) { \
- ath_err(common, "Cannot allocate RF banks\n"); \
- return -ENOMEM; \
- } \
+ bank = devm_kzalloc(ah->dev, sizeof(u32) * size, GFP_KERNEL); \
+ if (!bank) \
+ goto error; \
} while (0);
struct ath_common *common = ath9k_hw_common(ah);
- BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ return 0;
ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
@@ -492,35 +491,12 @@ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
return 0;
#undef ATH_ALLOC_BANK
+error:
+ ath_err(common, "Cannot allocate RF banks\n");
+ return -ENOMEM;
}
-/**
- * ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
- * @ah: atheros hardware struture
- * For the external AR2133/AR5133 radios banks.
- */
-static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
-{
-#define ATH_FREE_BANK(bank) do { \
- kfree(bank); \
- bank = NULL; \
- } while (0);
-
- BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
-
- ATH_FREE_BANK(ah->analogBank0Data);
- ATH_FREE_BANK(ah->analogBank1Data);
- ATH_FREE_BANK(ah->analogBank2Data);
- ATH_FREE_BANK(ah->analogBank3Data);
- ATH_FREE_BANK(ah->analogBank6Data);
- ATH_FREE_BANK(ah->analogBank6TPCData);
- ATH_FREE_BANK(ah->analogBank7Data);
- ATH_FREE_BANK(ah->bank6Temp);
-
-#undef ATH_FREE_BANK
-}
-
/* *
* ar5008_hw_set_rf_regs - programs rf registers based on EEPROM
* @ah: atheros hardware structure
@@ -1380,7 +1356,7 @@ static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
conf->radar_inband = 8;
}
-void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
+int ar5008_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
static const u32 ar5416_cca_regs[6] = {
@@ -1391,12 +1367,15 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
AR_PHY_CH1_EXT_CCA,
AR_PHY_CH2_EXT_CCA
};
+ int ret;
+
+ ret = ar5008_hw_rf_alloc_ext_banks(ah);
+ if (ret)
+ return ret;
priv_ops->rf_set_freq = ar5008_hw_set_channel;
priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;
- priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks;
- priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks;
priv_ops->set_rf_regs = ar5008_hw_set_rf_regs;
priv_ops->set_channel_regs = ar5008_hw_set_channel_regs;
priv_ops->init_bb = ar5008_hw_init_bb;
@@ -1421,4 +1400,5 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
ar5008_hw_set_nf_limits(ah);
ar5008_hw_set_radar_conf(ah);
memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs));
+ return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
index ea4a230997ac..59524e1d4678 100644
--- a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -460,7 +460,7 @@ static const u32 ar5416Common_9100[][2] = {
};
static const u32 ar5416Bank6_9100[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
@@ -497,7 +497,7 @@ static const u32 ar5416Bank6_9100[][3] = {
};
static const u32 ar5416Bank6TPC_9100[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
+ /* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
{0x0000989c, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 648da3e885e9..f053d978540e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -23,13 +23,13 @@
/* General hardware code for the A5008/AR9001/AR9002 hadware families */
-static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
+static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
{
if (AR_SREV_9271(ah)) {
INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271);
INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271);
INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg);
- return;
+ return 0;
}
if (ah->config.pcie_clock_req)
@@ -102,9 +102,9 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
u32 size = sizeof(u32) * addac->ia_rows * addac->ia_columns;
u32 *data;
- data = kmalloc(size, GFP_KERNEL);
+ data = devm_kzalloc(ah->dev, size, GFP_KERNEL);
if (!data)
- return;
+ return -ENOMEM;
memcpy(data, addac->ia_array, size);
addac->ia_array = data;
@@ -120,6 +120,7 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9287Common_japan_2484_cck_fir_coeff_9287_1_1);
}
+ return 0;
}
static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
@@ -409,22 +410,30 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
}
/* Sets up the AR5008/AR9001/AR9002 hardware familiy callbacks */
-void ar9002_hw_attach_ops(struct ath_hw *ah)
+int ar9002_hw_attach_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
+ int ret;
+
+ ret = ar9002_hw_init_mode_regs(ah);
+ if (ret)
+ return ret;
- priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
ops->config_pci_powersave = ar9002_hw_configpcipowersave;
- ar5008_hw_attach_phy_ops(ah);
+ ret = ar5008_hw_attach_phy_ops(ah);
+ if (ret)
+ return ret;
+
if (AR_SREV_9280_20_OR_LATER(ah))
ar9002_hw_attach_phy_ops(ah);
ar9002_hw_attach_calib_ops(ah);
ar9002_hw_attach_mac_ops(ah);
+ return 0;
}
void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 846dd7974eb8..f4003512d8d5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -555,14 +555,73 @@ static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
}
+static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
+ struct ath_spec_scan *param)
+{
+ u8 count;
+
+ if (!param->enabled) {
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ENABLE);
+ return;
+ }
+ REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA);
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
+
+ if (param->short_repeat)
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+
+	/* on AR92xx, the highest bit of count will make the chip send
+ * spectral samples endlessly. Check if this really was intended,
+ * and fix otherwise.
+ */
+ count = param->count;
+ if (param->endless)
+ count = 0x80;
+ else if (count & 0x80)
+ count = 0x7f;
+
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_COUNT, count);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_PERIOD, param->period);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_FFT_PERIOD, param->fft_period);
+
+ return;
+}
+
+static void ar9002_hw_spectral_scan_trigger(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
+ /* Activate spectral scan */
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE);
+}
+
+static void ar9002_hw_spectral_scan_wait(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Poll for spectral scan complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_err(common, "spectral scan wait failed\n");
+ return;
+ }
+}
+
void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
priv_ops->set_rf_regs = NULL;
- priv_ops->rf_alloc_ext_banks = NULL;
- priv_ops->rf_free_ext_banks = NULL;
priv_ops->rf_set_freq = ar9002_hw_set_channel;
priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
priv_ops->olc_init = ar9002_olc_init;
@@ -571,6 +630,9 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
ops->antdiv_comb_conf_get = ar9002_hw_antdiv_comb_conf_get;
ops->antdiv_comb_conf_set = ar9002_hw_antdiv_comb_conf_set;
+ ops->spectral_scan_config = ar9002_hw_spectral_scan_config;
+ ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger;
+ ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait;
ar9002_hw_set_nf_limits(ah);
}
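In the spectral-scan hunk above, bit 7 of the hardware COUNT field makes the chip stream samples endlessly, so the code maps param->endless to 0x80 and clamps any other request with bit 7 set down to 0x7f. The same mapping as a tiny stand-alone helper (spectral_count() is made up for illustration):

static u8 spectral_count(bool endless, u8 requested)
{
	if (endless)
		return 0x80;	/* bit 7 set: repeat forever */
	if (requested & 0x80)
		return 0x7f;	/* clamp rather than accidentally go endless */
	return requested;
}

For example, a request for 0x90 samples comes back as 0x7f instead of enabling endless streaming.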
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 262e1e036fd7..db5ffada2217 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -744,6 +744,186 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
+static const u32 ar9300Modes_mixed_ob_db_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
+ {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02},
+ {0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x26000a20, 0x26000a20},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2a000e20, 0x2a000e20},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2e000e22, 0x2e000e22},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x31000e24, 0x31000e24},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x34001640, 0x34001640},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
+ {0x0000a544, 0x52022470, 0x52022470, 0x3b001861, 0x3b001861},
+ {0x0000a548, 0x55022490, 0x55022490, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x59022492, 0x59022492, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d022692, 0x5d022692, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x61022892, 0x61022892, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x65024890, 0x65024890, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x69024892, 0x69024892, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x6e024c92, 0x6e024c92, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x74026e92, 0x74026e92, 0x56001eec, 0x56001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800400, 0x11800400},
+ {0x0000a598, 0x21802220, 0x21802220, 0x15800402, 0x15800402},
+ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800603, 0x1b800603},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800a02, 0x1f800a02},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x23800a04, 0x23800a04},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x26800a20, 0x26800a20},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2a800e20, 0x2a800e20},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2e800e22, 0x2e800e22},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x31800e24, 0x31800e24},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x34801640, 0x34801640},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38801660, 0x38801660},
+ {0x0000a5c4, 0x52822470, 0x52822470, 0x3b801861, 0x3b801861},
+ {0x0000a5c8, 0x55822490, 0x55822490, 0x3e801a81, 0x3e801a81},
+ {0x0000a5cc, 0x59822492, 0x59822492, 0x42801a83, 0x42801a83},
+ {0x0000a5d0, 0x5d822692, 0x5d822692, 0x44801c84, 0x44801c84},
+ {0x0000a5d4, 0x61822892, 0x61822892, 0x48801ce3, 0x48801ce3},
+ {0x0000a5d8, 0x65824890, 0x65824890, 0x4c801ce5, 0x4c801ce5},
+ {0x0000a5dc, 0x69824892, 0x69824892, 0x50801ce9, 0x50801ce9},
+ {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x54801ceb, 0x54801ceb},
+ {0x0000a5e4, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5e8, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5ec, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5f0, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5f4, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5f8, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a5fc, 0x74826e92, 0x74826e92, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016048, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016448, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x056db2e4, 0x056db2e4},
+ {0x00016848, 0x66480001, 0x66480001, 0x8e480001, 0x8e480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
+static const u32 ar9300Modes_type5_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x65240001, 0x65240001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
/* Addr allmodes */
{0x0000a000, 0x00010000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 8b0d8dcd7625..4cc13940c895 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -32,7 +32,6 @@ struct coeff {
enum ar9003_cal_types {
IQ_MISMATCH_CAL = BIT(0),
- TEMP_COMP_CAL = BIT(1),
};
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
@@ -49,7 +48,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
*/
REG_RMW_FIELD(ah, AR_PHY_TIMING4,
AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX,
- currCal->calData->calCountMax);
+ currCal->calData->calCountMax);
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
ath_dbg(common, CALIBRATE,
@@ -58,14 +57,8 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
/* Kick-off cal */
REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
break;
- case TEMP_COMP_CAL:
- REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
- AR_PHY_65NM_CH0_THERM_LOCAL, 1);
- REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
- AR_PHY_65NM_CH0_THERM_START, 1);
-
- ath_dbg(common, CALIBRATE,
- "starting Temperature Compensation Calibration\n");
+ default:
+ ath_err(common, "Invalid calibration type\n");
break;
}
}
@@ -323,6 +316,14 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
{
ah->iq_caldata.calData = &iq_cal_single_sample;
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->enabled_cals |= TX_IQ_CAL;
+ if (AR_SREV_9485_OR_LATER(ah) && !AR_SREV_9340(ah))
+ ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
+ }
+
+ ah->supp_cals = IQ_MISMATCH_CAL;
}
/*
@@ -959,22 +960,70 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
}
+static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int i;
+
+ if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah))
+ return;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->rxchainmask & (1 << i)))
+ continue;
+ ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
+ }
+}
+
+static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
+{
+ u32 cl_idx[AR9300_MAX_CHAINS] = { AR_PHY_CL_TAB_0,
+ AR_PHY_CL_TAB_1,
+ AR_PHY_CL_TAB_2 };
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ bool txclcal_done = false;
+ int i, j;
+
+ if (!caldata || !(ah->enabled_cals & TX_CL_CAL))
+ return;
+
+ txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ AR_PHY_AGC_CONTROL_CLC_SUCCESS);
+
+ if (caldata->done_txclcal_once) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+ for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
+ REG_WRITE(ah, CL_TAB_ENTRY(cl_idx[i]),
+ caldata->tx_clcal[i][j]);
+ }
+ } else if (is_reusable && txclcal_done) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
+ for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
+ caldata->tx_clcal[i][j] =
+ REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
+ }
+ caldata->done_txclcal_once = true;
+ }
+}
+
static bool ar9003_hw_init_cal(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
- bool txiqcal_done = false, txclcal_done = false;
+ bool txiqcal_done = false;
bool is_reusable = true, status = true;
- bool run_rtt_cal = false, run_agc_cal;
+ bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
- int i, j;
- u32 cl_idx[AR9300_MAX_CHAINS] = { AR_PHY_CL_TAB_0,
- AR_PHY_CL_TAB_1,
- AR_PHY_CL_TAB_2 };
+
+ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
if (rtt) {
if (!ar9003_hw_rtt_restore(ah, chan))
@@ -1012,7 +1061,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
}
}
- if (!(ah->enabled_cals & TX_IQ_CAL))
+ if ((IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan)) ||
+ !(ah->enabled_cals & TX_IQ_CAL))
goto skip_tx_iqcal;
/* Do Tx IQ Calibration */
@@ -1032,21 +1082,22 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
txiqcal_done = run_agc_cal = true;
- goto skip_tx_iqcal;
- } else if (caldata && !caldata->done_txiqcal_once)
+ } else if (caldata && !caldata->done_txiqcal_once) {
run_agc_cal = true;
+ sep_iq_cal = true;
+ }
+skip_tx_iqcal:
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
ar9003_mci_init_cal_req(ah, &is_reusable);
- if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) {
+ if (sep_iq_cal) {
txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
udelay(5);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
}
-skip_tx_iqcal:
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
/* Calibrate the AGC */
REG_WRITE(ah, AR_PHY_AGC_CONTROL,
@@ -1057,14 +1108,8 @@ skip_tx_iqcal:
status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->rxchainmask & (1 << i)))
- continue;
- ar9003_hw_manual_peak_cal(ah, i,
- IS_CHAN_2GHZ(chan));
- }
- }
+
+ ar9003_hw_do_manual_peak_cal(ah, chan);
}
if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
@@ -1089,31 +1134,7 @@ skip_tx_iqcal:
else if (caldata && caldata->done_txiqcal_once)
ar9003_hw_tx_iq_cal_reload(ah);
-#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
- if (caldata && (ah->enabled_cals & TX_CL_CAL)) {
- txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
- AR_PHY_AGC_CONTROL_CLC_SUCCESS);
- if (caldata->done_txclcal_once) {
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->txchainmask & (1 << i)))
- continue;
- for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
- REG_WRITE(ah, CL_TAB_ENTRY(cl_idx[i]),
- caldata->tx_clcal[i][j]);
- }
- } else if (is_reusable && txclcal_done) {
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->txchainmask & (1 << i)))
- continue;
- for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
- caldata->tx_clcal[i][j] =
- REG_READ(ah,
- CL_TAB_ENTRY(cl_idx[i]));
- }
- caldata->done_txclcal_once = true;
- }
- }
-#undef CL_TAB_ENTRY
+ ar9003_hw_cl_cal_post_proc(ah, is_reusable);
if (run_rtt_cal && caldata) {
if (is_reusable) {
@@ -1131,20 +1152,10 @@ skip_tx_iqcal:
/* Initialize list pointers */
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
- ah->supp_cals = IQ_MISMATCH_CAL;
-
- if (ah->supp_cals & IQ_MISMATCH_CAL) {
- INIT_CAL(&ah->iq_caldata);
- INSERT_CAL(ah, &ah->iq_caldata);
- ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
- }
- if (ah->supp_cals & TEMP_COMP_CAL) {
- INIT_CAL(&ah->tempCompCalData);
- INSERT_CAL(ah, &ah->tempCompCalData);
- ath_dbg(common, CALIBRATE,
- "enabling Temperature Compensation Calibration\n");
- }
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
/* Initialize current pointer to first element in list */
ah->cal_list_curr = ah->cal_list;
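
The calibration changes above pull two per-chain loops out of ar9003_hw_init_cal(): manual peak calibration and carrier-leak table save/restore now live in their own helpers, each walking the RX or TX chain mask and skipping disabled chains. A minimal standalone sketch of that iteration pattern follows; AR9300_MAX_CHAINS and the chainmask semantics come from the diff, while for_each_enabled_chain() and do_per_chain() are hypothetical names used only for illustration.

#include <stdio.h>

#define AR9300_MAX_CHAINS 3

/* Placeholder for the per-chain work (peak cal, CL table I/O, ...). */
static void do_per_chain(int chain)
{
	printf("calibrating chain %d\n", chain);
}

/* Walk a 3-bit chain mask and act only on the chains that are set. */
static void for_each_enabled_chain(unsigned int chainmask)
{
	int i;

	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
		if (!(chainmask & (1 << i)))
			continue;
		do_per_chain(i);
	}
}

int main(void)
{
	for_each_enabled_chain(0x5);	/* chains 0 and 2 enabled */
	return 0;
}
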
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 562186ca9b52..881e989ea470 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4586,14 +4586,14 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
return 0;
}
-static int ar9003_hw_power_control_override(struct ath_hw *ah,
- int frequency,
- int *correction,
- int *voltage, int *temperature)
+static void ar9003_hw_power_control_override(struct ath_hw *ah,
+ int frequency,
+ int *correction,
+ int *voltage, int *temperature)
{
- int tempSlope = 0;
+ int temp_slope = 0, temp_slope1 = 0, temp_slope2 = 0;
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- int f[8], t[8], i;
+ int f[8], t[8], t1[3], t2[3], i;
REG_RMW(ah, AR_PHY_TPC_11_B0,
(correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
@@ -4624,38 +4624,108 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
* enable temperature compensation
* Need to use register names
*/
- if (frequency < 4000)
- tempSlope = eep->modalHeader2G.tempSlope;
- else if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) {
- for (i = 0; i < 8; i++) {
- t[i] = eep->base_ext1.tempslopextension[i];
- f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0);
+ if (frequency < 4000) {
+ temp_slope = eep->modalHeader2G.tempSlope;
+ } else {
+ if (AR_SREV_9550(ah)) {
+ t[0] = eep->base_ext1.tempslopextension[2];
+ t1[0] = eep->base_ext1.tempslopextension[3];
+ t2[0] = eep->base_ext1.tempslopextension[4];
+ f[0] = 5180;
+
+ t[1] = eep->modalHeader5G.tempSlope;
+ t1[1] = eep->base_ext1.tempslopextension[0];
+ t2[1] = eep->base_ext1.tempslopextension[1];
+ f[1] = 5500;
+
+ t[2] = eep->base_ext1.tempslopextension[5];
+ t1[2] = eep->base_ext1.tempslopextension[6];
+ t2[2] = eep->base_ext1.tempslopextension[7];
+ f[2] = 5785;
+
+ temp_slope = ar9003_hw_power_interpolate(frequency,
+ f, t, 3);
+ temp_slope1 = ar9003_hw_power_interpolate(frequency,
+ f, t1, 3);
+ temp_slope2 = ar9003_hw_power_interpolate(frequency,
+ f, t2, 3);
+
+ goto tempslope;
}
- tempSlope = ar9003_hw_power_interpolate((s32) frequency,
- f, t, 8);
- } else if (eep->base_ext2.tempSlopeLow != 0) {
- t[0] = eep->base_ext2.tempSlopeLow;
- f[0] = 5180;
- t[1] = eep->modalHeader5G.tempSlope;
- f[1] = 5500;
- t[2] = eep->base_ext2.tempSlopeHigh;
- f[2] = 5785;
- tempSlope = ar9003_hw_power_interpolate((s32) frequency,
- f, t, 3);
- } else
- tempSlope = eep->modalHeader5G.tempSlope;
- REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
+ if ((eep->baseEepHeader.miscConfiguration & 0x20) != 0) {
+ for (i = 0; i < 8; i++) {
+ t[i] = eep->base_ext1.tempslopextension[i];
+ f[i] = FBIN2FREQ(eep->calFreqPier5G[i], 0);
+ }
+ temp_slope = ar9003_hw_power_interpolate((s32) frequency,
+ f, t, 8);
+ } else if (eep->base_ext2.tempSlopeLow != 0) {
+ t[0] = eep->base_ext2.tempSlopeLow;
+ f[0] = 5180;
+ t[1] = eep->modalHeader5G.tempSlope;
+ f[1] = 5500;
+ t[2] = eep->base_ext2.tempSlopeHigh;
+ f[2] = 5785;
+ temp_slope = ar9003_hw_power_interpolate((s32) frequency,
+ f, t, 3);
+ } else {
+ temp_slope = eep->modalHeader5G.tempSlope;
+ }
+ }
+
+tempslope:
+ if (AR_SREV_9550(ah)) {
+ /*
+	 * AR955x has a tempSlope register for each chain.
+	 * Check whether the temp_compensation feature is enabled or not.
+ */
+ if (eep->baseEepHeader.featureEnable & 0x1) {
+ if (frequency < 4000) {
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ eep->base_ext2.tempSlopeLow);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ eep->base_ext2.tempSlopeHigh);
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope1);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM,
+ temp_slope2);
+ }
+ } else {
+ /*
+ * If temp compensation is not enabled,
+ * set all registers to 0.
+ */
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+ AR_PHY_TPC_19_ALPHA_THERM, 0);
+ }
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+ AR_PHY_TPC_19_ALPHA_THERM, temp_slope);
+ }
if (AR_SREV_9462_20(ah))
REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
- AR_PHY_TPC_19_B1_ALPHA_THERM, tempSlope);
+ AR_PHY_TPC_19_B1_ALPHA_THERM, temp_slope);
REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE,
temperature[0]);
-
- return 0;
}
/* Apply the recorded correction values. */
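
For AR955x the patch derives a per-chain temperature slope by interpolating EEPROM values over three 5 GHz anchor frequencies (5180, 5500 and 5785 MHz). The driver's ar9003_hw_power_interpolate() is not reproduced in this diff; the sketch below only assumes it behaves like a clamped piecewise-linear interpolation over the pier frequencies, and the slope values used in main() are made up for illustration.

#include <stdio.h>

/* Clamped piecewise-linear interpolation of y[] over frequencies f[]. */
static int interp_pier(int freq, const int *f, const int *y, int n)
{
	int i;

	if (freq <= f[0])
		return y[0];
	if (freq >= f[n - 1])
		return y[n - 1];

	for (i = 0; i < n - 1; i++) {
		if (freq <= f[i + 1])
			return y[i] + (y[i + 1] - y[i]) * (freq - f[i]) /
				      (f[i + 1] - f[i]);
	}
	return y[n - 1];
}

int main(void)
{
	int f[3] = { 5180, 5500, 5785 };	/* anchor frequencies from the diff */
	int t[3] = { 40, 50, 60 };		/* illustrative slope values */

	printf("slope @ 5300 MHz = %d\n", interp_pier(5300, f, t, 3));
	return 0;
}
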
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 59bf5f31e212..a3523c969a3a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -507,28 +507,59 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_mixed_ob_db_tx_gain_table);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_mixed_ob_db_tx_gain_table_2p2);
+}
+
+static void ar9003_tx_gain_table_mode5(struct ath_hw *ah)
+{
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_green_ob_db_tx_gain_1_1);
+ else if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_ub124_tx_gain_table_1p0);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_type5_tx_gain_table);
+ else if (AR_SREV_9300_22(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_type5_tx_gain_table_2p2);
}
+static void ar9003_tx_gain_table_mode6(struct ath_hw *ah)
+{
+ if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0);
+ else if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_green_spur_ob_db_tx_gain_1_1);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_type6_tx_gain_table);
+}
+
+typedef void (*ath_txgain_tab)(struct ath_hw *ah);
+
static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
{
- switch (ar9003_hw_get_tx_gain_idx(ah)) {
- case 0:
- default:
- ar9003_tx_gain_table_mode0(ah);
- break;
- case 1:
- ar9003_tx_gain_table_mode1(ah);
- break;
- case 2:
- ar9003_tx_gain_table_mode2(ah);
- break;
- case 3:
- ar9003_tx_gain_table_mode3(ah);
- break;
- case 4:
- ar9003_tx_gain_table_mode4(ah);
- break;
- }
+ static const ath_txgain_tab modes[] = {
+ ar9003_tx_gain_table_mode0,
+ ar9003_tx_gain_table_mode1,
+ ar9003_tx_gain_table_mode2,
+ ar9003_tx_gain_table_mode3,
+ ar9003_tx_gain_table_mode4,
+ ar9003_tx_gain_table_mode5,
+ ar9003_tx_gain_table_mode6,
+ };
+ int idx = ar9003_hw_get_tx_gain_idx(ah);
+
+ if (idx >= ARRAY_SIZE(modes))
+ idx = 0;
+
+ modes[idx](ah);
}
static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
@@ -673,7 +704,7 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
- priv_ops->init_mode_regs = ar9003_hw_init_mode_regs;
+ ar9003_hw_init_mode_regs(ah);
priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
ops->config_pci_powersave = ar9003_hw_configpcipowersave;
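
In ar9003_tx_gain_table_apply() the switch over the gain-table index is replaced by a bounded array of function pointers, with out-of-range indices falling back to entry 0. A self-contained sketch of that dispatch-table pattern is below; the mode functions are dummies, not the driver's ar9003_tx_gain_table_mode*() implementations.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

typedef void (*gain_mode_fn)(void);

static void mode0(void) { printf("mode 0 (default)\n"); }
static void mode1(void) { printf("mode 1\n"); }
static void mode2(void) { printf("mode 2\n"); }

static void apply_gain_table(unsigned int idx)
{
	static const gain_mode_fn modes[] = { mode0, mode1, mode2 };

	if (idx >= ARRAY_SIZE(modes))
		idx = 0;	/* unknown index: fall back to the default */

	modes[idx]();
}

int main(void)
{
	apply_gain_table(1);	/* runs mode1 */
	apply_gain_table(7);	/* out of range, falls back to mode0 */
	return 0;
}
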
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index ce19c09fa8e8..2bf6548dd143 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -68,7 +68,7 @@ static const int m2ThreshExt_off = 127;
static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
{
u16 bMode, fracMode = 0, aModeRefSel = 0;
- u32 freq, channelSel = 0, reg32 = 0;
+ u32 freq, chan_frac, div, channelSel = 0, reg32 = 0;
struct chan_centers centers;
int loadSynthChannel;
@@ -77,9 +77,6 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
if (freq < 4800) { /* 2 GHz, fractional mode */
if (AR_SREV_9330(ah)) {
- u32 chan_frac;
- u32 div;
-
if (ah->is_clk_25mhz)
div = 75;
else
@@ -89,34 +86,40 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
chan_frac = (((freq * 4) % div) * 0x20000) / div;
channelSel = (channelSel << 17) | chan_frac;
} else if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
- u32 chan_frac;
-
/*
- * freq_ref = 40 / (refdiva >> amoderefsel); where refdiva=1 and amoderefsel=0
+ * freq_ref = 40 / (refdiva >> amoderefsel);
+ * where refdiva=1 and amoderefsel=0
* ndiv = ((chan_mhz * 4) / 3) / freq_ref;
* chansel = int(ndiv), chanfrac = (ndiv - chansel) * 0x20000
*/
channelSel = (freq * 4) / 120;
chan_frac = (((freq * 4) % 120) * 0x20000) / 120;
channelSel = (channelSel << 17) | chan_frac;
- } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
+ } else if (AR_SREV_9340(ah)) {
if (ah->is_clk_25mhz) {
- u32 chan_frac;
-
channelSel = (freq * 2) / 75;
chan_frac = (((freq * 2) % 75) * 0x20000) / 75;
channelSel = (channelSel << 17) | chan_frac;
- } else
+ } else {
channelSel = CHANSEL_2G(freq) >> 1;
- } else
+ }
+ } else if (AR_SREV_9550(ah)) {
+ if (ah->is_clk_25mhz)
+ div = 75;
+ else
+ div = 120;
+
+ channelSel = (freq * 4) / div;
+ chan_frac = (((freq * 4) % div) * 0x20000) / div;
+ channelSel = (channelSel << 17) | chan_frac;
+ } else {
channelSel = CHANSEL_2G(freq);
+ }
/* Set to 2G mode */
bMode = 1;
} else {
if ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) &&
ah->is_clk_25mhz) {
- u32 chan_frac;
-
channelSel = freq / 75;
chan_frac = ((freq % 75) * 0x20000) / 75;
channelSel = (channelSel << 17) | chan_frac;
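
The AR9550 branch added above programs the synthesizer with the same fractional split used by the other fractional-mode chips: the integer part of (freq * 4) / div goes in the upper bits and the remainder is scaled by 0x20000 to form the fraction. A worked sketch, assuming div = 120 for a 40 MHz reference (the diff uses 75 for 25 MHz clocks) and making no assumptions about the register layout beyond the 17-bit shift shown in the code:

#include <stdio.h>

static unsigned int chansel_word(unsigned int freq_mhz, unsigned int div)
{
	unsigned int sel  = (freq_mhz * 4) / div;			/* integer part */
	unsigned int frac = (((freq_mhz * 4) % div) * 0x20000) / div;	/* scaled remainder */

	return (sel << 17) | frac;
}

int main(void)
{
	/* 2412 MHz: 9648 / 120 = 80 remainder 48, so frac = 48 * 0x20000 / 120 = 0xcccc */
	printf("0x%08x\n", chansel_word(2412, 120));
	return 0;
}
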
@@ -586,32 +589,19 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
ath9k_hw_synth_delay(ah, chan, synthDelay);
}
-static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
{
- switch (rx) {
- case 0x5:
+ if (ah->caps.tx_chainmask == 5 || ah->caps.rx_chainmask == 5)
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
- case 0x3:
- case 0x1:
- case 0x2:
- case 0x7:
- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
- break;
- default:
- break;
- }
+
+ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
+ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
- REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
- else
- REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+ tx = 3;
- if (tx == 0x5) {
- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
- AR_PHY_SWAP_ALT_CHAIN);
- }
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx);
}
/*
@@ -1450,6 +1440,67 @@ set_rfmode:
return 0;
}
+static void ar9003_hw_spectral_scan_config(struct ath_hw *ah,
+ struct ath_spec_scan *param)
+{
+ u8 count;
+
+ if (!param->enabled) {
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ENABLE);
+ return;
+ }
+
+ REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA);
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
+
+	/* on AR93xx and newer, count = 0 will make the chip send
+ * spectral samples endlessly. Check if this really was intended,
+ * and fix otherwise.
+ */
+ count = param->count;
+ if (param->endless)
+ count = 0;
+ else if (param->count == 0)
+ count = 1;
+
+ if (param->short_repeat)
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+ else
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_COUNT, count);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_PERIOD, param->period);
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_FFT_PERIOD, param->fft_period);
+
+ return;
+}
+
+static void ar9003_hw_spectral_scan_trigger(struct ath_hw *ah)
+{
+ /* Activate spectral scan */
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE);
+}
+
+static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /* Poll for spectral scan complete */
+ if (!ath9k_hw_wait(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_ACTIVE,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_err(common, "spectral scan wait failed\n");
+ return;
+ }
+}
+
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1483,6 +1534,9 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
+ ops->spectral_scan_config = ar9003_hw_spectral_scan_config;
+ ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger;
+ ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait;
ar9003_hw_set_nf_limits(ah);
ar9003_hw_set_radar_conf(ah);
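
The new spectral scan hooks mostly translate struct ath_spec_scan fields into AR_PHY_SPECTRAL_SCAN register fields; the one piece of logic is the sample-count rule spelled out in the comment above: on these chips a count of 0 means "scan forever", so a zero count that was not explicitly requested as endless is bumped to 1. A tiny sketch of that rule, separated from the register writes for clarity (the function name is illustrative, not part of the driver):

#include <stdio.h>

/* Map the requested sample count to what the hardware expects. */
static unsigned char spectral_scan_count(unsigned char requested, int endless)
{
	if (endless)
		return 0;	/* 0 == endless sampling on AR93xx and newer */
	if (requested == 0)
		return 1;	/* 0 without "endless" was probably unintended */
	return requested;
}

int main(void)
{
	printf("%d %d %d\n",
	       spectral_scan_count(8, 0),	/* 8 */
	       spectral_scan_count(0, 0),	/* 1 */
	       spectral_scan_count(0, 1));	/* 0 */
	return 0;
}
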
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 107956298488..e71774196c01 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -1028,7 +1028,7 @@
#define AR_PHY_TPC_5_B2 (AR_SM2_BASE + 0x208)
#define AR_PHY_TPC_6_B2 (AR_SM2_BASE + 0x20c)
#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
-#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240)
+#define AR_PHY_TPC_19_B2 (AR_SM2_BASE + 0x240)
#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c)
#define AR_PHY_TX_IQCAL_CORR_COEFF_B2(_i) (AR_SM2_BASE + 0x450 + ((_i) << 2))
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index f69d292bdc02..25db9215985a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -1172,6 +1172,106 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
{0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
};
+static const u32 ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03eaac5a, 0x03eaac5a},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03f330ac, 0x03f330ac},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc3f00, 0x03fc3f00},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ffc000, 0x03ffc000},
+ {0x0000a394, 0x00000444, 0x00000444, 0x00000404, 0x00000404},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x02000001, 0x02000001},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x05000003, 0x05000003},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0a000005, 0x0a000005},
+ {0x0000a510, 0x16000220, 0x16000220, 0x0e000201, 0x0e000201},
+ {0x0000a514, 0x1c000223, 0x1c000223, 0x11000203, 0x11000203},
+ {0x0000a518, 0x21002220, 0x21002220, 0x14000401, 0x14000401},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x18000403, 0x18000403},
+ {0x0000a520, 0x2b022220, 0x2b022220, 0x1b000602, 0x1b000602},
+ {0x0000a524, 0x2f022222, 0x2f022222, 0x1f000802, 0x1f000802},
+ {0x0000a528, 0x34022225, 0x34022225, 0x21000620, 0x21000620},
+ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x25000820, 0x25000820},
+ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x29000822, 0x29000822},
+ {0x0000a534, 0x4202242a, 0x4202242a, 0x2d000824, 0x2d000824},
+ {0x0000a538, 0x4702244a, 0x4702244a, 0x30000828, 0x30000828},
+ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x3400082a, 0x3400082a},
+ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x38000849, 0x38000849},
+ {0x0000a544, 0x5302266c, 0x5302266c, 0x3b000a2c, 0x3b000a2c},
+ {0x0000a548, 0x5702286c, 0x5702286c, 0x3e000e2b, 0x3e000e2b},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42000e2d, 0x42000e2d},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4500124a, 0x4500124a},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4900124c, 0x4900124c},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c00126c, 0x4c00126c},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x4f00128c, 0x4f00128c},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x52001290, 0x52001290},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001292, 0x56001292},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x02800001, 0x02800001},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x05800003, 0x05800003},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0a800005, 0x0a800005},
+ {0x0000a590, 0x16800220, 0x16800220, 0x0e800201, 0x0e800201},
+ {0x0000a594, 0x1c800223, 0x1c800223, 0x11800203, 0x11800203},
+ {0x0000a598, 0x21820220, 0x21820220, 0x14800401, 0x14800401},
+ {0x0000a59c, 0x27820223, 0x27820223, 0x18800403, 0x18800403},
+ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1b800602, 0x1b800602},
+ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x1f800802, 0x1f800802},
+ {0x0000a5a8, 0x34822225, 0x34822225, 0x21800620, 0x21800620},
+ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x25800820, 0x25800820},
+ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x29800822, 0x29800822},
+ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x2d800824, 0x2d800824},
+ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x30800828, 0x30800828},
+ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x3480082a, 0x3480082a},
+ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x38800849, 0x38800849},
+ {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3b800a2c, 0x3b800a2c},
+ {0x0000a5c8, 0x5782286c, 0x5782286c, 0x3e800e2b, 0x3e800e2b},
+ {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x42800e2d, 0x42800e2d},
+ {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4580124a, 0x4580124a},
+ {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4980124c, 0x4980124c},
+ {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x4c80126c, 0x4c80126c},
+ {0x0000a5dc, 0x7086308c, 0x7086308c, 0x4f80128c, 0x4f80128c},
+ {0x0000a5e0, 0x738a308a, 0x738a308a, 0x52801290, 0x52801290},
+ {0x0000a5e4, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5e8, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5ec, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5f0, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801292, 0x56801292},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404501, 0x01404501},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x01404501, 0x01404501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x03c0cf02, 0x03c0cf02},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03c0cf03, 0x03c0cf03},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04011004, 0x04011004},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x05419405, 0x05419405},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x05419506, 0x05419506},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03eaac5a, 0x03eaac5a},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03f330ac, 0x03f330ac},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc3f00, 0x03fc3f00},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ffc000, 0x03ffc000},
+ {0x00016044, 0x022492db, 0x022492db, 0x022492db, 0x022492db},
+ {0x00016048, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0xf0318000, 0xf0318000, 0xf0318000, 0xf0318000},
+ {0x00016444, 0x022492db, 0x022492db, 0x022492db, 0x022492db},
+ {0x00016448, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
+};
+
static const u32 ar9340_1p0_mac_core[][2] = {
/* Addr allmodes */
{0x00000008, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index a3710f3bb90c..712f415b8c08 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -260,6 +260,79 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
+static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
+ {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
+ {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
+ {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
+ {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
@@ -450,6 +523,79 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
+static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
+ {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x07000203, 0x07000203},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x14000406, 0x14000406},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1800040a, 0x1800040a},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000460, 0x1c000460},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x22000463, 0x22000463},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x26000465, 0x26000465},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2e0006e0, 0x2e0006e0},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x410008e5, 0x410008e5},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x430008e6, 0x430008e6},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4a0008ec, 0x4a0008ec},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4e0008f1, 0x4e0008f1},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x520008f3, 0x520008f3},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x54000eed, 0x54000eed},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x58000ef1, 0x58000ef1},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5c000ef3, 0x5c000ef3},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x60000ef5, 0x60000ef5},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x62000ef6, 0x62000ef6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+ {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
+ {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
+ {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
+ {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
static const u32 ar9485_1_1[][2] = {
/* Addr allmodes */
{0x0000a580, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index df97f21c52dc..ccc5b6c99add 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -23,16 +23,16 @@
static const u32 ar955x_1p0_radio_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00016098, 0xd2dd5554, 0xd2dd5554, 0xd28b3330, 0xd28b3330},
- {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x06345f2a, 0x06345f2a},
- {0x000160ac, 0xa4647c00, 0xa4647c00, 0xa4646800, 0xa4646800},
- {0x000160b0, 0x01885f52, 0x01885f52, 0x04accf3a, 0x04accf3a},
- {0x00016104, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x0a566f3a, 0x0a566f3a},
+ {0x000160ac, 0xa4647c00, 0xa4647c00, 0x24647c00, 0x24647c00},
+ {0x000160b0, 0x01885f52, 0x01885f52, 0x01885f52, 0x01885f52},
+ {0x00016104, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001},
{0x0001610c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016140, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
- {0x00016504, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x00016504, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001},
{0x0001650c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016540, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
- {0x00016904, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x00016904, 0xb7a00000, 0xb7a00000, 0xb7a00001, 0xb7a00001},
{0x0001690c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
{0x00016940, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
};
@@ -69,15 +69,15 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = {
{0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
{0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
- {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
{0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
{0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
{0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
- {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
{0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
- {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
{0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
@@ -125,7 +125,7 @@ static const u32 ar955x_1p0_radio_core[][2] = {
{0x00016094, 0x00000000},
{0x000160a0, 0x0a108ffe},
{0x000160a4, 0x812fc370},
- {0x000160a8, 0x423c8000},
+ {0x000160a8, 0x423c8100},
{0x000160b4, 0x92480080},
{0x000160c0, 0x006db6d0},
{0x000160c4, 0x6db6db60},
@@ -134,7 +134,7 @@ static const u32 ar955x_1p0_radio_core[][2] = {
{0x00016100, 0x11999601},
{0x00016108, 0x00080010},
{0x00016144, 0x02084080},
- {0x00016148, 0x000080c0},
+ {0x00016148, 0x00008040},
{0x00016280, 0x01800804},
{0x00016284, 0x00038dc5},
{0x00016288, 0x00000000},
@@ -178,7 +178,7 @@ static const u32 ar955x_1p0_radio_core[][2] = {
{0x00016500, 0x11999601},
{0x00016508, 0x00080010},
{0x00016544, 0x02084080},
- {0x00016548, 0x000080c0},
+ {0x00016548, 0x00008040},
{0x00016780, 0x00000000},
{0x00016784, 0x00000000},
{0x00016788, 0x00400705},
@@ -218,7 +218,7 @@ static const u32 ar955x_1p0_radio_core[][2] = {
{0x00016900, 0x11999601},
{0x00016908, 0x00080010},
{0x00016944, 0x02084080},
- {0x00016948, 0x000080c0},
+ {0x00016948, 0x00008040},
{0x00016b80, 0x00000000},
{0x00016b84, 0x00000000},
{0x00016b88, 0x00400705},
@@ -245,9 +245,9 @@ static const u32 ar955x_1p0_radio_core[][2] = {
static const u32 ar955x_1p0_modes_xpa_tx_gain_table[][9] = {
/* Addr 5G_HT20_L 5G_HT40_L 5G_HT20_M 5G_HT40_M 5G_HT20_H 5G_HT40_H 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
- {0x0000a2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
- {0x0000a2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000a2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000a2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000a2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0},
{0x0000a2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
{0x0000a410, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050da, 0x000050da},
{0x0000a500, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000000, 0x00000000},
@@ -256,63 +256,63 @@ static const u32 ar955x_1p0_modes_xpa_tx_gain_table[][9] = {
{0x0000a50c, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c000006, 0x0c000006},
{0x0000a510, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x0f00000a, 0x0f00000a},
{0x0000a514, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x1300000c, 0x1300000c},
- {0x0000a518, 0x19004008, 0x19004008, 0x19004008, 0x19004008, 0x18004008, 0x18004008, 0x1700000e, 0x1700000e},
- {0x0000a51c, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1c00400a, 0x1c00400a, 0x1b000064, 0x1b000064},
- {0x0000a520, 0x230020a2, 0x230020a2, 0x210020a2, 0x210020a2, 0x200020a2, 0x200020a2, 0x1f000242, 0x1f000242},
- {0x0000a524, 0x2500006e, 0x2500006e, 0x2500006e, 0x2500006e, 0x2400006e, 0x2400006e, 0x23000229, 0x23000229},
- {0x0000a528, 0x29022221, 0x29022221, 0x28022221, 0x28022221, 0x27022221, 0x27022221, 0x270002a2, 0x270002a2},
- {0x0000a52c, 0x2d00062a, 0x2d00062a, 0x2c00062a, 0x2c00062a, 0x2a00062a, 0x2a00062a, 0x2c001203, 0x2c001203},
- {0x0000a530, 0x340220a5, 0x340220a5, 0x320220a5, 0x320220a5, 0x2f0220a5, 0x2f0220a5, 0x30001803, 0x30001803},
- {0x0000a534, 0x380022c5, 0x380022c5, 0x350022c5, 0x350022c5, 0x320022c5, 0x320022c5, 0x33000881, 0x33000881},
- {0x0000a538, 0x3b002486, 0x3b002486, 0x39002486, 0x39002486, 0x36002486, 0x36002486, 0x38001809, 0x38001809},
- {0x0000a53c, 0x3f00248a, 0x3f00248a, 0x3d00248a, 0x3d00248a, 0x3a00248a, 0x3a00248a, 0x3a000814, 0x3a000814},
- {0x0000a540, 0x4202242c, 0x4202242c, 0x4102242c, 0x4102242c, 0x3f02242c, 0x3f02242c, 0x3f001a0c, 0x3f001a0c},
- {0x0000a544, 0x490044c6, 0x490044c6, 0x460044c6, 0x460044c6, 0x420044c6, 0x420044c6, 0x43001a0e, 0x43001a0e},
- {0x0000a548, 0x4d024485, 0x4d024485, 0x4a024485, 0x4a024485, 0x46024485, 0x46024485, 0x46001812, 0x46001812},
- {0x0000a54c, 0x51044483, 0x51044483, 0x4e044483, 0x4e044483, 0x4a044483, 0x4a044483, 0x49001884, 0x49001884},
- {0x0000a550, 0x5404a40c, 0x5404a40c, 0x5204a40c, 0x5204a40c, 0x4d04a40c, 0x4d04a40c, 0x4d001e84, 0x4d001e84},
- {0x0000a554, 0x57024632, 0x57024632, 0x55024632, 0x55024632, 0x52024632, 0x52024632, 0x50001e69, 0x50001e69},
- {0x0000a558, 0x5c00a634, 0x5c00a634, 0x5900a634, 0x5900a634, 0x5600a634, 0x5600a634, 0x550006f4, 0x550006f4},
- {0x0000a55c, 0x5f026832, 0x5f026832, 0x5d026832, 0x5d026832, 0x5a026832, 0x5a026832, 0x59000ad3, 0x59000ad3},
- {0x0000a560, 0x6602b012, 0x6602b012, 0x6202b012, 0x6202b012, 0x5d02b012, 0x5d02b012, 0x5e000ad5, 0x5e000ad5},
- {0x0000a564, 0x6e02d0e1, 0x6e02d0e1, 0x6802d0e1, 0x6802d0e1, 0x6002d0e1, 0x6002d0e1, 0x61001ced, 0x61001ced},
- {0x0000a568, 0x7202b4c4, 0x7202b4c4, 0x6c02b4c4, 0x6c02b4c4, 0x6502b4c4, 0x6502b4c4, 0x660018d4, 0x660018d4},
- {0x0000a56c, 0x75007894, 0x75007894, 0x70007894, 0x70007894, 0x6b007894, 0x6b007894, 0x660018d4, 0x660018d4},
- {0x0000a570, 0x7b025c74, 0x7b025c74, 0x75025c74, 0x75025c74, 0x70025c74, 0x70025c74, 0x660018d4, 0x660018d4},
- {0x0000a574, 0x8300bcb5, 0x8300bcb5, 0x7a00bcb5, 0x7a00bcb5, 0x7600bcb5, 0x7600bcb5, 0x660018d4, 0x660018d4},
- {0x0000a578, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4},
- {0x0000a57c, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4},
+ {0x0000a518, 0x1700002b, 0x1700002b, 0x1700002b, 0x1700002b, 0x1600002b, 0x1600002b, 0x1700000e, 0x1700000e},
+ {0x0000a51c, 0x1b00002d, 0x1b00002d, 0x1b00002d, 0x1b00002d, 0x1a00002d, 0x1a00002d, 0x1b000064, 0x1b000064},
+ {0x0000a520, 0x20000031, 0x20000031, 0x1f000031, 0x1f000031, 0x1e000031, 0x1e000031, 0x1f000242, 0x1f000242},
+ {0x0000a524, 0x24000051, 0x24000051, 0x23000051, 0x23000051, 0x23000051, 0x23000051, 0x23000229, 0x23000229},
+ {0x0000a528, 0x27000071, 0x27000071, 0x27000071, 0x27000071, 0x26000071, 0x26000071, 0x270002a2, 0x270002a2},
+ {0x0000a52c, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2b000092, 0x2c001203, 0x2c001203},
+ {0x0000a530, 0x3000028c, 0x3000028c, 0x2f00028c, 0x2f00028c, 0x2e00028c, 0x2e00028c, 0x30001803, 0x30001803},
+ {0x0000a534, 0x34000290, 0x34000290, 0x33000290, 0x33000290, 0x32000290, 0x32000290, 0x33000881, 0x33000881},
+ {0x0000a538, 0x37000292, 0x37000292, 0x36000292, 0x36000292, 0x35000292, 0x35000292, 0x38001809, 0x38001809},
+ {0x0000a53c, 0x3b02028d, 0x3b02028d, 0x3a02028d, 0x3a02028d, 0x3902028d, 0x3902028d, 0x3a000814, 0x3a000814},
+ {0x0000a540, 0x3f020291, 0x3f020291, 0x3e020291, 0x3e020291, 0x3d020291, 0x3d020291, 0x3f001a0c, 0x3f001a0c},
+ {0x0000a544, 0x44020490, 0x44020490, 0x43020490, 0x43020490, 0x42020490, 0x42020490, 0x43001a0e, 0x43001a0e},
+ {0x0000a548, 0x48020492, 0x48020492, 0x47020492, 0x47020492, 0x46020492, 0x46020492, 0x46001812, 0x46001812},
+ {0x0000a54c, 0x4c020692, 0x4c020692, 0x4b020692, 0x4b020692, 0x4a020692, 0x4a020692, 0x49001884, 0x49001884},
+ {0x0000a550, 0x50020892, 0x50020892, 0x4f020892, 0x4f020892, 0x4e020892, 0x4e020892, 0x4d001e84, 0x4d001e84},
+ {0x0000a554, 0x53040891, 0x53040891, 0x53040891, 0x53040891, 0x52040891, 0x52040891, 0x50001e69, 0x50001e69},
+ {0x0000a558, 0x58040893, 0x58040893, 0x57040893, 0x57040893, 0x56040893, 0x56040893, 0x550006f4, 0x550006f4},
+ {0x0000a55c, 0x5c0408b4, 0x5c0408b4, 0x5a0408b4, 0x5a0408b4, 0x5a0408b4, 0x5a0408b4, 0x59000ad3, 0x59000ad3},
+ {0x0000a560, 0x610408b6, 0x610408b6, 0x5e0408b6, 0x5e0408b6, 0x5e0408b6, 0x5e0408b6, 0x5e000ad5, 0x5e000ad5},
+ {0x0000a564, 0x670408f6, 0x670408f6, 0x620408f6, 0x620408f6, 0x620408f6, 0x620408f6, 0x61001ced, 0x61001ced},
+ {0x0000a568, 0x6a040cf6, 0x6a040cf6, 0x66040cf6, 0x66040cf6, 0x66040cf6, 0x66040cf6, 0x660018d4, 0x660018d4},
+ {0x0000a56c, 0x6d040d76, 0x6d040d76, 0x6a040d76, 0x6a040d76, 0x6a040d76, 0x6a040d76, 0x660018d4, 0x660018d4},
+ {0x0000a570, 0x70060db6, 0x70060db6, 0x6e060db6, 0x6e060db6, 0x6e060db6, 0x6e060db6, 0x660018d4, 0x660018d4},
+ {0x0000a574, 0x730a0df6, 0x730a0df6, 0x720a0df6, 0x720a0df6, 0x720a0df6, 0x720a0df6, 0x660018d4, 0x660018d4},
+ {0x0000a578, 0x770a13f6, 0x770a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x660018d4, 0x660018d4},
+ {0x0000a57c, 0x770a13f6, 0x770a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x760a13f6, 0x660018d4, 0x660018d4},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03804000, 0x03804000},
- {0x0000a610, 0x04c08c01, 0x04c08c01, 0x04808b01, 0x04808b01, 0x04808a01, 0x04808a01, 0x0300ca02, 0x0300ca02},
- {0x0000a614, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00000e04, 0x00000e04},
- {0x0000a618, 0x04010c01, 0x04010c01, 0x03c10b01, 0x03c10b01, 0x03810a01, 0x03810a01, 0x03014000, 0x03014000},
- {0x0000a61c, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x00000000, 0x00000000},
- {0x0000a620, 0x04010303, 0x04010303, 0x03c10303, 0x03c10303, 0x03810303, 0x03810303, 0x00000000, 0x00000000},
- {0x0000a624, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03014000, 0x03014000},
- {0x0000a628, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x03804c05, 0x03804c05},
- {0x0000a62c, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x0701de06, 0x0701de06},
- {0x0000a630, 0x03418000, 0x03418000, 0x03018000, 0x03018000, 0x02c18000, 0x02c18000, 0x07819c07, 0x07819c07},
- {0x0000a634, 0x03815004, 0x03815004, 0x03414f04, 0x03414f04, 0x03414e04, 0x03414e04, 0x0701dc07, 0x0701dc07},
- {0x0000a638, 0x03005302, 0x03005302, 0x02c05202, 0x02c05202, 0x02805202, 0x02805202, 0x0701dc07, 0x0701dc07},
- {0x0000a63c, 0x04c09302, 0x04c09302, 0x04809202, 0x04809202, 0x04809202, 0x04809202, 0x0701dc07, 0x0701dc07},
- {0x0000b2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
- {0x0000b2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
- {0x0000b2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000a60c, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x02c04b01, 0x03804000, 0x03804000},
+ {0x0000a610, 0x04008b01, 0x04008b01, 0x04008b01, 0x04008b01, 0x03c08b01, 0x03c08b01, 0x0300ca02, 0x0300ca02},
+ {0x0000a614, 0x05811403, 0x05811403, 0x05411303, 0x05411303, 0x05411303, 0x05411303, 0x00000e04, 0x00000e04},
+ {0x0000a618, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03014000, 0x03014000},
+ {0x0000a61c, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x00000000, 0x00000000},
+ {0x0000a620, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x00000000, 0x00000000},
+ {0x0000a624, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03014000, 0x03014000},
+ {0x0000a628, 0x05811604, 0x05811604, 0x05411504, 0x05411504, 0x05411504, 0x05411504, 0x03804c05, 0x03804c05},
+ {0x0000a62c, 0x06815604, 0x06815604, 0x06415504, 0x06415504, 0x06015504, 0x06015504, 0x0701de06, 0x0701de06},
+ {0x0000a630, 0x07819a05, 0x07819a05, 0x07419905, 0x07419905, 0x07019805, 0x07019805, 0x07819c07, 0x07819c07},
+ {0x0000a634, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07},
+ {0x0000a638, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07},
+ {0x0000a63c, 0x07819e06, 0x07819e06, 0x07419d06, 0x07419d06, 0x07019c06, 0x07019c06, 0x0701dc07, 0x0701dc07},
+ {0x0000b2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000b2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000b2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0},
{0x0000b2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
- {0x0000c2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
- {0x0000c2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
- {0x0000c2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000c2dc, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xffff6aaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000c2e0, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffdcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000c2e4, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffe3b0f0, 0xffffe0f0, 0xffffe0f0},
{0x0000c2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
{0x00016044, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
- {0x00016048, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+ {0x00016048, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
{0x00016280, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01808e84, 0x01808e84},
{0x00016444, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
- {0x00016448, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+ {0x00016448, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
{0x00016844, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
- {0x00016848, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+ {0x00016848, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
};
static const u32 ar955x_1p0_mac_core[][2] = {
@@ -846,7 +846,7 @@ static const u32 ar955x_1p0_baseband_core[][2] = {
{0x0000a44c, 0x00000001},
{0x0000a450, 0x00010000},
{0x0000a458, 0x00000000},
- {0x0000a644, 0x3fad9d74},
+ {0x0000a644, 0xbfad9d74},
{0x0000a648, 0x0048060a},
{0x0000a64c, 0x00003c37},
{0x0000a670, 0x03020100},
@@ -1277,7 +1277,7 @@ static const u32 ar955x_1p0_modes_fast_clock[][3] = {
{0x0000801c, 0x148ec02b, 0x148ec057},
{0x00008318, 0x000044c0, 0x00008980},
{0x00009e00, 0x0372131c, 0x0372131c},
- {0x0000a230, 0x0000000b, 0x00000016},
+ {0x0000a230, 0x0000400b, 0x00004016},
{0x0000a254, 0x00000898, 0x00001130},
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 6e1915aee712..28fd99203f64 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -685,6 +685,82 @@ static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = {
#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
+#define ar9580_1p0_type5_tx_gain_table ar9300Modes_type5_tx_gain_table_2p2
+
+static const u32 ar9580_1p0_type6_tx_gain_table[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+ {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+ {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+ {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+ {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+ {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+ {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+ {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+ {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+ {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+ {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+ {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+ {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+ {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x5e08442e, 0x5e08442e, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x620a4431, 0x620a4431, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+ {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+ {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+ {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+};
+
static const u32 ar9580_1p0_soc_preamble[][2] = {
/* Addr allmodes */
{0x000040a4, 0x00a0c1c9},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 86e26a19efda..a56b2416e2f9 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -109,14 +109,11 @@ struct ath_descdma {
void *dd_desc;
dma_addr_t dd_desc_paddr;
u32 dd_desc_len;
- struct ath_buf *dd_bufptr;
};
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
int nbuf, int ndesc, bool is_tx);
-void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
- struct list_head *head);
/***********/
/* RX / TX */
@@ -317,18 +314,17 @@ struct ath_rx {
u32 *rxlink;
u32 num_pkts;
unsigned int rxfilter;
- spinlock_t rxbuflock;
struct list_head rxbuf;
struct ath_descdma rxdma;
- struct ath_buf *rx_bufptr;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
struct sk_buff *frag;
+
+ u32 ampdu_ref;
};
int ath_startrecv(struct ath_softc *sc);
bool ath_stoprecv(struct ath_softc *sc);
-void ath_flushrecv(struct ath_softc *sc);
u32 ath_calcrxfilter(struct ath_softc *sc);
int ath_rx_init(struct ath_softc *sc, int nbufs);
void ath_rx_cleanup(struct ath_softc *sc);
@@ -338,14 +334,12 @@ void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq);
void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq);
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
-bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
-void ath_draintxq(struct ath_softc *sc,
- struct ath_txq *txq, bool retry_tx);
+bool ath_drain_all_txq(struct ath_softc *sc);
+void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq);
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
int ath_tx_init(struct ath_softc *sc, int nbufs);
-void ath_tx_cleanup(struct ath_softc *sc);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
@@ -395,6 +389,7 @@ struct ath_beacon_config {
u16 bmiss_timeout;
u8 dtim_count;
bool enable_beacon;
+ bool ibss_creator;
};
struct ath_beacon {
@@ -646,7 +641,6 @@ void ath_ant_comb_update(struct ath_softc *sc);
enum sc_op_flags {
SC_OP_INVALID,
SC_OP_BEACONS,
- SC_OP_RXFLUSH,
SC_OP_ANI_RUN,
SC_OP_PRIM_STA_VIF,
SC_OP_HW_RESET,
@@ -675,6 +669,23 @@ struct ath9k_vif_iter_data {
int nadhocs; /* number of adhoc vifs */
};
+/* enum spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ * something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ * is performed manually.
+ * @SPECTRAL_CHANSCAN: Like manual, but also triggered when changing channels
+ * during a channel scan.
+ */
+enum spectral_mode {
+ SPECTRAL_DISABLED = 0,
+ SPECTRAL_BACKGROUND,
+ SPECTRAL_MANUAL,
+ SPECTRAL_CHANSCAN,
+};
+
struct ath_softc {
struct ieee80211_hw *hw;
struct device *dev;
@@ -743,6 +754,11 @@ struct ath_softc {
u8 ant_tx, ant_rx;
struct dfs_pattern_detector *dfs_detector;
u32 wow_enabled;
+ /* relay(fs) channel for spectral scan */
+ struct rchan *rfs_chan_spec_scan;
+ enum spectral_mode spectral_mode;
+ struct ath_spec_scan spec_config;
+ int scanning;
#ifdef CONFIG_PM_SLEEP
atomic_t wow_got_bmiss_intr;
@@ -751,6 +767,133 @@ struct ath_softc {
#endif
};
+#define SPECTRAL_SCAN_BITMASK 0x10
+/* Radar info packet format, used for DFS and spectral formats. */
+struct ath_radar_info {
+ u8 pulse_length_pri;
+ u8 pulse_length_ext;
+ u8 pulse_bw_info;
+} __packed;
+
+/* The HT20 spectral data has 4 bytes of additional information at its end.
+ *
+ * [7:0]: all bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: all bins max_magnitude[9:2]
+ * [7:0]: all bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_mag_info {
+ u8 all_bins[3];
+ u8 max_exp;
+} __packed;
+
+#define SPECTRAL_HT20_NUM_BINS 56
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data by -1/+2. This struct is for reference only.
+ */
+struct ath_ht20_fft_packet {
+ u8 data[SPECTRAL_HT20_NUM_BINS];
+ struct ath_ht20_mag_info mag_info;
+ struct ath_radar_info radar_info;
+} __packed;
+
+#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
+
+/* Dynamic 20/40 mode:
+ *
+ * [7:0]: lower bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: lower bins max_magnitude[9:2]
+ * [7:0]: lower bins {max_index[5:0], max_magnitude[11:10]}
+ * [7:0]: upper bins {max_magnitude[1:0], bitmap_weight[5:0]}
+ * [7:0]: upper bins max_magnitude[9:2]
+ * [7:0]: upper bins {max_index[5:0], max_magnitude[11:10]}
+ * [3:0]: max_exp (shift amount to size max bin to 8-bit unsigned)
+ */
+struct ath_ht20_40_mag_info {
+ u8 lower_bins[3];
+ u8 upper_bins[3];
+ u8 max_exp;
+} __packed;
+
+#define SPECTRAL_HT20_40_NUM_BINS 128
+
+/* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data. This struct is for reference only.
+ */
+struct ath_ht20_40_fft_packet {
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+ struct ath_ht20_40_mag_info mag_info;
+ struct ath_radar_info radar_info;
+} __packed;
+
+
+#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
+
+/* grabs the max magnitude from the all/upper/lower bins */
+static inline u16 spectral_max_magnitude(u8 *bins)
+{
+ return (bins[0] & 0xc0) >> 6 |
+ (bins[1] & 0xff) << 2 |
+ (bins[2] & 0x03) << 10;
+}
+
+/* return the max index from the all/upper/lower bins */
+static inline u8 spectral_max_index(u8 *bins)
+{
+ s8 m = (bins[2] & 0xfc) >> 2;
+
+ /* TODO: this still doesn't always report the right values ... */
+ if (m > 32)
+ m |= 0xe0;
+ else
+ m &= ~0xe0;
+
+ return m + 29;
+}
+
+/* return the bitmap weight from the all/upper/lower bins */
+static inline u8 spectral_bitmap_weight(u8 *bins)
+{
+ return bins[0] & 0x3f;
+}
+
+/* FFT sample format given to userspace via debugfs.
+ *
+ * Please keep the type/length fields at the front position and only
+ * add or change fields after them when adding another sample type
+ *
+ * TODO: this might need rework when switching to nl80211-based
+ * interface.
+ */
+enum ath_fft_sample_type {
+ ATH_FFT_SAMPLE_HT20 = 1,
+};
+
+struct fft_sample_tlv {
+ u8 type; /* see ath_fft_sample */
+ __be16 length;
+ /* type dependent data follows */
+} __packed;
+
+struct fft_sample_ht20 {
+ struct fft_sample_tlv tlv;
+
+ u8 max_exp;
+
+ __be16 freq;
+ s8 rssi;
+ s8 noise;
+
+ __be16 max_magnitude;
+ u8 max_index;
+ u8 bitmap_weight;
+
+ __be64 tsf;
+
+ u8 data[SPECTRAL_HT20_NUM_BINS];
+} __packed;
+
void ath9k_tasklet(unsigned long data);
int ath_cabq_update(struct ath_softc *);
@@ -773,6 +916,10 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
bool ath9k_uses_beacons(int type);
+void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
+int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
+ enum spectral_mode spectral_mode);
+
#ifdef CONFIG_ATH9K_PCI
int ath_pci_init(void);
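
The packed "bins" layout documented above is easier to follow with one concrete value. The snippet below is not part of the patch; it is a hypothetical standalone program that applies the same bit extraction as the new spectral_max_magnitude()/spectral_max_index()/spectral_bitmap_weight() helpers to an example 3-byte sample, so the field boundaries of ath_ht20_mag_info are explicit:

	/* Illustrative only: unpack one 3-byte bins triple, e.g.
	 * bins[] = { 0x81, 0x34, 0x02 }:
	 *   bitmap_weight =  0x81 & 0x3f          -> 1
	 *   max_magnitude = (0x81 & 0xc0) >> 6     = 2
	 *                 | (0x34 & 0xff) << 2     = 208
	 *                 | (0x02 & 0x03) << 10    = 2048  -> 2258
	 *   max_index     = (0x02 & 0xfc) >> 2     -> 0 (before the sign
	 *                   fix-up done in spectral_max_index())
	 */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t bins[3] = { 0x81, 0x34, 0x02 };
		unsigned int weight = bins[0] & 0x3f;
		unsigned int mag = (bins[0] & 0xc0) >> 6 |
				   (bins[1] & 0xff) << 2 |
				   (bins[2] & 0x03) << 10;
		unsigned int idx_raw = (bins[2] & 0xfc) >> 2;

		printf("weight=%u magnitude=%u index_raw=%u\n",
		       weight, mag, idx_raw);
		return 0;
	}
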
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 531fffd801a3..5f05c26d1ec4 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -147,6 +147,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
bf->bf_buf_addr = 0;
+ bf->bf_mpdu = NULL;
}
skb = ieee80211_beacon_get(hw, vif);
@@ -198,7 +199,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
if (sc->nvifs > 1) {
ath_dbg(common, BEACON,
"Flushing previous cabq traffic\n");
- ath_draintxq(sc, cabq, false);
+ ath_draintxq(sc, cabq);
}
}
@@ -359,7 +360,6 @@ void ath9k_beacon_tasklet(unsigned long data)
return;
bf = ath9k_beacon_generate(sc->hw, vif);
- WARN_ON(!bf);
if (sc->beacon.bmisscnt != 0) {
ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n",
@@ -407,12 +407,17 @@ void ath9k_beacon_tasklet(unsigned long data)
}
}
-static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt, u32 intval)
+/*
+ * Both nexttbtt and intval have to be in usecs.
+ */
+static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
+ u32 intval, bool reset_tsf)
{
struct ath_hw *ah = sc->sc_ah;
ath9k_hw_disable_interrupts(ah);
- ath9k_hw_reset_tsf(ah);
+ if (reset_tsf)
+ ath9k_hw_reset_tsf(ah);
ath9k_beaconq_config(sc);
ath9k_hw_beaconinit(ah, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
@@ -442,10 +447,12 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
else
ah->imask &= ~ATH9K_INT_SWBA;
- ath_dbg(common, BEACON, "AP nexttbtt: %u intval: %u conf_intval: %u\n",
+ ath_dbg(common, BEACON,
+ "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
nexttbtt, intval, conf->beacon_interval);
- ath9k_beacon_init(sc, nexttbtt, intval);
+ ath9k_beacon_init(sc, nexttbtt, intval, true);
}
/*
@@ -586,17 +593,45 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
ath9k_reset_beacon_status(sc);
intval = TU_TO_USEC(conf->beacon_interval);
- nexttbtt = intval;
+
+ if (conf->ibss_creator) {
+ nexttbtt = intval;
+ } else {
+ u32 tbtt, offset, tsftu;
+ u64 tsf;
+
+ /*
+ * Pull nexttbtt forward to reflect the current
+ * sync'd TSF.
+ */
+ tsf = ath9k_hw_gettsf64(ah);
+ tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+ offset = tsftu % conf->beacon_interval;
+ tbtt = tsftu - offset;
+ if (offset)
+ tbtt += conf->beacon_interval;
+
+ nexttbtt = TU_TO_USEC(tbtt);
+ }
if (conf->enable_beacon)
ah->imask |= ATH9K_INT_SWBA;
else
ah->imask &= ~ATH9K_INT_SWBA;
- ath_dbg(common, BEACON, "IBSS nexttbtt: %u intval: %u conf_intval: %u\n",
+ ath_dbg(common, BEACON,
+ "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+ (conf->enable_beacon) ? "Enable" : "Disable",
nexttbtt, intval, conf->beacon_interval);
- ath9k_beacon_init(sc, nexttbtt, intval);
+ ath9k_beacon_init(sc, nexttbtt, intval, conf->ibss_creator);
+
+ /*
+ * Set the global 'beacon has been configured' flag for the
+ * joiner case in IBSS mode.
+ */
+ if (!conf->ibss_creator && conf->enable_beacon)
+ set_bit(SC_OP_BEACONS, &sc->sc_flags);
}
bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
@@ -639,6 +674,7 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
cur_conf->dtim_period = bss_conf->dtim_period;
cur_conf->listen_interval = 1;
cur_conf->dtim_count = 1;
+ cur_conf->ibss_creator = bss_conf->ibss_creator;
cur_conf->bmiss_timeout =
ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
@@ -666,34 +702,59 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
{
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+ unsigned long flags;
+ bool skip_beacon = false;
if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
ath9k_cache_beacon_config(sc, bss_conf);
ath9k_set_beacon(sc);
set_bit(SC_OP_BEACONS, &sc->sc_flags);
- } else {
- /*
- * Take care of multiple interfaces when
- * enabling/disabling SWBA.
- */
- if (changed & BSS_CHANGED_BEACON_ENABLED) {
- if (!bss_conf->enable_beacon &&
- (sc->nbcnvifs <= 1)) {
- cur_conf->enable_beacon = false;
- } else if (bss_conf->enable_beacon) {
- cur_conf->enable_beacon = true;
- ath9k_cache_beacon_config(sc, bss_conf);
- }
+ return;
+
+ }
+
+ /*
+ * Take care of multiple interfaces when
+ * enabling/disabling SWBA.
+ */
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (!bss_conf->enable_beacon &&
+ (sc->nbcnvifs <= 1)) {
+ cur_conf->enable_beacon = false;
+ } else if (bss_conf->enable_beacon) {
+ cur_conf->enable_beacon = true;
+ ath9k_cache_beacon_config(sc, bss_conf);
}
+ }
- if (cur_conf->beacon_interval) {
+ /*
+ * Configure the HW beacon registers only when we have a valid
+ * beacon interval.
+ */
+ if (cur_conf->beacon_interval) {
+ /*
+ * If we are joining an existing IBSS network, start beaconing
+ * only after a TSF-sync has taken place. Ensure that this
+ * happens by setting the appropriate flags.
+ */
+ if ((changed & BSS_CHANGED_IBSS) && !bss_conf->ibss_creator &&
+ bss_conf->enable_beacon) {
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ skip_beacon = true;
+ } else {
ath9k_set_beacon(sc);
-
- if (cur_conf->enable_beacon)
- set_bit(SC_OP_BEACONS, &sc->sc_flags);
- else
- clear_bit(SC_OP_BEACONS, &sc->sc_flags);
}
+
+ /*
+ * Do not set the SC_OP_BEACONS flag for IBSS joiner mode
+	 * here; it is done in ath9k_beacon_config_adhoc().
+ */
+ if (cur_conf->enable_beacon && !skip_beacon)
+ set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ else
+ clear_bit(SC_OP_BEACONS, &sc->sc_flags);
}
}
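
The IBSS joiner path added in ath9k_beacon_config_adhoc() above rounds the synced TSF up to the next beacon interval boundary. A minimal standalone sketch of that arithmetic (illustrative only, assuming tsftu already includes the FUDGE slop and everything is in TU):

	/* Example: tsftu = 200050 TU, beacon_interval = 100 TU
	 *   offset = 200050 % 100 = 50
	 *   tbtt   = 200050 - 50 + 100 = 200100 TU  (the next TBTT)
	 */
	static unsigned int next_tbtt_tu(unsigned int tsftu,
					 unsigned int beacon_interval)
	{
		unsigned int offset = tsftu % beacon_interval;
		unsigned int tbtt = tsftu - offset;

		if (offset)
			tbtt += beacon_interval;
		return tbtt;
	}

The driver then converts the result with TU_TO_USEC() before programming the hardware, and skips the TSF reset for the joiner case so it stays aligned with the existing cell.
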
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 13ff9edc2401..3714b971d18e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
+#include <linux/relay.h>
#include <asm/unaligned.h>
#include "ath9k.h"
@@ -861,7 +862,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
RXS_ERR("RX-LENGTH-ERR", rx_len_err);
RXS_ERR("RX-OOM-ERR", rx_oom_err);
RXS_ERR("RX-RATE-ERR", rx_rate_err);
- RXS_ERR("RX-DROP-RXFLUSH", rx_drop_rxflush);
RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);
PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
@@ -895,6 +895,7 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
RXS_ERR("RX-Bytes-All", rx_bytes_all);
RXS_ERR("RX-Beacons", rx_beacons);
RXS_ERR("RX-Frags", rx_frags);
+ RXS_ERR("RX-Spectral", rx_spectral);
if (len > size)
len = size;
@@ -966,6 +967,290 @@ static const struct file_operations fops_recv = {
.llseek = default_llseek,
};
+static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char *mode = "";
+ unsigned int len;
+
+ switch (sc->spectral_mode) {
+ case SPECTRAL_DISABLED:
+ mode = "disable";
+ break;
+ case SPECTRAL_BACKGROUND:
+ mode = "background";
+ break;
+ case SPECTRAL_CHANSCAN:
+ mode = "chanscan";
+ break;
+ case SPECTRAL_MANUAL:
+ mode = "manual";
+ break;
+ }
+ len = strlen(mode);
+ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+static ssize_t write_file_spec_scan_ctl(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (strncmp("trigger", buf, 7) == 0) {
+ ath9k_spectral_scan_trigger(sc->hw);
+ } else if (strncmp("background", buf, 9) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
+ ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
+ } else if (strncmp("chanscan", buf, 8) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_CHANSCAN);
+ ath_dbg(common, CONFIG, "spectral scan: channel scan mode enabled\n");
+ } else if (strncmp("manual", buf, 6) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_MANUAL);
+ ath_dbg(common, CONFIG, "spectral scan: manual mode enabled\n");
+ } else if (strncmp("disable", buf, 7) == 0) {
+ ath9k_spectral_scan_config(sc->hw, SPECTRAL_DISABLED);
+ ath_dbg(common, CONFIG, "spectral scan: disabled\n");
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_spec_scan_ctl = {
+ .read = read_file_spec_scan_ctl,
+ .write = write_file_spec_scan_ctl,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_short_repeat(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.short_repeat);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_short_repeat(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ sc->spec_config.short_repeat = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_short_repeat = {
+ .read = read_file_spectral_short_repeat,
+ .write = write_file_spectral_short_repeat,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_count(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.count);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ sc->spec_config.count = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+ .read = read_file_spectral_count,
+ .write = write_file_spectral_count,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ sc->spec_config.period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_period = {
+ .read = read_file_spectral_period,
+ .write = write_file_spectral_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_fft_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", sc->spec_config.fft_period);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_fft_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 0 || val > 15)
+ return -EINVAL;
+
+ sc->spec_config.fft_period = val;
+ return count;
+}
+
+static const struct file_operations fops_spectral_fft_period = {
+ .read = read_file_spectral_fft_period,
+ .write = write_file_spectral_fft_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ *is_global = 1;
+ return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+
+ return 0;
+}
+
+void ath_debug_send_fft_sample(struct ath_softc *sc,
+ struct fft_sample_tlv *fft_sample_tlv)
+{
+ int length;
+ if (!sc->rfs_chan_spec_scan)
+ return;
+
+ length = __be16_to_cpu(fft_sample_tlv->length) +
+ sizeof(*fft_sample_tlv);
+ relay_write(sc->rfs_chan_spec_scan, fft_sample_tlv, length);
+}
+
+static struct rchan_callbacks rfs_spec_scan_cb = {
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+};
+
+
static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1780,6 +2065,24 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_base_eeprom);
debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_modal_eeprom);
+ sc->rfs_chan_spec_scan = relay_open("spectral_scan",
+ sc->debug.debugfs_phy,
+ 262144, 4, &rfs_spec_scan_cb,
+ NULL);
+ debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spec_scan_ctl);
+ debugfs_create_file("spectral_short_repeat", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_short_repeat);
+ debugfs_create_file("spectral_count", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_spectral_count);
+ debugfs_create_file("spectral_period", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_spectral_period);
+ debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc,
+ &fops_spectral_fft_period);
+
#ifdef CONFIG_ATH9K_MAC_DEBUG
debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_samps);
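
For reference, the new spectral_scan_ctl file accepts the plain keywords handled in write_file_spec_scan_ctl() above ("trigger", "background", "chanscan", "manual", "disable"), and the samples produced by ath_debug_send_fft_sample() appear in the relay files created next to it by relay_open(). A hypothetical userspace sketch (not part of the patch; the path assumes debugfs is mounted at /sys/kernel/debug and the device is phy0):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		static const char cmd[] = "background";
		int fd = open("/sys/kernel/debug/ieee80211/phy0/ath9k/spectral_scan_ctl",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, cmd, sizeof(cmd) - 1) < 0) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}
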
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 375c3b46411e..410d6d8f1aa7 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -23,6 +23,7 @@
struct ath_txq;
struct ath_buf;
+struct fft_sample_tlv;
#ifdef CONFIG_ATH9K_DEBUGFS
#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
@@ -216,9 +217,9 @@ struct ath_tx_stats {
* @rx_oom_err: No. of frames dropped due to OOM issues.
* @rx_rate_err: No. of frames dropped due to rate errors.
* @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
- * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH.
* @rx_beacons: No. of beacons received.
 * @rx_frags: No. of rx-fragments received.
+ * @rx_spectral: No. of spectral packets received.
*/
struct ath_rx_stats {
u32 rx_pkts_all;
@@ -235,9 +236,9 @@ struct ath_rx_stats {
u32 rx_oom_err;
u32 rx_rate_err;
u32 rx_too_many_frags_err;
- u32 rx_drop_rxflush;
u32 rx_beacons;
u32 rx_frags;
+ u32 rx_spectral;
};
struct ath_stats {
@@ -323,6 +324,10 @@ void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
+
+void ath_debug_send_fft_sample(struct ath_softc *sc,
+ struct fft_sample_tlv *fft_sample);
+
#else
#define RX_STAT_INC(c) /* NOP */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
index 24877b00cbf4..467b60014b7b 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -288,11 +288,11 @@ struct dfs_pattern_detector *
dfs_pattern_detector_init(enum nl80211_dfs_regions region)
{
struct dfs_pattern_detector *dpd;
+
dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
- if (dpd == NULL) {
- pr_err("allocation of dfs_pattern_detector failed\n");
+ if (dpd == NULL)
return NULL;
- }
+
*dpd = default_dpd;
INIT_LIST_HEAD(&dpd->channel_detectors);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 05d5ba66cac3..716058b67557 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -280,14 +280,14 @@ err:
return ret;
}
-static int ath9k_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath9k_htc_priv *priv = hw->priv;
- return ath_reg_notifier_apply(wiphy, request,
- ath9k_hw_regulatory(priv->ah));
+ ath_reg_notifier_apply(wiphy, request,
+ ath9k_hw_regulatory(priv->ah));
}
static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
@@ -783,7 +783,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
priv->fw_version_major = be16_to_cpu(cmd_rsp.major);
priv->fw_version_minor = be16_to_cpu(cmd_rsp.minor);
- snprintf(hw->wiphy->fw_version, ETHTOOL_BUSINFO_LEN, "%d.%d",
+ snprintf(hw->wiphy->fw_version, sizeof(hw->wiphy->fw_version), "%d.%d",
priv->fw_version_major,
priv->fw_version_minor);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9c07a8fa5134..a8016d70088a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1628,7 +1628,9 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
if (!ret)
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
ath9k_htc_tx_aggr_oper(priv, vif, sta, action, tid);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index b6a5a08810b8..3ad1fd05c5e7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1196,20 +1196,17 @@ void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
int ath9k_rx_init(struct ath9k_htc_priv *priv)
{
- struct ath_hw *ah = priv->ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_rxbuf *rxbuf;
int i = 0;
INIT_LIST_HEAD(&priv->rx.rxbuf);
spin_lock_init(&priv->rx.rxbuflock);
for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
- rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
- if (rxbuf == NULL) {
- ath_err(common, "Unable to allocate RX buffers\n");
+ struct ath9k_htc_rxbuf *rxbuf =
+ kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
+ if (rxbuf == NULL)
goto err;
- }
+
list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 4a9570dfba72..aac4a406a513 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -344,6 +344,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
skb, htc_hdr->endpoint_id,
txok);
+ } else {
+ kfree_skb(skb);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 0f2b97f6b739..14b701140b49 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -101,22 +101,6 @@ static inline void ath9k_hw_spur_mitigate_freq(struct ath_hw *ah,
ath9k_hw_private_ops(ah)->spur_mitigate_freq(ah, chan);
}
-static inline int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
-{
- if (!ath9k_hw_private_ops(ah)->rf_alloc_ext_banks)
- return 0;
-
- return ath9k_hw_private_ops(ah)->rf_alloc_ext_banks(ah);
-}
-
-static inline void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
-{
- if (!ath9k_hw_private_ops(ah)->rf_free_ext_banks)
- return;
-
- ath9k_hw_private_ops(ah)->rf_free_ext_banks(ah);
-}
-
static inline bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
struct ath9k_channel *chan,
u16 modesIndex)
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 7cb787065913..2a2ae403e0e5 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -54,11 +54,6 @@ static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
ath9k_hw_private_ops(ah)->init_cal_settings(ah);
}
-static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
-{
- ath9k_hw_private_ops(ah)->init_mode_regs(ah);
-}
-
static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -208,7 +203,7 @@ void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
udelay(hw_delay + BASE_ACTIVATE_DELAY);
}
-void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
+void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
int column, unsigned int *writecnt)
{
int r;
@@ -554,28 +549,19 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
ah->eep_ops->get_eeprom_ver(ah),
ah->eep_ops->get_eeprom_rev(ah));
- ecode = ath9k_hw_rf_alloc_ext_banks(ah);
- if (ecode) {
- ath_err(ath9k_hw_common(ah),
- "Failed allocating banks for external radio\n");
- ath9k_hw_rf_free_ext_banks(ah);
- return ecode;
- }
-
- if (ah->config.enable_ani) {
- ath9k_hw_ani_setup(ah);
+ if (ah->config.enable_ani)
ath9k_hw_ani_init(ah);
- }
return 0;
}
-static void ath9k_hw_attach_ops(struct ath_hw *ah)
+static int ath9k_hw_attach_ops(struct ath_hw *ah)
{
- if (AR_SREV_9300_20_OR_LATER(ah))
- ar9003_hw_attach_ops(ah);
- else
- ar9002_hw_attach_ops(ah);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ return ar9002_hw_attach_ops(ah);
+
+ ar9003_hw_attach_ops(ah);
+ return 0;
}
/* Called for all hardware families */
@@ -611,7 +597,9 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_init_defaults(ah);
ath9k_hw_init_config(ah);
- ath9k_hw_attach_ops(ah);
+ r = ath9k_hw_attach_ops(ah);
+ if (r)
+ return r;
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
ath_err(common, "Couldn't wakeup chip\n");
@@ -675,8 +663,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
- ath9k_hw_init_mode_regs(ah);
-
if (!ah->is_pciexpress)
ath9k_hw_disablepcie(ah);
@@ -1153,12 +1139,9 @@ void ath9k_hw_deinit(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (common->state < ATH_HW_INITIALIZED)
- goto free_hw;
+ return;
ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
-
-free_hw:
- ath9k_hw_rf_free_ext_banks(ah);
}
EXPORT_SYMBOL(ath9k_hw_deinit);
@@ -2576,12 +2559,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
rx_chainmask >>= 1;
}
- if (AR_SREV_9300_20_OR_LATER(ah)) {
- ah->enabled_cals |= TX_IQ_CAL;
- if (AR_SREV_9485_OR_LATER(ah))
- ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
- }
-
if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
pCap->hw_caps |= ATH9K_HW_CAP_MCI;
@@ -2590,7 +2567,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_RTT;
}
-
if (AR_SREV_9280_20_OR_LATER(ah)) {
pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE |
ATH9K_HW_WOW_PATTERN_MATCH_EXACT;
@@ -3005,13 +2981,8 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
struct ath_gen_timer *timer;
timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
-
- if (timer == NULL) {
- ath_err(ath9k_hw_common(ah),
- "Failed to allocate memory for hw timer[%d]\n",
- timer_index);
+ if (timer == NULL)
return NULL;
- }
/* allocate a hardware generic timer slot */
timer_table->timers[timer_index] = timer;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 7f1a8e91c908..784e81ccb903 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -397,6 +397,7 @@ enum ath9k_int {
#define MAX_RTT_TABLE_ENTRY 6
#define MAX_IQCAL_MEASUREMENT 8
#define MAX_CL_TAB_ENTRY 16
+#define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
struct ath9k_hw_cal_data {
u16 channel;
@@ -599,13 +600,10 @@ struct ath_hw_radar_conf {
* @init_cal_settings: setup types of calibrations supported
* @init_cal: starts actual calibration
*
- * @init_mode_regs: Initializes mode registers
* @init_mode_gain_regs: Initialize TX/RX gain registers
*
* @rf_set_freq: change frequency
* @spur_mitigate_freq: spur mitigation
- * @rf_alloc_ext_banks:
- * @rf_free_ext_banks:
* @set_rf_regs:
* @compute_pll_control: compute the PLL control value to use for
* AR_RTC_PLL_CONTROL for a given channel
@@ -620,7 +618,6 @@ struct ath_hw_private_ops {
void (*init_cal_settings)(struct ath_hw *ah);
bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
- void (*init_mode_regs)(struct ath_hw *ah);
void (*init_mode_gain_regs)(struct ath_hw *ah);
void (*setup_calibration)(struct ath_hw *ah,
struct ath9k_cal_list *currCal);
@@ -630,8 +627,6 @@ struct ath_hw_private_ops {
struct ath9k_channel *chan);
void (*spur_mitigate_freq)(struct ath_hw *ah,
struct ath9k_channel *chan);
- int (*rf_alloc_ext_banks)(struct ath_hw *ah);
- void (*rf_free_ext_banks)(struct ath_hw *ah);
bool (*set_rf_regs)(struct ath_hw *ah,
struct ath9k_channel *chan,
u16 modesIndex);
@@ -661,6 +656,37 @@ struct ath_hw_private_ops {
};
/**
+ * struct ath_spec_scan - parameters for Atheros spectral scan
+ *
+ * @enabled: enable/disable spectral scan
+ * @short_repeat: controls whether the chip is in spectral scan mode
+ * for 4 usec (enabled) or 204 usec (disabled)
+ * @count: number of scan results requested. There are special meanings
+ * in some chip revisions:
+ * AR92xx: highest bit set (>=128) for endless mode
+ * (spectral scan won't be stopped until explicitly disabled)
+ * AR9300 and newer: 0 for endless mode
+ * @endless: true if endless mode is intended. Otherwise, count value is
+ * corrected to the next possible value.
+ * @period: time duration between successive spectral scan entry points
+ * (period*256*Tclk). Tclk = ath_common->clockrate
+ * @fft_period: PHY passes FFT frames to MAC every (fft_period+1)*4uS
+ *
+ * Note: Tclk = 40MHz or 44MHz depending upon operating mode.
+ * Typically it's 44MHz in 2/5GHz on later chips, but there's
+ * a "fast clock" check for this in 5GHz.
+ *
+ */
+struct ath_spec_scan {
+ bool enabled;
+ bool short_repeat;
+ bool endless;
+ u8 count;
+ u8 period;
+ u8 fft_period;
+};
+
+/**
* struct ath_hw_ops - callbacks used by hardware code and driver code
*
 * This structure contains callbacks designed to be used internally by
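
To put the ath_spec_scan timing comments above into numbers: a scan entry point recurs every period*256 clock cycles, so with the defaults set later in this series (period = 0xFF, fft_period = 0xF in init.c) and a 44 MHz Tclk that is roughly 1.5 ms between entry points, with the PHY handing an FFT frame to the MAC every (0xF+1)*4 = 64 us inside each one. A small illustrative helper (an assumption-laden sketch, not driver code; it takes clockrate as Tclk expressed in MHz):

	/* period * 256 clock cycles expressed in microseconds,
	 * e.g. 255 * 256 / 44 ~= 1483 us for the defaults on a 44 MHz clock.
	 */
	static unsigned int spec_scan_period_us(unsigned int period,
						unsigned int clockrate_mhz)
	{
		return (period * 256) / clockrate_mhz;
	}
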
@@ -668,6 +694,10 @@ struct ath_hw_private_ops {
*
* @config_pci_powersave:
* @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
+ *
+ * @spectral_scan_config: set parameters for spectral scan and enable/disable it
+ * @spectral_scan_trigger: trigger a spectral scan run
+ * @spectral_scan_wait: wait for a spectral scan run to finish
*/
struct ath_hw_ops {
void (*config_pci_powersave)(struct ath_hw *ah,
@@ -688,6 +718,10 @@ struct ath_hw_ops {
void (*antdiv_comb_conf_set)(struct ath_hw *ah,
struct ath_hw_antcomb_conf *antconf);
void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
+ void (*spectral_scan_config)(struct ath_hw *ah,
+ struct ath_spec_scan *param);
+ void (*spectral_scan_trigger)(struct ath_hw *ah);
+ void (*spectral_scan_wait)(struct ath_hw *ah);
};
struct ath_nf_limits {
@@ -710,6 +744,7 @@ enum ath_cal_list {
struct ath_hw {
struct ath_ops reg_ops;
+ struct device *dev;
struct ieee80211_hw *hw;
struct ath_common common;
struct ath9k_hw_version hw_version;
@@ -771,7 +806,6 @@ struct ath_hw {
struct ath9k_cal_list iq_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;
- struct ath9k_cal_list tempCompCalData;
struct ath9k_cal_list *cal_list;
struct ath9k_cal_list *cal_list_last;
struct ath9k_cal_list *cal_list_curr;
@@ -830,10 +864,6 @@ struct ath_hw {
/* ANI */
u32 proc_phyerr;
u32 aniperiod;
- int totalSizeDesired[5];
- int coarse_high[5];
- int coarse_low[5];
- int firpwr[5];
enum ath9k_ani_cmd ani_function;
u32 ani_skip_count;
@@ -979,7 +1009,7 @@ void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
int hw_delay);
bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
-void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
+void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
int column, unsigned int *writecnt);
u32 ath9k_hw_reverse_bits(u32 val, u32 n);
u16 ath9k_hw_computetxtime(struct ath_hw *ah,
@@ -1066,16 +1096,17 @@ void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
int ar9003_paprd_init_table(struct ath_hw *ah);
bool ar9003_paprd_is_done(struct ath_hw *ah);
bool ar9003_is_paprd_enabled(struct ath_hw *ah);
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
/* Hardware family op attach helpers */
-void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
+int ar5008_hw_attach_phy_ops(struct ath_hw *ah);
void ar9002_hw_attach_phy_ops(struct ath_hw *ah);
void ar9003_hw_attach_phy_ops(struct ath_hw *ah);
void ar9002_hw_attach_calib_ops(struct ath_hw *ah);
void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
-void ar9002_hw_attach_ops(struct ath_hw *ah);
+int ar9002_hw_attach_ops(struct ath_hw *ah);
void ar9003_hw_attach_ops(struct ath_hw *ah);
void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f69ef5d48c7b..af932c9444de 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>
+#include <linux/relay.h>
#include "ath9k.h"
@@ -302,16 +303,15 @@ static void setup_ht_cap(struct ath_softc *sc,
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
-static int ath9k_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
- int ret;
- ret = ath_reg_notifier_apply(wiphy, request, reg);
+ ath_reg_notifier_apply(wiphy, request, reg);
/* Set tx power */
if (ah->curchan) {
@@ -321,8 +321,6 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
ath9k_ps_restore(sc);
}
-
- return ret;
}
/*
@@ -337,7 +335,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u8 *ds;
struct ath_buf *bf;
- int i, bsize, error, desc_len;
+ int i, bsize, desc_len;
ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
name, nbuf, ndesc);
@@ -353,8 +351,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
if ((desc_len % 4) != 0) {
ath_err(common, "ath_desc not DWORD aligned\n");
BUG_ON((desc_len % 4) != 0);
- error = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
dd->dd_desc_len = desc_len * nbuf * ndesc;
@@ -378,12 +375,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
}
/* allocate descriptors */
- dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
- &dd->dd_desc_paddr, GFP_KERNEL);
- if (dd->dd_desc == NULL) {
- error = -ENOMEM;
- goto fail;
- }
+ dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (!dd->dd_desc)
+ return -ENOMEM;
+
ds = (u8 *) dd->dd_desc;
ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
name, ds, (u32) dd->dd_desc_len,
@@ -391,12 +387,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
/* allocate buffers */
bsize = sizeof(struct ath_buf) * nbuf;
- bf = kzalloc(bsize, GFP_KERNEL);
- if (bf == NULL) {
- error = -ENOMEM;
- goto fail2;
- }
- dd->dd_bufptr = bf;
+ bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+ if (!bf)
+ return -ENOMEM;
for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
bf->bf_desc = ds;
@@ -422,12 +415,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
list_add_tail(&bf->list, head);
}
return 0;
-fail2:
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-fail:
- memset(dd, 0, sizeof(*dd));
- return error;
}
static int ath9k_init_queues(struct ath_softc *sc)
@@ -457,11 +444,13 @@ static int ath9k_init_channels_rates(struct ath_softc *sc)
ATH9K_NUM_CHANNELS);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
- channels = kmemdup(ath9k_2ghz_chantable,
+ channels = devm_kzalloc(sc->dev,
sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
if (!channels)
return -ENOMEM;
+ memcpy(channels, ath9k_2ghz_chantable,
+ sizeof(ath9k_2ghz_chantable));
sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
@@ -472,14 +461,13 @@ static int ath9k_init_channels_rates(struct ath_softc *sc)
}
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
- channels = kmemdup(ath9k_5ghz_chantable,
+ channels = devm_kzalloc(sc->dev,
sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
- if (!channels) {
- if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
- kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
+ if (!channels)
return -ENOMEM;
- }
+ memcpy(channels, ath9k_5ghz_chantable,
+ sizeof(ath9k_5ghz_chantable));
sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
@@ -509,6 +497,13 @@ static void ath9k_init_misc(struct ath_softc *sc)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
+
+ sc->spec_config.enabled = 0;
+ sc->spec_config.short_repeat = true;
+ sc->spec_config.count = 8;
+ sc->spec_config.endless = false;
+ sc->spec_config.period = 0xFF;
+ sc->spec_config.fft_period = 0xF;
}
static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
@@ -565,10 +560,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
int ret = 0, i;
int csz = 0;
- ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ ah = devm_kzalloc(sc->dev, sizeof(struct ath_hw), GFP_KERNEL);
if (!ah)
return -ENOMEM;
+ ah->dev = sc->dev;
ah->hw = sc->hw;
ah->hw_version.devid = devid;
ah->reg_ops.read = ath9k_ioread32;
@@ -636,7 +632,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (pdata && pdata->eeprom_name) {
ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
if (ret)
- goto err_eeprom;
+ return ret;
}
/* Initializes the hardware for all supported chipsets */
@@ -676,10 +672,6 @@ err_queues:
ath9k_hw_deinit(ah);
err_hw:
ath9k_eeprom_release(sc);
-err_eeprom:
- kfree(ah);
- sc->sc_ah = NULL;
-
return ret;
}
@@ -844,8 +836,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
/* Bring up device */
error = ath9k_init_softc(devid, sc, bus_ops);
- if (error != 0)
- goto error_init;
+ if (error)
+ return error;
ah = sc->sc_ah;
common = ath9k_hw_common(ah);
@@ -855,19 +847,19 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
ath9k_reg_notifier);
if (error)
- goto error_regd;
+ goto deinit;
reg = &common->regulatory;
/* Setup TX DMA */
error = ath_tx_init(sc, ATH_TXBUF);
if (error != 0)
- goto error_tx;
+ goto deinit;
/* Setup RX DMA */
error = ath_rx_init(sc, ATH_RXBUF);
if (error != 0)
- goto error_rx;
+ goto deinit;
ath9k_init_txpower_limits(sc);
@@ -881,19 +873,19 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
/* Register with mac80211 */
error = ieee80211_register_hw(hw);
if (error)
- goto error_register;
+ goto rx_cleanup;
error = ath9k_init_debug(ah);
if (error) {
ath_err(common, "Unable to create debugfs files\n");
- goto error_world;
+ goto unregister;
}
/* Handle world regulatory */
if (!ath_is_world_regd(reg)) {
error = regulatory_hint(hw->wiphy, reg->alpha2);
if (error)
- goto error_world;
+ goto unregister;
}
ath_init_leds(sc);
@@ -901,17 +893,12 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
return 0;
-error_world:
+unregister:
ieee80211_unregister_hw(hw);
-error_register:
+rx_cleanup:
ath_rx_cleanup(sc);
-error_rx:
- ath_tx_cleanup(sc);
-error_tx:
- /* Nothing */
-error_regd:
+deinit:
ath9k_deinit_softc(sc);
-error_init:
return error;
}
@@ -923,12 +910,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
{
int i = 0;
- if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
- kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
-
- if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
- kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
-
ath9k_deinit_btcoex(sc);
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
@@ -940,8 +921,11 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
sc->dfs_detector->exit(sc->dfs_detector);
ath9k_eeprom_release(sc);
- kfree(sc->sc_ah);
- sc->sc_ah = NULL;
+
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
+ relay_close(sc->rfs_chan_spec_scan);
+ sc->rfs_chan_spec_scan = NULL;
+ }
}
void ath9k_deinit_device(struct ath_softc *sc)
@@ -957,22 +941,9 @@ void ath9k_deinit_device(struct ath_softc *sc)
ieee80211_unregister_hw(hw);
ath_rx_cleanup(sc);
- ath_tx_cleanup(sc);
ath9k_deinit_softc(sc);
}
-void ath_descdma_cleanup(struct ath_softc *sc,
- struct ath_descdma *dd,
- struct list_head *head)
-{
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-
- INIT_LIST_HEAD(head);
- kfree(dd->dd_bufptr);
- memset(dd, 0, sizeof(*dd));
-}
-
/************************/
/* Module Hooks */
/************************/
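
Most of the init.c churn above comes from switching to managed (devres) allocations: devm_kzalloc() and dmam_alloc_coherent() tie the buffer lifetime to the device, so ath_descdma_cleanup(), the channel-table kfree() calls and the error_* unwind labels become unnecessary. A minimal sketch of the pattern (illustrative only, not taken from the patch):

	#include <linux/device.h>
	#include <linux/slab.h>

	/* Managed allocation: freed automatically when the device detaches,
	 * so the probe path needs no kfree() on any exit path.
	 */
	static int example_probe(struct device *dev)
	{
		void *buf = devm_kzalloc(dev, 4096, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		/* ... use buf ... */
		return 0;
	}
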
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index b42be910a83d..811007ec07a7 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -605,13 +605,13 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
* reported, then decryption and MIC errors are irrelevant,
* the frame is going to be dropped either way
*/
- if (ads.ds_rxstatus8 & AR_CRCErr)
- rs->rs_status |= ATH9K_RXERR_CRC;
- else if (ads.ds_rxstatus8 & AR_PHYErr) {
+ if (ads.ds_rxstatus8 & AR_PHYErr) {
rs->rs_status |= ATH9K_RXERR_PHY;
phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
rs->rs_phyerr = phyerr;
- } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
+ } else if (ads.ds_rxstatus8 & AR_CRCErr)
+ rs->rs_status |= ATH9K_RXERR_CRC;
+ else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 4a745e68dd94..1ff817061ebc 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -226,7 +226,8 @@ enum ath9k_phyerr {
ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35,
ATH9K_PHYERR_HT_RATE_ILLEGAL = 36,
- ATH9K_PHYERR_MAX = 37,
+ ATH9K_PHYERR_SPECTRAL = 38,
+ ATH9K_PHYERR_MAX = 39,
};
struct ath_desc {
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index be30a9af1528..6e66f9c6782b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -182,7 +182,7 @@ static void ath_restart_work(struct ath_softc *sc)
ath_start_ani(sc);
}
-static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
+static bool ath_prepare_reset(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
bool ret = true;
@@ -196,20 +196,12 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
ath9k_debug_samp_bb_mac(sc);
ath9k_hw_disable_interrupts(ah);
- if (!ath_stoprecv(sc))
+ if (!ath_drain_all_txq(sc))
ret = false;
- if (!ath_drain_all_txq(sc, retry_tx))
+ if (!ath_stoprecv(sc))
ret = false;
- if (!flush) {
- if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
- ath_rx_tasklet(sc, 1, true);
- ath_rx_tasklet(sc, 1, false);
- } else {
- ath_flushrecv(sc);
- }
-
return ret;
}
@@ -255,18 +247,17 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
return true;
}
-static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
- bool retry_tx)
+static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = NULL;
bool fastcc = true;
- bool flush = false;
int r;
__ath_cancel_work(sc);
+ tasklet_disable(&sc->intr_tq);
spin_lock_bh(&sc->sc_pcu_lock);
if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
@@ -276,11 +267,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
if (!hchan) {
fastcc = false;
- flush = true;
hchan = ah->curchan;
}
- if (!ath_prepare_reset(sc, retry_tx, flush))
+ if (!ath_prepare_reset(sc))
fastcc = false;
ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
@@ -302,6 +292,8 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
out:
spin_unlock_bh(&sc->sc_pcu_lock);
+ tasklet_enable(&sc->intr_tq);
+
return r;
}
@@ -319,7 +311,7 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return -EIO;
- r = ath_reset_internal(sc, hchan, false);
+ r = ath_reset_internal(sc, hchan);
return r;
}
@@ -328,28 +320,25 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
struct ath_node *an;
- u8 density;
an = (struct ath_node *)sta->drv_priv;
an->sc = sc;
an->sta = sta;
an->vif = vif;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- ath_tx_node_init(sc, an);
+ ath_tx_node_init(sc, an);
+
+ if (sta->ht_cap.ht_supported) {
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
sta->ht_cap.ampdu_factor);
- density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
- an->mpdudensity = density;
+ an->mpdudensity = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
}
}
static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
- ath_tx_node_cleanup(sc, an);
+ ath_tx_node_cleanup(sc, an);
}
void ath9k_tasklet(unsigned long data)
@@ -549,23 +538,21 @@ chip_reset:
#undef SCHED_INTR
}
-static int ath_reset(struct ath_softc *sc, bool retry_tx)
+static int ath_reset(struct ath_softc *sc)
{
- int r;
+ int i, r;
ath9k_ps_wakeup(sc);
- r = ath_reset_internal(sc, NULL, retry_tx);
+ r = ath_reset_internal(sc, NULL);
- if (retry_tx) {
- int i;
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (ATH_TXQ_SETUP(sc, i)) {
- spin_lock_bh(&sc->tx.txq[i].axq_lock);
- ath_txq_schedule(sc, &sc->tx.txq[i]);
- spin_unlock_bh(&sc->tx.txq[i].axq_lock);
- }
- }
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ spin_lock_bh(&sc->tx.txq[i].axq_lock);
+ ath_txq_schedule(sc, &sc->tx.txq[i]);
+ spin_unlock_bh(&sc->tx.txq[i].axq_lock);
}
ath9k_ps_restore(sc);
@@ -586,7 +573,7 @@ void ath_reset_work(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
- ath_reset(sc, true);
+ ath_reset(sc);
}
/**********************/
@@ -804,7 +791,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
}
- ath_prepare_reset(sc, false, true);
+ ath_prepare_reset(sc);
if (sc->rx.frag) {
dev_kfree_skb_any(sc->rx.frag);
@@ -1075,6 +1062,75 @@ static void ath9k_disable_ps(struct ath_softc *sc)
ath_dbg(common, PS, "PowerSave disabled\n");
}
+void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 rxfilter;
+
+ if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
+ ath_err(common, "spectrum analyzer not implemented on this hardware\n");
+ return;
+ }
+
+ ath9k_ps_wakeup(sc);
+ rxfilter = ath9k_hw_getrxfilter(ah);
+ ath9k_hw_setrxfilter(ah, rxfilter |
+ ATH9K_RX_FILTER_PHYRADAR |
+ ATH9K_RX_FILTER_PHYERR);
+
+	/* TODO: usually this should not be necessary, but for some reason
+ * (or in some mode?) the trigger must be called after the
+ * configuration, otherwise the register will have its values reset
+ * (on my ar9220 to value 0x01002310)
+ */
+ ath9k_spectral_scan_config(hw, sc->spectral_mode);
+ ath9k_hw_ops(ah)->spectral_scan_trigger(ah);
+ ath9k_ps_restore(sc);
+}
+
+int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
+ enum spectral_mode spectral_mode)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
+ ath_err(common, "spectrum analyzer not implemented on this hardware\n");
+ return -1;
+ }
+
+ switch (spectral_mode) {
+ case SPECTRAL_DISABLED:
+ sc->spec_config.enabled = 0;
+ break;
+ case SPECTRAL_BACKGROUND:
+ /* send endless samples.
+ * TODO: is this really useful for "background"?
+ */
+ sc->spec_config.endless = 1;
+ sc->spec_config.enabled = 1;
+ break;
+ case SPECTRAL_CHANSCAN:
+ case SPECTRAL_MANUAL:
+ sc->spec_config.endless = 0;
+ sc->spec_config.enabled = 1;
+ break;
+ default:
+ return -1;
+ }
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_ops(ah)->spectral_scan_config(ah, &sc->spec_config);
+ ath9k_ps_restore(sc);
+
+ sc->spectral_mode = spectral_mode;
+
+ return 0;
+}
+
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath_softc *sc = hw->priv;
@@ -1188,6 +1244,11 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
*/
if (old_pos >= 0)
ath_update_survey_nf(sc, old_pos);
+
+ /* perform spectral scan if requested. */
+ if (sc->scanning && sc->spectral_mode == SPECTRAL_CHANSCAN)
+ ath9k_spectral_scan_trigger(hw);
+
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -1610,7 +1671,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
ath9k_ps_restore(sc);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -1729,11 +1792,11 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
if (drop) {
ath9k_ps_wakeup(sc);
spin_lock_bh(&sc->sc_pcu_lock);
- drain_txq = ath_drain_all_txq(sc, false);
+ drain_txq = ath_drain_all_txq(sc);
spin_unlock_bh(&sc->sc_pcu_lock);
if (!drain_txq)
- ath_reset(sc, false);
+ ath_reset(sc);
ath9k_ps_restore(sc);
ieee80211_wake_queues(hw);
@@ -1833,6 +1896,9 @@ static u32 fill_chainmask(u32 cap, u32 new)
static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
{
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ return true;
+
switch (val & 0x7) {
case 0x1:
case 0x3:
@@ -2238,6 +2304,19 @@ static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
}
#endif
+static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+
+ sc->scanning = 1;
+}
+
+static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+
+ sc->scanning = 0;
+}
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
@@ -2284,4 +2363,6 @@ struct ieee80211_ops ath9k_ops = {
.sta_add_debugfs = ath9k_sta_add_debugfs,
.sta_remove_debugfs = ath9k_sta_remove_debugfs,
#endif
+ .sw_scan_start = ath9k_sw_scan_start,
+ .sw_scan_complete = ath9k_sw_scan_complete,
};
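A minimal usage sketch for the two spectral helpers added above, ath9k_spectral_scan_config() and ath9k_spectral_scan_trigger(); the debugfs plumbing that would normally drive this is assumed and not shown here:

static int ath9k_demo_manual_spectral_scan(struct ieee80211_hw *hw)
{
	int ret;

	/* select one-shot sampling; returns negative if the chip has
	 * no spectral scan engine */
	ret = ath9k_spectral_scan_config(hw, SPECTRAL_MANUAL);
	if (ret < 0)
		return ret;

	/* per the TODO above, the trigger must follow the configuration */
	ath9k_spectral_scan_trigger(hw);
	return 0;
}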
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 5c02702f21e7..815bee21c19a 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -438,7 +438,7 @@ int ath_mci_setup(struct ath_softc *sc)
struct ath_mci_buf *buf = &mci->sched_buf;
int ret;
- buf->bf_addr = dma_alloc_coherent(sc->dev,
+ buf->bf_addr = dmam_alloc_coherent(sc->dev,
ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
&buf->bf_paddr, GFP_KERNEL);
@@ -474,13 +474,6 @@ void ath_mci_cleanup(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
- struct ath_mci_coex *mci = &sc->mci_coex;
- struct ath_mci_buf *buf = &mci->sched_buf;
-
- if (buf->bf_addr)
- dma_free_coherent(sc->dev,
- ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
- buf->bf_addr, buf->bf_paddr);
ar9003_mci_cleanup(ah);
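The mci.c hunk above is part of a wider conversion to managed (devres) allocations, which is why ath_mci_cleanup() loses its explicit dma_free_coherent() call. A hedged sketch of the pattern with a hypothetical device and size, just to show where the freeing went:

#include <linux/dma-mapping.h>

/* memory allocated with dmam_alloc_coherent() is registered as a devres
 * resource and released automatically when the device detaches, so no
 * matching free is needed in the driver's cleanup path */
static int example_alloc_coherent(struct device *dev, size_t size,
				  void **vaddr, dma_addr_t *paddr)
{
	*vaddr = dmam_alloc_coherent(dev, size, paddr, GFP_KERNEL);
	return *vaddr ? 0 : -ENOMEM;
}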
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 7ae73fbd9136..0e0d39583837 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -147,7 +147,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- void __iomem *mem;
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
@@ -155,19 +154,19 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int ret = 0;
char hw_name[64];
- if (pci_enable_device(pdev))
+ if (pcim_enable_device(pdev))
return -EIO;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
pr_err("32-bit DMA not available\n");
- goto err_dma;
+ return ret;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
pr_err("32-bit DMA consistent DMA enable failed\n");
- goto err_dma;
+ return ret;
}
/*
@@ -203,25 +202,16 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- ret = pci_request_region(pdev, 0, "ath9k");
+ ret = pcim_iomap_regions(pdev, BIT(0), "ath9k");
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
- ret = -ENODEV;
- goto err_region;
- }
-
- mem = pci_iomap(pdev, 0, 0);
- if (!mem) {
- pr_err("PCI memory map error\n") ;
- ret = -EIO;
- goto err_iomap;
+ return -ENODEV;
}
hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (!hw) {
dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
- ret = -ENOMEM;
- goto err_alloc_hw;
+ return -ENOMEM;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -230,7 +220,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc = hw->priv;
sc->hw = hw;
sc->dev = &pdev->dev;
- sc->mem = mem;
+ sc->mem = pcim_iomap_table(pdev)[0];
/* Will be cleared in ath9k_start() */
set_bit(SC_OP_INVALID, &sc->sc_flags);
@@ -251,7 +241,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
- hw_name, (unsigned long)mem, pdev->irq);
+ hw_name, (unsigned long)sc->mem, pdev->irq);
return 0;
@@ -259,14 +249,6 @@ err_init:
free_irq(sc->irq, sc);
err_irq:
ieee80211_free_hw(hw);
-err_alloc_hw:
- pci_iounmap(pdev, mem);
-err_iomap:
- pci_release_region(pdev, 0);
-err_region:
- /* Nothing */
-err_dma:
- pci_disable_device(pdev);
return ret;
}
@@ -274,17 +256,12 @@ static void ath_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_softc *sc = hw->priv;
- void __iomem *mem = sc->mem;
if (!is_ath9k_unloaded)
sc->sc_ah->ah_flags |= AH_UNPLUGGED;
ath9k_deinit_device(sc);
free_irq(sc->irq, sc);
ieee80211_free_hw(sc->hw);
-
- pci_iounmap(pdev, mem);
- pci_disable_device(pdev);
- pci_release_region(pdev, 0);
}
#ifdef CONFIG_PM_SLEEP
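The pci.c changes above swap the manual enable/request/iomap calls and their unwind labels for the managed pcim_* helpers. A condensed, hedged sketch of the resulting probe shape (device-specific setup elided):

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	void __iomem *mem;
	int ret;

	ret = pcim_enable_device(pdev);		/* devres-managed enable */
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), "example");
	if (ret)
		return ret;			/* no unwind labels needed */

	mem = pcim_iomap_table(pdev)[0];	/* BAR0 mapping, auto-unmapped */
	/* ... device-specific setup using "mem" ... */
	return 0;
}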
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 714558d1ba78..96ac433ba7f6 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1204,7 +1204,7 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
else if (sta->ht_cap.mcs.rx_mask[1])
caps |= WLAN_RC_DS_FLAG;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
caps |= WLAN_RC_40_FLAG;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
caps |= WLAN_RC_SGI_FLAG;
@@ -1452,17 +1452,7 @@ static void ath_rate_free(void *priv)
static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *rate_priv;
-
- rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp);
- if (!rate_priv) {
- ath_err(ath9k_hw_common(sc->sc_ah),
- "Unable to allocate private rc structure\n");
- return NULL;
- }
-
- return rate_priv;
+ return kzalloc(sizeof(struct ath_rate_priv), gfp);
}
static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index d4df98a938bf..ee156e543147 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,6 +15,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/relay.h>
#include "ath9k.h"
#include "ar9003_mac.h"
@@ -180,11 +181,6 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
bf->bf_mpdu = NULL;
}
}
-
- INIT_LIST_HEAD(&sc->rx.rxbuf);
-
- kfree(sc->rx.rx_bufptr);
- sc->rx.rx_bufptr = NULL;
}
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
@@ -211,12 +207,11 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
ah->caps.rx_hp_qdepth);
size = sizeof(struct ath_buf) * nbufs;
- bf = kzalloc(size, GFP_KERNEL);
+ bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
if (!bf)
return -ENOMEM;
INIT_LIST_HEAD(&sc->rx.rxbuf);
- sc->rx.rx_bufptr = bf;
for (i = 0; i < nbufs; i++, bf++) {
skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
@@ -254,8 +249,6 @@ rx_init_fail:
static void ath_edma_start_recv(struct ath_softc *sc)
{
- spin_lock_bh(&sc->rx.rxbuflock);
-
ath9k_hw_rxena(sc->sc_ah);
ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
@@ -267,8 +260,6 @@ static void ath_edma_start_recv(struct ath_softc *sc)
ath_opmode_init(sc);
ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
-
- spin_unlock_bh(&sc->rx.rxbuflock);
}
static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -285,8 +276,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
int error = 0;
spin_lock_init(&sc->sc_pcu_lock);
- spin_lock_init(&sc->rx.rxbuflock);
- clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
sc->sc_ah->caps.rx_status_len;
@@ -363,9 +352,6 @@ void ath_rx_cleanup(struct ath_softc *sc)
bf->bf_mpdu = NULL;
}
}
-
- if (sc->rx.rxdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
}
}
@@ -447,7 +433,6 @@ int ath_startrecv(struct ath_softc *sc)
return 0;
}
- spin_lock_bh(&sc->rx.rxbuflock);
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
@@ -468,26 +453,31 @@ start_recv:
ath_opmode_init(sc);
ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
- spin_unlock_bh(&sc->rx.rxbuflock);
-
return 0;
}
+static void ath_flushrecv(struct ath_softc *sc)
+{
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ ath_rx_tasklet(sc, 1, true);
+ ath_rx_tasklet(sc, 1, false);
+}
+
bool ath_stoprecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
bool stopped, reset = false;
- spin_lock_bh(&sc->rx.rxbuflock);
ath9k_hw_abortpcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
stopped = ath9k_hw_stopdmarecv(ah, &reset);
+ ath_flushrecv(sc);
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_edma_stop_recv(sc);
else
sc->rx.rxlink = NULL;
- spin_unlock_bh(&sc->rx.rxbuflock);
if (!(ah->ah_flags & AH_UNPLUGGED) &&
unlikely(!stopped)) {
@@ -499,15 +489,6 @@ bool ath_stoprecv(struct ath_softc *sc)
return stopped && !reset;
}
-void ath_flushrecv(struct ath_softc *sc)
-{
- set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
- ath_rx_tasklet(sc, 1, true);
- ath_rx_tasklet(sc, 1, false);
- clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
-}
-
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
@@ -552,7 +533,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (sc->ps_flags & PS_BEACON_SYNC) {
sc->ps_flags &= ~PS_BEACON_SYNC;
ath_dbg(common, PS,
- "Reconfigure Beacon timers based on timestamp from the AP\n");
+ "Reconfigure beacon timers based on synchronized timestamp\n");
ath9k_set_beacon(sc);
}
@@ -744,6 +725,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
}
+ list_del(&bf->list);
if (!bf->bf_mpdu)
return bf;
@@ -1034,6 +1016,134 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
rxs->flag &= ~RX_FLAG_DECRYPTED;
}
+#ifdef CONFIG_ATH9K_DEBUGFS
+static s8 fix_rssi_inv_only(u8 rssi_val)
+{
+ if (rssi_val == 128)
+ rssi_val = 0;
+ return (s8) rssi_val;
+}
+#endif
+
+/* returns 1 if this was a spectral frame, even if not handled. */
+static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
+ struct ath_rx_status *rs, u64 tsf)
+{
+#ifdef CONFIG_ATH9K_DEBUGFS
+ struct ath_hw *ah = sc->sc_ah;
+ u8 bins[SPECTRAL_HT20_NUM_BINS];
+ u8 *vdata = (u8 *)hdr;
+ struct fft_sample_ht20 fft_sample;
+ struct ath_radar_info *radar_info;
+ struct ath_ht20_mag_info *mag_info;
+ int len = rs->rs_datalen;
+ int dc_pos;
+ u16 length, max_magnitude;
+
+ /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
+ * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
+ * yet, but this is supposed to be possible as well.
+ */
+ if (rs->rs_phyerr != ATH9K_PHYERR_RADAR &&
+ rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT &&
+ rs->rs_phyerr != ATH9K_PHYERR_SPECTRAL)
+ return 0;
+
+ /* check if spectral scan bit is set. This does not have to be checked
+ * if received through a SPECTRAL phy error, but shouldn't hurt.
+ */
+ radar_info = ((struct ath_radar_info *)&vdata[len]) - 1;
+ if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
+ return 0;
+
+ /* Variation in the data length is possible and will be fixed later.
+ * Note that we only support HT20 for now.
+ *
+ * TODO: add HT20_40 support as well.
+ */
+ if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
+ (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
+ return 1;
+
+ fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
+ length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
+ fft_sample.tlv.length = __cpu_to_be16(length);
+
+ fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq);
+ fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+ fft_sample.noise = ah->noise;
+
+ switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) {
+ case 0:
+ /* length correct, nothing to do. */
+ memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS);
+ break;
+ case -1:
+ /* first byte missing, duplicate it. */
+ memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1);
+ bins[0] = vdata[0];
+ break;
+ case 2:
+ /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
+ memcpy(bins, vdata, 30);
+ bins[30] = vdata[31];
+ memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31);
+ break;
+ case 1:
+ /* MAC added 2 extra bytes AND first byte is missing. */
+ bins[0] = vdata[0];
+ memcpy(&bins[1], vdata, 30);
+ bins[31] = vdata[31];
+ memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32);
+ break;
+ default:
+ return 1;
+ }
+
+ /* DC value (value in the middle) is the blind spot of the spectral
+ * sample and is invalid, so interpolate it.
+ */
+ dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
+ bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
+
+ /* mag data is at the end of the frame, in front of radar_info */
+ mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+
+ /* copy raw bins without scaling them */
+ memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
+ fft_sample.max_exp = mag_info->max_exp & 0xf;
+
+ max_magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
+ fft_sample.max_index = spectral_max_index(mag_info->all_bins);
+ fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
+ fft_sample.tsf = __cpu_to_be64(tsf);
+
+ ath_debug_send_fft_sample(sc, &fft_sample.tlv);
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static void ath9k_apply_ampdu_details(struct ath_softc *sc,
+ struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
+{
+ if (rs->rs_isaggr) {
+ rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+
+ rxs->ampdu_reference = sc->rx.ampdu_ref;
+
+ if (!rs->rs_moreaggr) {
+ rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
+ sc->rx.ampdu_ref++;
+ }
+
+ if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
+ rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
+ }
+}
+
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
struct ath_buf *bf;
@@ -1059,16 +1169,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
dma_type = DMA_FROM_DEVICE;
qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
- spin_lock_bh(&sc->rx.rxbuflock);
tsf = ath9k_hw_gettsf64(ah);
tsf_lower = tsf & 0xffffffff;
do {
bool decrypt_error = false;
- /* If handling rx interrupt and flush is in progress => exit */
- if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
- break;
memset(&rs, 0, sizeof(rs));
if (edma)
@@ -1111,15 +1217,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
ath_debug_stat_rx(sc, &rs);
- /*
- * If we're asked to flush receive queue, directly
- * chain it back at the queue without processing it.
- */
- if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
- RX_STAT_INC(rx_drop_rxflush);
- goto requeue_drop_frag;
- }
-
memset(rxs, 0, sizeof(struct ieee80211_rx_status));
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
@@ -1131,6 +1228,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
rxs->mactime += 0x100000000ULL;
+ if (rs.rs_status & ATH9K_RXERR_PHY) {
+ if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
+ RX_STAT_INC(rx_spectral);
+ goto requeue_drop_frag;
+ }
+ }
+
retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
rxs, &decrypt_error);
if (retval)
@@ -1246,6 +1350,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
ath_ant_comb_scan(sc, &rs);
+ ath9k_apply_ampdu_details(sc, &rs, rxs);
+
ieee80211_rx(hw, skb);
requeue_drop_frag:
@@ -1254,19 +1360,18 @@ requeue_drop_frag:
sc->rx.frag = NULL;
}
requeue:
+ list_add_tail(&bf->list, &sc->rx.rxbuf);
+ if (flush)
+ continue;
+
if (edma) {
- list_add_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_edma_buf_link(sc, qtype);
} else {
- list_move_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_buf_link(sc, bf);
- if (!flush)
- ath9k_hw_rxena(ah);
+ ath9k_hw_rxena(ah);
}
} while (1);
- spin_unlock_bh(&sc->rx.rxbuflock);
-
if (!(ah->imask & ATH9K_INT_RXEOL)) {
ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
ath9k_hw_set_interrupts(ah);
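A hedged, userspace-side sketch of consuming the FFT samples that ath_debug_send_fft_sample() pushes through the relay channel. It assumes the TLV header is a one-byte type followed by a big-endian 16-bit payload length, which is what the __cpu_to_be16(length) conversion above suggests; the ATH_FFT_SAMPLE_HT20 value used below is likewise an assumption:

#include <stdint.h>
#include <stddef.h>

static size_t count_ht20_samples(const uint8_t *buf, size_t len)
{
	size_t off = 0, nsamples = 0;

	while (off + 3 <= len) {
		uint8_t type = buf[off];
		uint16_t plen = (uint16_t)(buf[off + 1] << 8) | buf[off + 2];

		if (off + 3 + plen > len)
			break;			/* truncated sample, stop */
		if (type == 1)			/* assumed ATH_FFT_SAMPLE_HT20 */
			nsamples++;
		off += 3 + plen;
	}
	return nsamples;
}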
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index ad3c82c09177..5929850649f0 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -789,6 +789,7 @@
#define AR_SREV_REVISION_9271_11 1
#define AR_SREV_VERSION_9300 0x1c0
#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
+#define AR_SREV_REVISION_9300_22 3
#define AR_SREV_VERSION_9330 0x200
#define AR_SREV_REVISION_9330_10 0
#define AR_SREV_REVISION_9330_11 1
@@ -869,6 +870,9 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
#define AR_SREV_9300_20_OR_LATER(_ah) \
((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9300)
+#define AR_SREV_9300_22(_ah) \
+ (AR_SREV_9300(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_22))
#define AR_SREV_9330(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9330))
@@ -884,9 +888,6 @@
#define AR_SREV_9485(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
-#define AR_SREV_9485_10(_ah) \
- (AR_SREV_9485(_ah) && \
- ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10))
#define AR_SREV_9485_11(_ah) \
(AR_SREV_9485(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 90e48a0fafe5..89a64411b82e 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -378,7 +378,7 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok, bool retry)
+ struct ath_tx_status *ts, int txok)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
@@ -490,7 +490,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
} else if (!isaggr && txok) {
/* transmit completion */
acked_cnt++;
- } else if ((tid->state & AGGR_CLEANUP) || !retry) {
+ } else if (tid->state & AGGR_CLEANUP) {
/*
* cleanup in progress, just fail
* the un-acked sub-frames
@@ -604,6 +604,37 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
+static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+}
+
+static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_tx_status *ts, struct ath_buf *bf,
+ struct list_head *bf_head)
+{
+ bool txok, flush;
+
+ txok = !(ts->ts_status & ATH9K_TXERR_MASK);
+ flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
+ txq->axq_tx_inprogress = false;
+
+ txq->axq_depth--;
+ if (bf_is_ampdu_not_probing(bf))
+ txq->axq_ampdu_depth--;
+
+ if (!bf_isampdu(bf)) {
+ if (!flush)
+ ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
+ ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
+ } else
+ ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
+
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush)
+ ath_txq_schedule(sc, txq);
+}
+
static bool ath_lookup_legacy(struct ath_buf *bf)
{
struct sk_buff *skb;
@@ -1202,7 +1233,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
* in HT IBSS when a beacon with HT-info is received after the station
* has already been added.
*/
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (sta->ht_cap.ht_supported) {
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
sta->ht_cap.ampdu_factor);
density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
@@ -1331,23 +1362,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
/* Queue Management */
/********************/
-static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
- struct ath_txq *txq)
-{
- struct ath_atx_ac *ac, *ac_tmp;
- struct ath_atx_tid *tid, *tid_tmp;
-
- list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
- list_del(&ac->list);
- ac->sched = false;
- list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
- list_del(&tid->list);
- tid->sched = false;
- ath_tid_drain(sc, txq, tid);
- }
- }
-}
-
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1470,14 +1484,8 @@ int ath_cabq_update(struct ath_softc *sc)
return 0;
}
-static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
- return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
-}
-
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
- struct list_head *list, bool retry_tx)
+ struct list_head *list)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
@@ -1499,16 +1507,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
lastbf = bf->bf_lastbf;
list_cut_position(&bf_head, list, &lastbf->list);
-
- txq->axq_depth--;
- if (bf_is_ampdu_not_probing(bf))
- txq->axq_ampdu_depth--;
-
- if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
- retry_tx);
- else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
}
}
@@ -1518,7 +1517,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
* This assumes output has been stopped and
* we do not need to block ath_tx_tasklet.
*/
-void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
+void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
ath_txq_lock(sc, txq);
@@ -1526,8 +1525,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
int idx = txq->txq_tailidx;
while (!list_empty(&txq->txq_fifo[idx])) {
- ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
- retry_tx);
+ ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);
INCR(idx, ATH_TXFIFO_DEPTH);
}
@@ -1536,16 +1534,12 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
txq->axq_link = NULL;
txq->axq_tx_inprogress = false;
- ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
-
- /* flush any pending frames if aggregation is enabled */
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
- ath_txq_drain_pending_buffers(sc, txq);
+ ath_drain_txq_list(sc, txq, &txq->axq_q);
ath_txq_unlock_complete(sc, txq);
}
-bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
+bool ath_drain_all_txq(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -1581,7 +1575,7 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
*/
txq = &sc->tx.txq[i];
txq->stopped = false;
- ath_draintxq(sc, txq, retry_tx);
+ ath_draintxq(sc, txq);
}
return !npend;
@@ -1910,8 +1904,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
struct ath_buf *bf;
u8 tidno;
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
- ieee80211_is_data_qos(hdr->frame_control)) {
+ if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] &
IEEE80211_QOS_CTL_TID_MASK;
tid = ATH_AN_2_TID(txctl->an, tidno);
@@ -2175,28 +2168,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
-static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_tx_status *ts, struct ath_buf *bf,
- struct list_head *bf_head)
-{
- int txok;
-
- txq->axq_depth--;
- txok = !(ts->ts_status & ATH9K_TXERR_MASK);
- txq->axq_tx_inprogress = false;
- if (bf_is_ampdu_not_probing(bf))
- txq->axq_ampdu_depth--;
-
- if (!bf_isampdu(bf)) {
- ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
- ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
- } else
- ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
- ath_txq_schedule(sc, txq);
-}
-
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_hw *ah = sc->sc_ah;
@@ -2361,8 +2332,8 @@ static int ath_txstatus_setup(struct ath_softc *sc, int size)
u8 txs_len = sc->sc_ah->caps.txs_len;
dd->dd_desc_len = size * txs_len;
- dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
- &dd->dd_desc_paddr, GFP_KERNEL);
+ dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
if (!dd->dd_desc)
return -ENOMEM;
@@ -2382,14 +2353,6 @@ static int ath_tx_edma_init(struct ath_softc *sc)
return err;
}
-static void ath_tx_edma_cleanup(struct ath_softc *sc)
-{
- struct ath_descdma *dd = &sc->txsdma;
-
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-}
-
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2402,7 +2365,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
if (error != 0) {
ath_err(common,
"Failed to allocate tx descriptors: %d\n", error);
- goto err;
+ return error;
}
error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
@@ -2410,36 +2373,17 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
if (error != 0) {
ath_err(common,
"Failed to allocate beacon descriptors: %d\n", error);
- goto err;
+ return error;
}
INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
error = ath_tx_edma_init(sc);
- if (error)
- goto err;
- }
-
-err:
- if (error != 0)
- ath_tx_cleanup(sc);
return error;
}
-void ath_tx_cleanup(struct ath_softc *sc)
-{
- if (sc->beacon.bdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
-
- if (sc->tx.txdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
- ath_tx_edma_cleanup(sc);
-}
-
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
struct ath_atx_tid *tid;
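For reference, the an->maxampdu cap computed in the ath_tx_aggr_start() hunk above (now gated on sta->ht_cap.ht_supported rather than the driver's HW_CAP_HT flag) expands to the following values, assuming IEEE80211_HT_MAX_AMPDU_FACTOR is 13 as defined in ieee80211.h:

/*
 * maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + sta->ht_cap.ampdu_factor)
 *
 *   ampdu_factor 0 -> 1 << 13 =  8192 bytes
 *   ampdu_factor 1 -> 1 << 14 = 16384 bytes
 *   ampdu_factor 2 -> 1 << 15 = 32768 bytes
 *   ampdu_factor 3 -> 1 << 16 = 65536 bytes
 */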
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
index 13a204598766..1a796e5f69ec 100644
--- a/drivers/net/wireless/ath/carl9170/Kconfig
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -1,6 +1,6 @@
config CARL9170
tristate "Linux Community AR9170 802.11n USB support"
- depends on USB && MAC80211 && EXPERIMENTAL
+ depends on USB && MAC80211
select ATH_COMMON
select FW_LOADER
select CRC32
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 2df17f1e49ef..25599741cd8a 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -85,20 +85,14 @@ enum carl9170_device_state {
CARL9170_STARTED,
};
-#define CARL9170_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define CARL9170_TX_USER_RATE_TRIES 3
-#define WME_AC_BE 2
-#define WME_AC_BK 3
-#define WME_AC_VI 1
-#define WME_AC_VO 0
-
#define TID_TO_WME_AC(_tid) \
- ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
- (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
- (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
- WME_AC_VO)
+ ((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE : \
+ (((_tid) == 1) || ((_tid) == 2)) ? IEEE80211_AC_BK : \
+ (((_tid) == 4) || ((_tid) == 5)) ? IEEE80211_AC_VI : \
+ IEEE80211_AC_VO)
#define SEQ_DIFF(_start, _seq) \
(((_start) - (_seq)) & 0x0fff)
@@ -290,6 +284,7 @@ struct ar9170 {
unsigned int rx_size;
unsigned int tx_seq_table;
bool ba_filter;
+ bool disable_offload_fw;
} fw;
/* interface configuration combinations */
@@ -493,8 +488,8 @@ struct carl9170_sta_info {
bool sleeping;
atomic_t pending_frames;
unsigned int ampdu_max_len;
- struct carl9170_sta_tid __rcu *agg[CARL9170_NUM_TID];
- struct carl9170_ba_stats stats[CARL9170_NUM_TID];
+ struct carl9170_sta_tid __rcu *agg[IEEE80211_NUM_TIDS];
+ struct carl9170_ba_stats stats[IEEE80211_NUM_TIDS];
};
struct carl9170_tx_info {
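The TID_TO_WME_AC() rewrite above only swaps the driver-private WME_AC_* constants for mac80211's IEEE80211_AC_* ones; the TID-to-AC mapping itself is unchanged. A small illustrative check of the expected results (TIDs above 5 all fall through to voice):

static bool tid_to_ac_mapping_as_expected(void)
{
	return TID_TO_WME_AC(0) == IEEE80211_AC_BE &&
	       TID_TO_WME_AC(3) == IEEE80211_AC_BE &&
	       TID_TO_WME_AC(1) == IEEE80211_AC_BK &&
	       TID_TO_WME_AC(2) == IEEE80211_AC_BK &&
	       TID_TO_WME_AC(4) == IEEE80211_AC_VI &&
	       TID_TO_WME_AC(5) == IEEE80211_AC_VI &&
	       TID_TO_WME_AC(6) == IEEE80211_AC_VO &&
	       TID_TO_WME_AC(7) == IEEE80211_AC_VO;
}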
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 63fd9af3fd39..47d5c2e910ad 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -215,6 +215,24 @@ static int carl9170_fw_tx_sequence(struct ar9170 *ar)
return 0;
}
+static void carl9170_fw_set_if_combinations(struct ar9170 *ar,
+ u16 if_comb_types)
+{
+ if (ar->fw.vif_num < 2)
+ return;
+
+ ar->if_comb_limits[0].max = ar->fw.vif_num;
+ ar->if_comb_limits[0].types = if_comb_types;
+
+ ar->if_combs[0].num_different_channels = 1;
+ ar->if_combs[0].max_interfaces = ar->fw.vif_num;
+ ar->if_combs[0].limits = ar->if_comb_limits;
+ ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits);
+
+ ar->hw->wiphy->iface_combinations = ar->if_combs;
+ ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs);
+}
+
static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
{
const struct carl9170fw_otus_desc *otus_desc;
@@ -264,7 +282,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (!SUPP(CARL9170FW_COMMAND_CAM)) {
dev_info(&ar->udev->dev, "crypto offloading is disabled "
"by firmware.\n");
- ar->disable_offload = true;
+ ar->fw.disable_offload_fw = true;
}
if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM))
@@ -345,20 +363,15 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
}
}
- ar->if_comb_limits[0].max = ar->fw.vif_num;
- ar->if_comb_limits[0].types = if_comb_types;
-
- ar->if_combs[0].num_different_channels = 1;
- ar->if_combs[0].max_interfaces = ar->fw.vif_num;
- ar->if_combs[0].limits = ar->if_comb_limits;
- ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits);
-
- ar->hw->wiphy->iface_combinations = ar->if_combs;
- ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs);
+ carl9170_fw_set_if_combinations(ar, if_comb_types);
ar->hw->wiphy->interface_modes |= if_comb_types;
- ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /* As IBSS Encryption is software-based, IBSS RSN is supported. */
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_SUPPORTS_TDLS;
#undef SUPPORTED
return carl9170_fw_tx_sequence(ar);
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index 9443c802b25b..9111d4ffc1b3 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -156,6 +156,14 @@ struct carl9170_psm {
} __packed;
#define CARL9170_PSM_SIZE 4
+/*
+ * Note: If a bit in rx_filter is set, it
+ * means that frames matching the condition
+ * are FILTERED/REMOVED/DISCARDED!
+ * (This can be a bit confusing, especially
+ * because some people expect it to work the
+ * exact opposite way, so watch out!)
+ */
struct carl9170_rx_filter_cmd {
__le32 rx_filter;
} __packed;
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index fa834c1460f0..0db874abde50 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -384,7 +384,7 @@
#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xd84)
#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xd88)
-#define AR9170_MAC_BCN_LENGTH_MAX 256
+#define AR9170_MAC_BCN_LENGTH_MAX (512 - 32)
#define AR9170_MAC_REG_BCN_STATUS (AR9170_MAC_REG_BASE + 0xd8c)
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 25a1e2f4f738..f293b3ff4756 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -358,8 +358,13 @@ static int carl9170_op_start(struct ieee80211_hw *hw)
ar->ps.last_action = jiffies;
ar->ps.last_slept = jiffies;
ar->erp_mode = CARL9170_ERP_AUTO;
- ar->rx_software_decryption = false;
- ar->disable_offload = false;
+
+ /* Set "disable hw crypto offload" whenever the module parameter
+ * nohwcrypt is set or the firmware does not support crypto offloading.
+ */
+ ar->disable_offload = modparam_nohwcrypt |
+ ar->fw.disable_offload_fw;
+ ar->rx_software_decryption = ar->disable_offload;
for (i = 0; i < ar->hw->queues; i++) {
ar->queue_stop_timeout[i] = jiffies;
@@ -565,12 +570,28 @@ static int carl9170_init_interface(struct ar9170 *ar,
memcpy(common->macaddr, vif->addr, ETH_ALEN);
- if (modparam_nohwcrypt ||
- ((vif->type != NL80211_IFTYPE_STATION) &&
- (vif->type != NL80211_IFTYPE_AP))) {
- ar->rx_software_decryption = true;
- ar->disable_offload = true;
- }
+ /* We have to fall back to software crypto whenever
+ * the user chooses to participate in an IBSS. HW
+ * offload for IBSS RSN is not supported by this driver.
+ *
+ * NOTE: If the previous main interface has already
+ * disabled hw crypto offload, we have to keep this
+ * previous disable_offload setting as it was.
+ * Although ideally, we should notify mac80211 and tell
+ * it to forget about any HW crypto offload for now.
+ */
+ ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
+ (vif->type != NL80211_IFTYPE_AP));
+
+ /* While the driver supports HW offload in a single
+ * P2P client configuration, it doesn't support HW
+ * offload in the favoured, concurrent P2P GO+CLIENT
+ * configuration. Hence, HW offload will always be
+ * disabled for P2P.
+ */
+ ar->disable_offload |= vif->p2p;
+
+ ar->rx_software_decryption = ar->disable_offload;
err = carl9170_set_operating_mode(ar);
return err;
@@ -580,7 +601,7 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
- struct ieee80211_vif *main_vif;
+ struct ieee80211_vif *main_vif, *old_main = NULL;
struct ar9170 *ar = hw->priv;
int vif_id = -1, err = 0;
@@ -602,6 +623,15 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
goto init;
}
+ /* Because the AR9170 HW's MAC doesn't provide full support for
+ * multiple, independent interfaces [of different operation modes],
+ * we have to select ONE main interface [main mode of HW], but we
+ * can have multiple slaves [AKA: entries in the ACK-table].
+ *
+ * The first (from HEAD/TOP) interface in the ar->vif_list is
+ * always the main intf. All following intfs in this list
+ * are considered to be slave intfs.
+ */
main_vif = carl9170_get_main_vif(ar);
if (main_vif) {
@@ -610,6 +640,18 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_STATION)
break;
+ /* P2P GO [master] use-case
+ * Because the P2P GO station is selected dynamically
+ * by all participating peers of a WIFI Direct network,
+ * the driver has to be able to change the main interface
+ * operating mode on the fly.
+ */
+ if (main_vif->p2p && vif->p2p &&
+ vif->type == NL80211_IFTYPE_AP) {
+ old_main = main_vif;
+ break;
+ }
+
err = -EBUSY;
rcu_read_unlock();
@@ -648,14 +690,41 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
vif_priv->id = vif_id;
vif_priv->enable_beacon = false;
ar->vifs++;
- list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
+ if (old_main) {
+ /* We end up here if the main interface is being replaced.
+ * Put the new main interface at the HEAD of the list and the
+ * previous interface will automatically become second in line.
+ */
+ list_add_rcu(&vif_priv->list, &ar->vif_list);
+ } else {
+ /* Add the new interface. If the list is empty, it will become
+ * the main interface; otherwise it will be a slave.
+ */
+ list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
+ }
rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
init:
- if (carl9170_get_main_vif(ar) == vif) {
+ main_vif = carl9170_get_main_vif(ar);
+
+ if (main_vif == vif) {
rcu_assign_pointer(ar->beacon_iter, vif_priv);
rcu_read_unlock();
+ if (old_main) {
+ struct carl9170_vif_info *old_main_priv =
+ (void *) old_main->drv_priv;
+ /* downgrade old main intf to slave intf.
+ * NOTE: We are no longer under rcu_read_lock.
+ * But we are still holding ar->mutex, so the
+ * vif data [id, addr] is safe.
+ */
+ err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
+ old_main->addr);
+ if (err)
+ goto unlock;
+ }
+
err = carl9170_init_interface(ar, vif);
if (err)
goto unlock;
@@ -1112,9 +1181,7 @@ static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (ar->disable_offload || !vif)
return -EOPNOTSUPP;
- /*
- * We have to fall back to software encryption, whenever
- * the user choose to participates in an IBSS or is connected
+ /* Fall back to software encryption whenever the driver is connected
* to more than one network.
*
* This is very unfortunate, because some machines cannot handle
@@ -1263,7 +1330,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
return 0;
}
- for (i = 0; i < CARL9170_NUM_TID; i++)
+ for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
RCU_INIT_POINTER(sta_info->agg[i], NULL);
sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
@@ -1287,7 +1354,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
sta_info->ht_sta = false;
rcu_read_lock();
- for (i = 0; i < CARL9170_NUM_TID; i++) {
+ for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
struct carl9170_sta_tid *tid_info;
tid_info = rcu_dereference(sta_info->agg[i]);
@@ -1394,7 +1461,9 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
rcu_read_lock();
tid_info = rcu_dereference(sta_info->agg[tid]);
if (tid_info) {
@@ -1784,7 +1853,7 @@ void *carl9170_alloc(size_t priv_size)
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_NEED_DTIM_PERIOD |
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
IEEE80211_HW_SIGNAL_DBM;
if (!modparam_noht) {
@@ -1805,10 +1874,6 @@ void *carl9170_alloc(size_t priv_size)
for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- /* As IBSS Encryption is software-based, IBSS RSN is supported. */
- hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
return ar;
err_nomem:
@@ -1916,13 +1981,13 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
return 0;
}
-static int carl9170_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void carl9170_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ar9170 *ar = hw->priv;
- return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
+ ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
}
int carl9170_register(struct ar9170 *ar)
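Both the ath9k and carl9170 ampdu_action hunks in this series adapt to mac80211 splitting IEEE80211_AMPDU_TX_STOP into three variants. A rough summary for orientation (mac80211.h carries the authoritative semantics); both drivers take the same teardown path for all three:

/*
 * IEEE80211_AMPDU_TX_STOP_CONT       - stop aggregating, but queued frames
 *                                      keep going out as non-aggregated traffic
 * IEEE80211_AMPDU_TX_STOP_FLUSH      - stop aggregating and drop/flush the
 *                                      frames still queued for the TID
 * IEEE80211_AMPDU_TX_STOP_FLUSH_CONT - flush variant used while the session
 *                                      teardown is already in progress
 */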
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index ef4ec0da6e49..9c0b150d5b8e 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -1520,35 +1520,92 @@ void carl9170_tx_scheduler(struct ar9170 *ar)
carl9170_tx(ar);
}
-int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
+/* caller has to take rcu_read_lock */
+static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar)
{
- struct sk_buff *skb = NULL;
struct carl9170_vif_info *cvif;
+ int i = 1;
+
+ /* The AR9170 hardware has no fancy beacon queue or some
+ * other scheduling mechanism. So, the driver has to make
+ * do by setting the two beacon timers (pretbtt and tbtt)
+ * once and then swapping the beacon address in the HW's
+ * register file each time the pretbtt fires.
+ */
+
+ cvif = rcu_dereference(ar->beacon_iter);
+ if (ar->vifs > 0 && cvif) {
+ do {
+ list_for_each_entry_continue_rcu(cvif, &ar->vif_list,
+ list) {
+ if (cvif->active && cvif->enable_beacon)
+ goto out;
+ }
+ } while (ar->beacon_enabled && i--);
+ }
+
+out:
+ rcu_assign_pointer(ar->beacon_iter, cvif);
+ return cvif;
+}
+
+static bool carl9170_tx_beacon_physet(struct ar9170 *ar, struct sk_buff *skb,
+ u32 *ht1, u32 *plcp)
+{
struct ieee80211_tx_info *txinfo;
struct ieee80211_tx_rate *rate;
- __le32 *data, *old = NULL;
- unsigned int plcp, power, chains;
- u32 word, ht1, off, addr, len;
- int i = 0, err = 0;
+ unsigned int power, chains;
+ bool ht_rate;
- rcu_read_lock();
- cvif = rcu_dereference(ar->beacon_iter);
-retry:
- if (ar->vifs == 0 || !cvif)
- goto out_unlock;
+ txinfo = IEEE80211_SKB_CB(skb);
+ rate = &txinfo->control.rates[0];
+ ht_rate = !!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS);
+ carl9170_tx_rate_tpc_chains(ar, txinfo, rate, plcp, &power, &chains);
- list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
- if (cvif->active && cvif->enable_beacon)
- goto found;
+ *ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
+ if (chains == AR9170_TX_PHY_TXCHAIN_2)
+ *ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;
+ SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, *ht1, 7);
+ SET_VAL(AR9170_MAC_BCN_HT1_TPC, *ht1, power);
+ SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, *ht1, chains);
+
+ if (ht_rate) {
+ *ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ *plcp |= AR9170_MAC_BCN_HT2_SGI;
+
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+ *ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
+ *plcp |= AR9170_MAC_BCN_HT2_BW40;
+ } else if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
+ *ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
+ *plcp |= AR9170_MAC_BCN_HT2_BW40;
+ }
+
+ SET_VAL(AR9170_MAC_BCN_HT2_LEN, *plcp, skb->len + FCS_LEN);
+ } else {
+ if (*plcp <= AR9170_TX_PHY_RATE_CCK_11M)
+ *plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
+ else
+ *plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
}
- if (!ar->beacon_enabled || i++)
- goto out_unlock;
+ return ht_rate;
+}
- goto retry;
+int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
+{
+ struct sk_buff *skb = NULL;
+ struct carl9170_vif_info *cvif;
+ __le32 *data, *old = NULL;
+ u32 word, ht1, plcp, off, addr, len;
+ int i = 0, err = 0;
+ bool ht_rate;
-found:
- rcu_assign_pointer(ar->beacon_iter, cvif);
+ rcu_read_lock();
+ cvif = carl9170_pick_beaconing_vif(ar);
+ if (!cvif)
+ goto out_unlock;
skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
NULL, NULL);
@@ -1558,7 +1615,6 @@ found:
goto err_free;
}
- txinfo = IEEE80211_SKB_CB(skb);
spin_lock_bh(&ar->beacon_lock);
data = (__le32 *)skb->data;
if (cvif->beacon)
@@ -1588,43 +1644,14 @@ found:
goto err_unlock;
}
- ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
- rate = &txinfo->control.rates[0];
- carl9170_tx_rate_tpc_chains(ar, txinfo, rate, &plcp, &power, &chains);
- if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS)) {
- if (plcp <= AR9170_TX_PHY_RATE_CCK_11M)
- plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
- else
- plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
- } else {
- ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- plcp |= AR9170_MAC_BCN_HT2_SGI;
-
- if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
- ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
- plcp |= AR9170_MAC_BCN_HT2_BW40;
- }
- if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
- ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
- plcp |= AR9170_MAC_BCN_HT2_BW40;
- }
-
- SET_VAL(AR9170_MAC_BCN_HT2_LEN, plcp, skb->len + FCS_LEN);
- }
-
- SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, ht1, 7);
- SET_VAL(AR9170_MAC_BCN_HT1_TPC, ht1, power);
- SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, ht1, chains);
- if (chains == AR9170_TX_PHY_TXCHAIN_2)
- ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;
+ ht_rate = carl9170_tx_beacon_physet(ar, skb, &ht1, &plcp);
carl9170_async_regwrite_begin(ar);
carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1);
- if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS))
- carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);
- else
+ if (ht_rate)
carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp);
+ else
+ carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);
for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
/*
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index 2ec3e9191e4d..2282847d4bb8 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
#define CARL9170FW_VERSION_YEAR 12
-#define CARL9170FW_VERSION_MONTH 7
-#define CARL9170FW_VERSION_DAY 7
-#define CARL9170FW_VERSION_GIT "1.9.6"
+#define CARL9170FW_VERSION_MONTH 12
+#define CARL9170FW_VERSION_DAY 15
+#define CARL9170FW_VERSION_GIT "1.9.7"
#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index d81698015bf7..ccc4c718f124 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -195,8 +195,6 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
const struct ieee80211_reg_rule *reg_rule;
struct ieee80211_channel *ch;
unsigned int i;
- u32 bandwidth = 0;
- int r;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
@@ -214,11 +212,8 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
continue;
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- r = freq_reg_info(wiphy,
- ch->center_freq,
- bandwidth,
- &reg_rule);
- if (r)
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (IS_ERR(reg_rule))
continue;
/*
* If 11d had a rule for this channel ensure
@@ -254,8 +249,6 @@ ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
const struct ieee80211_reg_rule *reg_rule;
- u32 bandwidth = 0;
- int r;
sband = wiphy->bands[IEEE80211_BAND_2GHZ];
if (!sband)
@@ -283,16 +276,16 @@ ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
*/
ch = &sband->channels[11]; /* CH 12 */
- r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
- if (!r) {
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (!IS_ERR(reg_rule)) {
if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
}
ch = &sband->channels[12]; /* CH 13 */
- r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
- if (!r) {
+ reg_rule = freq_reg_info(wiphy, ch->center_freq);
+ if (!IS_ERR(reg_rule)) {
if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
@@ -363,9 +356,9 @@ static u16 ath_regd_find_country_by_name(char *alpha2)
return -1;
}
-int ath_reg_notifier_apply(struct wiphy *wiphy,
- struct regulatory_request *request,
- struct ath_regulatory *reg)
+void ath_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct ath_regulatory *reg)
{
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
@@ -380,7 +373,7 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
* any pending requests in the queue.
*/
if (!request)
- return 0;
+ return;
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_CORE:
@@ -416,8 +409,6 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
break;
}
-
- return 0;
}
EXPORT_SYMBOL(ath_reg_notifier_apply);
@@ -507,8 +498,8 @@ ath_get_regpair(int regdmn)
static int
ath_regd_init_wiphy(struct ath_regulatory *reg,
struct wiphy *wiphy,
- int (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
{
const struct ieee80211_regdomain *regd;
@@ -628,8 +619,8 @@ static int __ath_regd_init(struct ath_regulatory *reg)
int
ath_regd_init(struct ath_regulatory *reg,
struct wiphy *wiphy,
- int (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
{
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 03a8268ccf21..37f53bd8fcb1 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -252,12 +252,12 @@ enum CountryCode {
bool ath_is_world_regd(struct ath_regulatory *reg);
bool ath_is_49ghz_allowed(u16 redomain);
int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
- int (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request));
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
enum ieee80211_band band);
-int ath_reg_notifier_apply(struct wiphy *wiphy,
- struct regulatory_request *request,
- struct ath_regulatory *reg);
+void ath_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct ath_regulatory *reg);
#endif
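The regd.c/regd.h changes above track two cfg80211 API changes: the wiphy reg_notifier now returns void, and freq_reg_info() returns the rule (or an ERR_PTR) instead of an int plus out-parameter. A hedged sketch of a driver-side notifier after the change; the my_priv container and its common field are hypothetical:

static void my_reg_notifier(struct wiphy *wiphy,
			    struct regulatory_request *request)
{
	struct my_priv *priv = wiphy_priv(wiphy);	/* hypothetical */

	/* nothing to return anymore; just apply the ath regulatory hints */
	ath_reg_notifier_apply(wiphy, request, &priv->common.regulatory);
}
/* hooked up once at init: ath_regd_init(&priv->common.regulatory, wiphy, my_reg_notifier); */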
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
new file mode 100644
index 000000000000..bac3d98a0cfb
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -0,0 +1,29 @@
+config WIL6210
+ tristate "Wilocity 60g WiFi card wil6210 support"
+ depends on CFG80211
+ depends on PCI
+ default n
+ ---help---
+ This module adds support for the wireless adapter based on
+ the wil6210 chip by Wilocity. It supports operation on the
+ 60 GHz band, covered by the IEEE802.11ad standard.
+
+ http://wireless.kernel.org/en/users/Drivers/wil6210
+
+ If you choose to build it as a module, it will be called
+ wil6210
+
+config WIL6210_ISR_COR
+ bool "Use Clear-On-Read mode for ISR registers for wil6210"
+ depends on WIL6210
+ default y
+ ---help---
+ ISR registers on the wil6210 chip may operate in either
+ COR (Clear-On-Read) or W1C (Write-1-to-Clear) mode.
+ For production use, choose COR (say y); it is the default since
+ it saves an extra target transaction.
+ For ISR debugging, choose W1C (say n); it allows monitoring the
+ ISR registers with debugfs. If COR were used, the ISR would
+ self-clear when accessed for debug purposes, which makes
+ such monitoring impossible.
+ Say y unless you are debugging interrupts.
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
new file mode 100644
index 000000000000..9396dc9fe3c5
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_WIL6210) += wil6210.o
+
+wil6210-objs := main.o
+wil6210-objs += netdev.o
+wil6210-objs += cfg80211.o
+wil6210-objs += pcie_bus.o
+wil6210-objs += debugfs.o
+wil6210-objs += wmi.o
+wil6210-objs += interrupt.o
+wil6210-objs += txrx.o
+
+subdir-ccflags-y += -Werror
+subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
new file mode 100644
index 000000000000..9ecc1968262c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <net/cfg80211.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+#define CHAN60G(_channel, _flags) { \
+ .band = IEEE80211_BAND_60GHZ, \
+ .center_freq = 56160 + (2160 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 40, \
+}
+
+static struct ieee80211_channel wil_60ghz_channels[] = {
+ CHAN60G(1, 0),
+ CHAN60G(2, 0),
+ CHAN60G(3, 0),
+/* channel 4 not supported yet */
+};
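Worked example of the CHAN60G() center-frequency formula used for the table just above (56160 + 2160 * channel, in MHz), matching the IEEE 802.11ad channelization:

/*
 * channel 1 -> 56160 + 2160 * 1 = 58320 MHz
 * channel 2 -> 56160 + 2160 * 2 = 60480 MHz
 * channel 3 -> 56160 + 2160 * 3 = 62640 MHz
 * (channel 4 would be 64800 MHz; not registered yet, per the comment above)
 */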
+
+static struct ieee80211_supported_band wil_band_60ghz = {
+ .channels = wil_60ghz_channels,
+ .n_channels = ARRAY_SIZE(wil_60ghz_channels),
+ .ht_cap = {
+ .ht_supported = true,
+ .cap = 0, /* TODO */
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */
+ .mcs = {
+ /* MCS 1..12 - SC PHY */
+ .rx_mask = {0xfe, 0x1f}, /* 1..12 */
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */
+ },
+ },
+};
+
+static const struct ieee80211_txrx_stypes
+wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+};
+
+static const u32 wil_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_GCMP,
+};
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type)
+{
+ static const struct {
+ enum nl80211_iftype nl;
+ enum wmi_network_type wmi;
+ } __nl2wmi[] = {
+ {NL80211_IFTYPE_ADHOC, WMI_NETTYPE_ADHOC},
+ {NL80211_IFTYPE_STATION, WMI_NETTYPE_INFRA},
+ {NL80211_IFTYPE_AP, WMI_NETTYPE_AP},
+ {NL80211_IFTYPE_P2P_CLIENT, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_P2P_GO, WMI_NETTYPE_P2P},
+ {NL80211_IFTYPE_MONITOR, WMI_NETTYPE_ADHOC}, /* FIXME */
+ };
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) {
+ if (__nl2wmi[i].nl == type)
+ return __nl2wmi[i].wmi;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int wil_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ int rc;
+ struct wmi_notify_req_cmd cmd = {
+ .cid = 0,
+ .interval_usec = 0,
+ };
+
+ if (memcmp(mac, wil->dst_addr[0], ETH_ALEN))
+ return -ENOENT;
+
+ /* WMI_NOTIFY_REQ_DONE_EVENTID handler fills wil->stats.bf_mcs */
+ rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
+ WMI_NOTIFY_REQ_DONE_EVENTID, NULL, 0, 20);
+ if (rc)
+ return rc;
+
+ sinfo->generation = wil->sinfo_gen;
+
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->txrate.mcs = wil->stats.bf_mcs;
+ sinfo->filled |= STATION_INFO_RX_BITRATE;
+ sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+ sinfo->rxrate.mcs = wil->stats.last_mcs_rx;
+
+ if (test_bit(wil_status_fwconnected, &wil->status)) {
+ sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->signal = 12; /* TODO: provide real value */
+ }
+
+ return 0;
+}
+
+static int wil_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (flags)
+ wil->monitor_flags = *flags;
+ else
+ wil->monitor_flags = 0;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ wdev->iftype = type;
+
+ return 0;
+}
+
+static int wil_cfg80211_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+ struct {
+ struct wmi_start_scan_cmd cmd;
+ u16 chnl[4];
+ } __packed cmd;
+ uint i, n;
+
+ if (wil->scan_request) {
+ wil_err(wil, "Already scanning\n");
+ return -EAGAIN;
+ }
+
+ /* check we are client side */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* FW doesn't support scan after a connection attempt */
+ if (test_bit(wil_status_dontscan, &wil->status)) {
+ wil_err(wil, "Scan after connect attempt not supported\n");
+ return -EBUSY;
+ }
+
+ wil->scan_request = request;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd.num_channels = 0;
+ n = min(request->n_channels, 4U);
+ for (i = 0; i < n; i++) {
+ int ch = request->channels[i]->hw_value;
+ if (ch == 0) {
+ wil_err(wil,
+ "Scan requested for unknown frequency %dMhz\n",
+ request->channels[i]->center_freq);
+ continue;
+ }
+ /* 0-based channel indexes */
+ cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
+ wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch,
+ request->channels[i]->center_freq);
+ }
+
+ return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
+ cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+}
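+
+/*
+ * Note on the command layout in wil_cfg80211_scan() above (assuming
+ * channel_list[] is a variable-length array at the end of
+ * struct wmi_start_scan_cmd in wmi.h): the anonymous packed struct reserves
+ * chnl[4] right after the command header, so writes through
+ * cmd.cmd.channel_list[i] land in that storage. The length handed to
+ * wmi_send() therefore covers the fixed header plus only the channels
+ * actually filled in:
+ *
+ *	sizeof(cmd.cmd) + cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])
+ */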
+
+static int wil_cfg80211_connect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct cfg80211_bss *bss;
+ struct wmi_connect_cmd conn;
+ const u8 *ssid_eid;
+ const u8 *rsn_eid;
+ int ch;
+ int rc = 0;
+
+ bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
+ sme->ssid, sme->ssid_len,
+ WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ if (!bss) {
+ wil_err(wil, "Unable to find BSS\n");
+ return -ENOENT;
+ }
+
+ ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (!ssid_eid) {
+ wil_err(wil, "No SSID\n");
+ rc = -ENOENT;
+ goto out;
+ }
+
+ rsn_eid = sme->ie ?
+ cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
+ NULL;
+ if (rsn_eid) {
+ if (sme->ie_len > WMI_MAX_IE_LEN) {
+ rc = -ERANGE;
+ wil_err(wil, "IE too large (%td bytes)\n",
+ sme->ie_len);
+ goto out;
+ }
+ /*
+ * For secure assoc, send:
+ * (1) WMI_DELETE_CIPHER_KEY_CMD
+ * (2) WMI_SET_APPIE_CMD
+ */
+ rc = wmi_del_cipher_key(wil, 0, bss->bssid);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
+ goto out;
+ }
+ /* WMI_SET_APPIE_CMD */
+ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
+ if (rc) {
+ wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+ goto out;
+ }
+ }
+
+ /* WMI_CONNECT_CMD */
+ memset(&conn, 0, sizeof(conn));
+ switch (bss->capability & 0x03) {
+ case WLAN_CAPABILITY_DMG_TYPE_AP:
+ conn.network_type = WMI_NETTYPE_INFRA;
+ break;
+ case WLAN_CAPABILITY_DMG_TYPE_PBSS:
+ conn.network_type = WMI_NETTYPE_P2P;
+ break;
+ default:
+ wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
+ bss->capability);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ if (rsn_eid) {
+ conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+ conn.auth_mode = WMI_AUTH_WPA2_PSK;
+ conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+ conn.pairwise_crypto_len = 16;
+ } else {
+ conn.dot11_auth_mode = WMI_AUTH11_OPEN;
+ conn.auth_mode = WMI_AUTH_NONE;
+ }
+
+ conn.ssid_len = min_t(u8, ssid_eid[1], 32);
+ memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
+
+ ch = bss->channel->hw_value;
+ if (ch == 0) {
+ wil_err(wil, "BSS at unknown frequency %dMhz\n",
+ bss->channel->center_freq);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ conn.channel = ch - 1;
+
+ memcpy(conn.bssid, bss->bssid, 6);
+ memcpy(conn.dst_mac, bss->bssid, 6);
+ /*
+ * FW doesn't support scan after a connection attempt
+ */
+ set_bit(wil_status_dontscan, &wil->status);
+
+ rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
+ if (rc == 0) {
+ /* Connect can take lots of time */
+ mod_timer(&wil->connect_timer,
+ jiffies + msecs_to_jiffies(2000));
+ }
+
+ out:
+ cfg80211_put_bss(wiphy, bss);
+
+ return rc;
+}
+
+static int wil_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u16 reason_code)
+{
+ int rc;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
+
+ return rc;
+}
+
+static int wil_cfg80211_set_channel(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wdev->preset_chandef = *chandef;
+
+ return 0;
+}
+
+static int wil_cfg80211_add_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ /* group key is not used */
+ if (!pairwise)
+ return 0;
+
+ return wmi_add_cipher_key(wil, key_index, mac_addr,
+ params->key_len, params->key);
+}
+
+static int wil_cfg80211_del_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ /* group key is not used */
+ if (!pairwise)
+ return 0;
+
+ return wmi_del_cipher_key(wil, key_index, mac_addr);
+}
+
+/* Needs to be present or wiphy_new() will WARN */
+static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool unicast,
+ bool multicast)
+{
+ return 0;
+}
+
+static int wil_cfg80211_start_ap(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_ap_settings *info)
+{
+ int rc = 0;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ struct ieee80211_channel *channel = info->chandef.chan;
+ struct cfg80211_beacon_data *bcon = &info->beacon;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ if (!channel) {
+ wil_err(wil, "AP: No channel???\n");
+ return -EINVAL;
+ }
+
+ wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+ channel->center_freq, info->privacy ? "secure" : "open");
+ print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
+ info->ssid, info->ssid_len);
+
+ rc = wil_reset(wil);
+ if (rc)
+ return rc;
+
+ rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
+ if (rc)
+ return rc;
+
+ rc = wmi_set_channel(wil, channel->hw_value);
+ if (rc)
+ return rc;
+
+ /* MAC address - pre-requisite for other commands */
+ wmi_set_mac_address(wil, ndev->dev_addr);
+
+ /* IEs */
+ /* bcon 'head' IEs are not relevant for the 60 GHz band */
+ wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
+ bcon->beacon_ies);
+ wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+ bcon->proberesp_ies);
+ wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+ bcon->assocresp_ies);
+
+ wil->secure_pcp = info->privacy;
+
+ rc = wmi_set_bcon(wil, info->beacon_interval, wmi_nettype);
+ if (rc)
+ return rc;
+
+ /* Rx VRING. After MAC and beacon */
+ rc = wil_rx_init(wil);
+
+ netif_carrier_on(ndev);
+
+ return rc;
+}
+
+static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
+ struct net_device *ndev)
+{
+ int rc = 0;
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+ u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ /* To stop beaconing, set BI to 0 */
+ rc = wmi_set_bcon(wil, 0, wmi_nettype);
+
+ return rc;
+}
+
+static struct cfg80211_ops wil_cfg80211_ops = {
+ .scan = wil_cfg80211_scan,
+ .connect = wil_cfg80211_connect,
+ .disconnect = wil_cfg80211_disconnect,
+ .change_virtual_intf = wil_cfg80211_change_iface,
+ .get_station = wil_cfg80211_get_station,
+ .set_monitor_channel = wil_cfg80211_set_channel,
+ .add_key = wil_cfg80211_add_key,
+ .del_key = wil_cfg80211_del_key,
+ .set_default_key = wil_cfg80211_set_default_key,
+ /* AP mode */
+ .start_ap = wil_cfg80211_start_ap,
+ .stop_ap = wil_cfg80211_stop_ap,
+};
+
+static void wil_wiphy_init(struct wiphy *wiphy)
+{
+ /* TODO: set real value */
+ wiphy->max_scan_ssids = 10;
+ wiphy->max_num_pmkids = 0 /* TODO: */;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MONITOR);
+ /* TODO: enable P2P when integrated with supplicant:
+ * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
+ */
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ dev_warn(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
+ __func__, wiphy->flags);
+ wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+ wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
+
+ /* TODO: figure this out */
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->cipher_suites = wil_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
+ wiphy->mgmt_stypes = wil_mgmt_stypes;
+}
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev)
+{
+ int rc = 0;
+ struct wireless_dev *wdev;
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev)
+ return ERR_PTR(-ENOMEM);
+
+ wdev->wiphy = wiphy_new(&wil_cfg80211_ops,
+ sizeof(struct wil6210_priv));
+ if (!wdev->wiphy) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ set_wiphy_dev(wdev->wiphy, dev);
+ wil_wiphy_init(wdev->wiphy);
+
+ rc = wiphy_register(wdev->wiphy);
+ if (rc < 0)
+ goto out_failed_reg;
+
+ return wdev;
+
+out_failed_reg:
+ wiphy_free(wdev->wiphy);
+out:
+ kfree(wdev);
+
+ return ERR_PTR(rc);
+}
+
+void wil_wdev_free(struct wil6210_priv *wil)
+{
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+
+ if (!wdev)
+ return;
+
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
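+
+/*
+ * Lifecycle note: wil_cfg80211_init() and wil_wdev_free() are paired and
+ * undo each other in reverse order - kzalloc/wiphy_new/wiphy_register on
+ * the way up, wiphy_unregister/wiphy_free/kfree on the way down. A minimal
+ * sketch of the expected call order:
+ *
+ *	wdev = wil_cfg80211_init(dev);	(probe path)
+ *	...
+ *	wil_wdev_free(wil);		(remove path)
+ */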
diff --git a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
new file mode 100644
index 000000000000..e5712f026c47
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
@@ -0,0 +1,20 @@
+#ifndef WIL_DBG_HEXDUMP_H_
+#define WIL_DBG_HEXDUMP_H_
+
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#else /* defined(CONFIG_DYNAMIC_DEBUG) */
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+
+#endif /* WIL_DBG_HEXDUMP_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
new file mode 100644
index 000000000000..65fc9683bfd8
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -0,0 +1,603 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+
+/* Nasty hack. Better to have per-device instances */
+static u32 mem_addr;
+static u32 dbg_txdesc_index;
+
+static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
+ const char *name, struct vring *vring)
+{
+ void __iomem *x = wmi_addr(wil, vring->hwtail);
+
+ seq_printf(s, "VRING %s = {\n", name);
+ seq_printf(s, " pa = 0x%016llx\n", (unsigned long long)vring->pa);
+ seq_printf(s, " va = 0x%p\n", vring->va);
+ seq_printf(s, " size = %d\n", vring->size);
+ seq_printf(s, " swtail = %d\n", vring->swtail);
+ seq_printf(s, " swhead = %d\n", vring->swhead);
+ seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
+ if (x)
+ seq_printf(s, "0x%08x\n", ioread32(x));
+ else
+ seq_printf(s, "???\n");
+
+ if (vring->va && (vring->size < 1025)) {
+ uint i;
+ for (i = 0; i < vring->size; i++) {
+ volatile struct vring_tx_desc *d = &vring->va[i].tx;
+ if ((i % 64) == 0 && (i != 0))
+ seq_printf(s, "\n");
+ seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
+ "S" : (vring->ctx[i] ? "H" : "h"));
+ }
+ seq_printf(s, "\n");
+ }
+ seq_printf(s, "}\n");
+}
+
+static int wil_vring_debugfs_show(struct seq_file *s, void *data)
+{
+ uint i;
+ struct wil6210_priv *wil = s->private;
+
+ wil_print_vring(s, wil, "rx", &wil->vring_rx);
+
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ struct vring *vring = &(wil->vring_tx[i]);
+ if (vring->va) {
+ char name[10];
+ snprintf(name, sizeof(name), "tx_%2d", i);
+ wil_print_vring(s, wil, name, vring);
+ }
+ }
+
+ return 0;
+}
+
+static int wil_vring_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_vring_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_vring = {
+ .open = wil_vring_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void wil_print_ring(struct seq_file *s, const char *prefix,
+ void __iomem *off)
+{
+ struct wil6210_priv *wil = s->private;
+ struct wil6210_mbox_ring r;
+ int rsize;
+ uint i;
+
+ wil_memcpy_fromio_32(&r, off, sizeof(r));
+ wil_mbox_ring_le2cpus(&r);
+ /*
+ * we just read a memory block from the NIC; it may be
+ * garbage. Check validity before using it.
+ */
+ rsize = r.size / sizeof(struct wil6210_mbox_ring_desc);
+
+ seq_printf(s, "ring %s = {\n", prefix);
+ seq_printf(s, " base = 0x%08x\n", r.base);
+ seq_printf(s, " size = 0x%04x bytes -> %d entries\n", r.size, rsize);
+ seq_printf(s, " tail = 0x%08x\n", r.tail);
+ seq_printf(s, " head = 0x%08x\n", r.head);
+ seq_printf(s, " entry size = %d\n", r.entry_size);
+
+ if (r.size % sizeof(struct wil6210_mbox_ring_desc)) {
+ seq_printf(s, " ??? size is not multiple of %zd, garbage?\n",
+ sizeof(struct wil6210_mbox_ring_desc));
+ goto out;
+ }
+
+ if (!wmi_addr(wil, r.base) ||
+ !wmi_addr(wil, r.tail) ||
+ !wmi_addr(wil, r.head)) {
+ seq_printf(s, " ??? pointers are garbage?\n");
+ goto out;
+ }
+
+ for (i = 0; i < rsize; i++) {
+ struct wil6210_mbox_ring_desc d;
+ struct wil6210_mbox_hdr hdr;
+ size_t delta = i * sizeof(d);
+ void __iomem *x = wil->csr + HOSTADDR(r.base) + delta;
+
+ wil_memcpy_fromio_32(&d, x, sizeof(d));
+
+ seq_printf(s, " [%2x] %s %s%s 0x%08x", i,
+ d.sync ? "F" : "E",
+ (r.tail - r.base == delta) ? "t" : " ",
+ (r.head - r.base == delta) ? "h" : " ",
+ le32_to_cpu(d.addr));
+ if (0 == wmi_read_hdr(wil, d.addr, &hdr)) {
+ u16 len = le16_to_cpu(hdr.len);
+ seq_printf(s, " -> %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len,
+ le16_to_cpu(hdr.type), hdr.flags);
+ if (len <= MAX_MBOXITEM_SIZE) {
+ int n = 0;
+ unsigned char printbuf[16 * 3 + 2];
+ unsigned char databuf[MAX_MBOXITEM_SIZE];
+ void __iomem *src = wmi_buffer(wil, d.addr) +
+ sizeof(struct wil6210_mbox_hdr);
+ /*
+ * No need to check @src for validity -
+ * we already validated @d.addr while
+ * reading header
+ */
+ wil_memcpy_fromio_32(databuf, src, len);
+ while (n < len) {
+ int l = min(len - n, 16);
+ hex_dump_to_buffer(databuf + n, l,
+ 16, 1, printbuf,
+ sizeof(printbuf),
+ false);
+ seq_printf(s, " : %s\n", printbuf);
+ n += l;
+ }
+ }
+ } else {
+ seq_printf(s, "\n");
+ }
+ }
+ out:
+ seq_printf(s, "}\n");
+}
+
+static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx));
+ wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx));
+
+ return 0;
+}
+
+static int wil_mbox_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_mbox_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_mbox = {
+ .open = wil_mbox_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_debugfs_iomem_x32_set(void *data, u64 val)
+{
+ iowrite32(val, (void __iomem *)data);
+ wmb(); /* make sure write propagated to HW */
+
+ return 0;
+}
+
+static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
+{
+ *val = ioread32((void __iomem *)data);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
+ wil_debugfs_iomem_x32_set, "0x%08llx\n");
+
+static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
+ mode_t mode,
+ struct dentry *parent,
+ void __iomem *value)
+{
+ return debugfs_create_file(name, mode, parent, (void * __force)value,
+ &fops_iomem_x32);
+}
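+
+/*
+ * Each register exposed through wil_debugfs_create_iomem_x32() becomes a
+ * debugfs file whose reads and writes go straight to the ioremapped
+ * address, formatted as "0x%08llx". A usage sketch (the exact path depends
+ * on the debugfs mount point and the wiphy name):
+ *
+ *	cat /sys/kernel/debug/ieee80211/phy0/wil6210/USER_ICR/ICR
+ *
+ * performs an ioread32() of the live ICR register.
+ */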
+
+static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
+ const char *name,
+ struct dentry *parent, u32 off)
+{
+ struct dentry *d = debugfs_create_dir(name, parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil_debugfs_create_iomem_x32("ICC", S_IRUGO | S_IWUSR, d,
+ wil->csr + off);
+ wil_debugfs_create_iomem_x32("ICR", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 4);
+ wil_debugfs_create_iomem_x32("ICM", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 8);
+ wil_debugfs_create_iomem_x32("ICS", S_IWUSR, d,
+ wil->csr + off + 12);
+ wil_debugfs_create_iomem_x32("IMV", S_IRUGO | S_IWUSR, d,
+ wil->csr + off + 16);
+ wil_debugfs_create_iomem_x32("IMS", S_IWUSR, d,
+ wil->csr + off + 20);
+ wil_debugfs_create_iomem_x32("IMC", S_IWUSR, d,
+ wil->csr + off + 24);
+
+ return 0;
+}
+
+static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d = debugfs_create_dir("PSEUDO_ISR", parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil_debugfs_create_iomem_x32("CAUSE", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+ wil_debugfs_create_iomem_x32("MASK_SW", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+ wil_debugfs_create_iomem_x32("MASK_FW", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW));
+
+ return 0;
+}
+
+static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
+ struct dentry *parent)
+{
+ struct dentry *d = debugfs_create_dir("ITR_CNT", parent);
+
+ if (IS_ERR_OR_NULL(d))
+ return -ENODEV;
+
+ wil_debugfs_create_iomem_x32("TRSH", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
+ wil_debugfs_create_iomem_x32("DATA", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_DATA));
+ wil_debugfs_create_iomem_x32("CTL", S_IRUGO, d, wil->csr +
+ HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+
+ return 0;
+}
+
+static int wil_memread_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+
+ if (a)
+ seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+ else
+ seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+
+ return 0;
+}
+
+static int wil_memread_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_memread_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_memread = {
+ .open = wil_memread_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int wil_default_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum { max_count = 4096 };
+ struct debugfs_blob_wrapper *blob = file->private_data;
+ loff_t pos = *ppos;
+ size_t available = blob->size;
+ void *buf;
+ size_t ret;
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (pos >= available || !count)
+ return 0;
+
+ if (count > available - pos)
+ count = available - pos;
+ if (count > max_count)
+ count = max_count;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wil_memcpy_fromio_32(buf, (const volatile void __iomem *)blob->data +
+ pos, count);
+
+ ret = copy_to_user(user_buf, buf, count);
+ kfree(buf);
+ if (ret == count)
+ return -EFAULT;
+
+ count -= ret;
+ *ppos = pos + count;
+
+ return count;
+}
+
+static const struct file_operations fops_ioblob = {
+ .read = wil_read_file_ioblob,
+ .open = wil_default_open,
+ .llseek = default_llseek,
+};
+
+static
+struct dentry *wil_debugfs_create_ioblob(const char *name,
+ mode_t mode,
+ struct dentry *parent,
+ struct debugfs_blob_wrapper *blob)
+{
+ return debugfs_create_file(name, mode, parent, blob, &fops_ioblob);
+}
+/*---reset---*/
+static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ /*
+ * BUG:
+ * this code does NOT sync device state with the rest of the system;
+ * use with care, debug only!
+ */
+ rtnl_lock();
+ dev_close(ndev);
+ ndev->flags &= ~IFF_UP;
+ rtnl_unlock();
+ wil_reset(wil);
+
+ return len;
+}
+
+static const struct file_operations fops_reset = {
+ .write = wil_write_file_reset,
+ .open = wil_default_open,
+};
+/*---------Tx descriptor------------*/
+
+static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ struct vring *vring = &(wil->vring_tx[0]);
+
+ if (!vring->va) {
+ seq_printf(s, "No Tx VRING\n");
+ return 0;
+ }
+
+ if (dbg_txdesc_index < vring->size) {
+ volatile struct vring_tx_desc *d =
+ &(vring->va[dbg_txdesc_index].tx);
+ volatile u32 *u = (volatile u32 *)d;
+ struct sk_buff *skb = vring->ctx[dbg_txdesc_index];
+
+ seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
+ seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[0], u[1], u[2], u[3]);
+ seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ u[4], u[5], u[6], u[7]);
+ seq_printf(s, " SKB = %p\n", skb);
+
+ if (skb) {
+ unsigned char printbuf[16 * 3 + 2];
+ int i = 0;
+ int len = skb_headlen(skb);
+ void *p = skb->data;
+
+ seq_printf(s, " len = %d\n", len);
+
+ while (i < len) {
+ int l = min(len - i, 16);
+ hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
+ sizeof(printbuf), false);
+ seq_printf(s, " : %s\n", printbuf);
+ i += l;
+ }
+ }
+ seq_printf(s, "}\n");
+ } else {
+ seq_printf(s, "TxDesc index (%d) >= size (%d)\n",
+ dbg_txdesc_index, vring->size);
+ }
+
+ return 0;
+}
+
+static int wil_txdesc_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_txdesc_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_txdesc = {
+ .open = wil_txdesc_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------beamforming------------*/
+static int wil_bf_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ seq_printf(s,
+ "TSF : 0x%016llx\n"
+ "TxMCS : %d\n"
+ "Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n",
+ wil->stats.tsf, wil->stats.bf_mcs,
+ wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+ wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+ return 0;
+}
+
+static int wil_bf_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_bf_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_bf = {
+ .open = wil_bf_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+/*---------SSID------------*/
+static ssize_t wil_read_file_ssid(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ wdev->ssid, wdev->ssid_len);
+}
+
+static ssize_t wil_write_file_ssid(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ if (*ppos != 0) {
+ wil_err(wil, "Unable to set SSID substring from [%d]\n",
+ (int)*ppos);
+ return -EINVAL;
+ }
+
+ if (count > sizeof(wdev->ssid)) {
+ wil_err(wil, "SSID too long, len = %d\n", (int)count);
+ return -EINVAL;
+ }
+ if (netif_running(ndev)) {
+ wil_err(wil, "Unable to change SSID on running interface\n");
+ return -EINVAL;
+ }
+
+ wdev->ssid_len = count;
+ return simple_write_to_buffer(wdev->ssid, wdev->ssid_len, ppos,
+ buf, count);
+}
+
+static const struct file_operations fops_ssid = {
+ .read = wil_read_file_ssid,
+ .write = wil_write_file_ssid,
+ .open = wil_default_open,
+};
+
+/*----------------*/
+int wil6210_debugfs_init(struct wil6210_priv *wil)
+{
+ struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
+ wil_to_wiphy(wil)->debugfsdir);
+
+ if (IS_ERR_OR_NULL(dbg))
+ return -ENODEV;
+
+ debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox);
+ debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring);
+ debugfs_create_file("txdesc", S_IRUGO, dbg, wil, &fops_txdesc);
+ debugfs_create_u32("txdesc_index", S_IRUGO | S_IWUSR, dbg,
+ &dbg_txdesc_index);
+ debugfs_create_file("bf", S_IRUGO, dbg, wil, &fops_bf);
+ debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid);
+ debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg,
+ &wil->secure_pcp);
+
+ wil6210_debugfs_create_ISR(wil, "USER_ICR", dbg,
+ HOSTADDR(RGF_USER_USER_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_TX_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_TX_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_RX_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_RX_ICR));
+ wil6210_debugfs_create_ISR(wil, "DMA_EP_MISC_ICR", dbg,
+ HOSTADDR(RGF_DMA_EP_MISC_ICR));
+ wil6210_debugfs_create_pseudo_ISR(wil, dbg);
+ wil6210_debugfs_create_ITR_CNT(wil, dbg);
+
+ debugfs_create_u32("mem_addr", S_IRUGO | S_IWUSR, dbg, &mem_addr);
+ debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
+
+ debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset);
+
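+ /*
+ * Expose raw device memory as read-only blobs. The offsets and sizes
+ * below describe the layout behind BAR0 as used here:
+ *   0x00000 + 0x0a000  register file ("blob_rgf")
+ *   0x40000 + 0x40000  FW code
+ *   0x80000 + 0x08000  FW data
+ *   0x88000 + 0x18000  FW peripheral memory
+ *   0xa0000 + 0x10000  uCode code
+ *   0xb0000 + 0x04000  uCode data
+ */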
+ wil->rgf_blob.data = (void * __force)wil->csr + 0;
+ wil->rgf_blob.size = 0xa000;
+ wil_debugfs_create_ioblob("blob_rgf", S_IRUGO, dbg, &wil->rgf_blob);
+
+ wil->fw_code_blob.data = (void * __force)wil->csr + 0x40000;
+ wil->fw_code_blob.size = 0x40000;
+ wil_debugfs_create_ioblob("blob_fw_code", S_IRUGO, dbg,
+ &wil->fw_code_blob);
+
+ wil->fw_data_blob.data = (void * __force)wil->csr + 0x80000;
+ wil->fw_data_blob.size = 0x8000;
+ wil_debugfs_create_ioblob("blob_fw_data", S_IRUGO, dbg,
+ &wil->fw_data_blob);
+
+ wil->fw_peri_blob.data = (void * __force)wil->csr + 0x88000;
+ wil->fw_peri_blob.size = 0x18000;
+ wil_debugfs_create_ioblob("blob_fw_peri", S_IRUGO, dbg,
+ &wil->fw_peri_blob);
+
+ wil->uc_code_blob.data = (void * __force)wil->csr + 0xa0000;
+ wil->uc_code_blob.size = 0x10000;
+ wil_debugfs_create_ioblob("blob_uc_code", S_IRUGO, dbg,
+ &wil->uc_code_blob);
+
+ wil->uc_data_blob.data = (void * __force)wil->csr + 0xb0000;
+ wil->uc_data_blob.size = 0x4000;
+ wil_debugfs_create_ioblob("blob_uc_data", S_IRUGO, dbg,
+ &wil->uc_data_blob);
+
+ return 0;
+}
+
+void wil6210_debugfs_remove(struct wil6210_priv *wil)
+{
+ debugfs_remove_recursive(wil->debug);
+ wil->debug = NULL;
+}
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
new file mode 100644
index 000000000000..dc97e7b2609c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -0,0 +1,490 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+
+#include "wil6210.h"
+
+/*
+ * Theory of operation:
+ *
+ * There is an ISR pseudo-cause register,
+ * dma_rgf->DMA_RGF.PSEUDO_CAUSE.PSEUDO_CAUSE.
+ * Its bits represent the OR'ed bits from the 3 real ISR registers:
+ * TX, RX, and MISC.
+ *
+ * The registers may be configured to either "write 1 to clear" or
+ * "clear on read" mode.
+ *
+ * When handling an interrupt, one has to mask/unmask the interrupts for
+ * the real ISR registers, or the hardware may malfunction.
+ */
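+
+/*
+ * For orientation: each real ISR block (RX, TX, MISC) is a group of 32-bit
+ * registers accessed via offsetof(struct RGF_ICR, ...). A sketch of the
+ * layout implied by those offsets (the authoritative definition lives in
+ * wil6210.h):
+ *
+ *	struct RGF_ICR {
+ *		u32 ICC;	0x00  interrupt control (COR/W1C mode)
+ *		u32 ICR;	0x04  interrupt cause
+ *		u32 ICM;	0x08  interrupt cause, masked
+ *		u32 ICS;	0x0c  interrupt cause set
+ *		u32 IMV;	0x10  interrupt mask value
+ *		u32 IMS;	0x14  interrupt mask set
+ *		u32 IMC;	0x18  interrupt mask clear
+ *	};
+ */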
+
+#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
+#define WIL6210_IMC_RX BIT_DMA_EP_RX_ICR_RX_DONE
+#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
+ BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \
+ ISR_MISC_MBOX_EVT | \
+ ISR_MISC_FW_ERROR)
+
+#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
+ BIT_DMA_PSEUDO_CAUSE_TX | \
+ BIT_DMA_PSEUDO_CAUSE_MISC))
+
+#if defined(CONFIG_WIL6210_ISR_COR)
+/* configure to Clear-On-Read mode */
+#define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+}
+#else /* defined(CONFIG_WIL6210_ISR_COR) */
+/* configure to Write-1-to-Clear mode */
+#define WIL_ICR_ICC_VALUE (0UL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+ iowrite32(x, addr);
+}
+#endif /* defined(CONFIG_WIL6210_ISR_COR) */
+
+static inline u32 wil_ioread32_and_clear(void __iomem *addr)
+{
+ u32 x = ioread32(addr);
+
+ wil_icr_clear(x, addr);
+
+ return x;
+}
+
+static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+
+ clear_bit(wil_status_irqen, &wil->status);
+}
+
+static void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_TX, wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_RX, wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
+{
+ iowrite32(WIL6210_IMC_MISC, wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ set_bit(wil_status_irqen, &wil->status);
+
+ iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
+ HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+}
+
+void wil6210_disable_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ wil6210_mask_irq_tx(wil);
+ wil6210_mask_irq_rx(wil);
+ wil6210_mask_irq_misc(wil);
+ wil6210_mask_irq_pseudo(wil);
+}
+
+void wil6210_enable_irq(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+ iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICC));
+
+ wil6210_unmask_irq_pseudo(wil);
+ wil6210_unmask_irq_tx(wil);
+ wil6210_unmask_irq_rx(wil);
+ wil6210_unmask_irq_misc(wil);
+}
+
+static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: RX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_rx(wil);
+
+ if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
+ wil_dbg_irq(wil, "RX done\n");
+ isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
+ wil_rx_handle(wil);
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+ wil6210_unmask_irq_rx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: TX\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_tx(wil);
+
+ if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
+ uint i;
+ wil_dbg_irq(wil, "TX done\n");
+ isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
+ for (i = 0; i < 24; i++) {
+ u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i);
+ if (isr & mask) {
+ isr &= ~mask;
+ wil_dbg_irq(wil, "TX done(%i)\n", i);
+ wil_tx_complete(wil, i);
+ }
+ }
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+ wil6210_unmask_irq_tx(wil);
+
+ return IRQ_HANDLED;
+}
+
+static void wil_notify_fw_error(struct wil6210_priv *wil)
+{
+ struct device *dev = &wil_to_ndev(wil)->dev;
+ char *envp[3] = {
+ [0] = "SOURCE=wil6210",
+ [1] = "EVENT=FW_ERROR",
+ [2] = NULL,
+ };
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+}
+
+static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+
+ wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: MISC\n");
+ return IRQ_NONE;
+ }
+
+ wil6210_mask_irq_misc(wil);
+
+ if (isr & ISR_MISC_FW_ERROR) {
+ wil_dbg_irq(wil, "IRQ: Firmware error\n");
+ clear_bit(wil_status_fwready, &wil->status);
+ wil_notify_fw_error(wil);
+ isr &= ~ISR_MISC_FW_ERROR;
+ }
+
+ if (isr & ISR_MISC_FW_READY) {
+ wil_dbg_irq(wil, "IRQ: FW ready\n");
+ /*
+ * Actual FW readiness is indicated by
+ * WMI_FW_READY_EVENTID
+ */
+ isr &= ~ISR_MISC_FW_READY;
+ }
+
+ wil->isr_misc = isr;
+
+ if (isr) {
+ return IRQ_WAKE_THREAD;
+ } else {
+ wil6210_unmask_irq_misc(wil);
+ return IRQ_HANDLED;
+ }
+}
+
+static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+ u32 isr = wil->isr_misc;
+
+ wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
+
+ if (isr & ISR_MISC_MBOX_EVT) {
+ wil_dbg_irq(wil, "MBOX event\n");
+ wmi_recv_cmd(wil);
+ isr &= ~ISR_MISC_MBOX_EVT;
+ }
+
+ if (isr)
+ wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+
+ wil->isr_misc = 0;
+
+ wil6210_unmask_irq_misc(wil);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * thread IRQ handler
+ */
+static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
+{
+ struct wil6210_priv *wil = cookie;
+
+ wil_dbg_irq(wil, "Thread IRQ\n");
+ /* Discover real IRQ cause */
+ if (wil->isr_misc)
+ wil6210_irq_misc_thread(irq, cookie);
+
+ wil6210_unmask_irq_pseudo(wil);
+
+ return IRQ_HANDLED;
+}
+
+/* DEBUG
+ * There is a subtle hardware bug that causes an IRQ to be raised when it
+ * should be masked. It is quite rare and hard to debug.
+ *
+ * Catch the issue if it happens and print everything we can.
+ */
+static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
+{
+ if (!test_bit(wil_status_irqen, &wil->status)) {
+ u32 icm_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_rx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_rx = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_RX_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ u32 icm_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_tx = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_tx = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_TX_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ u32 icm_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICM));
+ u32 icr_misc = wil_ioread32_and_clear(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ u32 imv_misc = ioread32(wil->csr +
+ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+ offsetof(struct RGF_ICR, IMV));
+ wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
+ "Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+ "Misc icm:icr:imv 0x%08x 0x%08x 0x%08x\n",
+ pseudo_cause,
+ icm_rx, icr_rx, imv_rx,
+ icm_tx, icr_tx, imv_tx,
+ icm_misc, icr_misc, imv_misc);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static irqreturn_t wil6210_hardirq(int irq, void *cookie)
+{
+ irqreturn_t rc = IRQ_HANDLED;
+ struct wil6210_priv *wil = cookie;
+ u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+
+ /**
+ * pseudo_cause is Clear-On-Read, no need to ACK
+ */
+ if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))
+ return IRQ_NONE;
+
+ /* FIXME: IRQ mask debug */
+ if (wil6210_debug_irq_mask(wil, pseudo_cause))
+ return IRQ_NONE;
+
+ wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause);
+
+ wil6210_mask_irq_pseudo(wil);
+
+ /* Discover the real IRQ cause.
+ * There are 2 possible phases for every IRQ:
+ * - hard IRQ handler, called right here
+ * - threaded handler, called later
+ *
+ * The hard IRQ handler reads and clears the ISR.
+ *
+ * If a threaded handler is requested, the hard IRQ handler
+ * returns IRQ_WAKE_THREAD and saves the ISR register value
+ * for the threaded handler's use.
+ *
+ * Voting for the wake thread - at least 1 vote is needed.
+ */
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
+ (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
+ (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
+ (wil6210_irq_misc(irq, cookie) == IRQ_WAKE_THREAD))
+ rc = IRQ_WAKE_THREAD;
+
+ /* if thread is requested, it will unmask IRQ */
+ if (rc != IRQ_WAKE_THREAD)
+ wil6210_unmask_irq_pseudo(wil);
+
+ return rc;
+}
+
+static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+ /*
+ * IRQs are requested in the following order:
+ * - Tx
+ * - Rx
+ * - Misc
+ */
+
+ rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
+ WIL_NAME"_tx", wil);
+ if (rc)
+ return rc;
+
+ rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
+ WIL_NAME"_rx", wil);
+ if (rc)
+ goto free0;
+
+ rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
+ wil6210_irq_misc_thread,
+ IRQF_SHARED, WIL_NAME"_misc", wil);
+ if (rc)
+ goto free1;
+
+ return 0;
+ /* error branch */
+free1:
+ free_irq(irq + 1, wil);
+free0:
+ free_irq(irq, wil);
+
+ return rc;
+}
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+{
+ int rc;
+ if (wil->n_msi == 3)
+ rc = wil6210_request_3msi(wil, irq);
+ else
+ rc = request_threaded_irq(irq, wil6210_hardirq,
+ wil6210_thread_irq,
+ wil->n_msi ? 0 : IRQF_SHARED,
+ WIL_NAME, wil);
+ if (rc)
+ return rc;
+
+ wil6210_enable_irq(wil);
+
+ return 0;
+}
+
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
+{
+ wil6210_disable_irq(wil);
+ free_irq(irq, wil);
+ if (wil->n_msi == 3) {
+ free_irq(irq + 1, wil);
+ free_irq(irq + 2, wil);
+ }
+}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
new file mode 100644
index 000000000000..761c389586d4
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/ieee80211.h>
+#include <linux/wireless.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <linux/if_arp.h>
+
+#include "wil6210.h"
+
+/*
+ * Due to a hardware issue,
+ * one has to read from/write to the NIC in 32-bit chunks;
+ * regular memcpy_fromio and siblings will not work on 64-bit
+ * platforms - they use 64-bit transactions.
+ *
+ * Force 32-bit transactions to enable the NIC on 64-bit platforms.
+ *
+ * To avoid a byte swap on big-endian hosts, __raw_{read|write}l
+ * should be used - {read|write}l would swap the little-endian
+ * PCI value into host endianness.
+ */
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count)
+{
+ u32 *d = dst;
+ const volatile u32 __iomem *s = src;
+
+ /*
+ * size_t is unsigned: a plain "count > 0, count -= 4" loop would wrap
+ * around when count is not a multiple of 4. Bias the loop by 4 so it
+ * terminates cleanly and rounds the copy up to whole 32-bit words.
+ */
+ for (count += 4; count > 4; count -= 4)
+ *d++ = __raw_readl(s++);
+}
+
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count)
+{
+ volatile u32 __iomem *d = dst;
+ const u32 *s = src;
+
+ for (count += 4; count > 4; count -= 4)
+ __raw_writel(*s++, d++);
+}
+
+static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+ uint i;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ wil_link_off(wil);
+ clear_bit(wil_status_fwconnected, &wil->status);
+
+ switch (wdev->sme_state) {
+ case CFG80211_SME_CONNECTED:
+ cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE,
+ NULL, 0, GFP_KERNEL);
+ break;
+ case CFG80211_SME_CONNECTING:
+ cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
+ wil_vring_fini_tx(wil, i);
+
+ clear_bit(wil_status_dontscan, &wil->status);
+}
+
+static void wil_disconnect_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work,
+ struct wil6210_priv, disconnect_worker);
+
+ _wil6210_disconnect(wil, NULL);
+}
+
+static void wil_connect_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ wil_dbg_misc(wil, "Connect timeout\n");
+
+ /* reschedule to thread context - disconnect must not
+ * run from atomic context
+ */
+ schedule_work(&wil->disconnect_worker);
+}
+
+static void wil_cache_mbox_regs(struct wil6210_priv *wil)
+{
+ /* make a shadow copy of the registers that should not change at run time */
+ wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+ sizeof(struct wil6210_mbox_ctl));
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+}
+
+int wil_priv_init(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_init(&wil->mutex);
+ mutex_init(&wil->wmi_mutex);
+
+ init_completion(&wil->wmi_ready);
+
+ wil->pending_connect_cid = -1;
+ setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
+
+ INIT_WORK(&wil->wmi_connect_worker, wmi_connect_worker);
+ INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
+ INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
+
+ INIT_LIST_HEAD(&wil->pending_wmi_ev);
+ spin_lock_init(&wil->wmi_ev_lock);
+
+ wil->wmi_wq = create_singlethread_workqueue(WIL_NAME"_wmi");
+ if (!wil->wmi_wq)
+ return -EAGAIN;
+
+ wil->wmi_wq_conn = create_singlethread_workqueue(WIL_NAME"_connect");
+ if (!wil->wmi_wq_conn) {
+ destroy_workqueue(wil->wmi_wq);
+ return -EAGAIN;
+ }
+
+ wil_cache_mbox_regs(wil);
+
+ return 0;
+}
+
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+ del_timer_sync(&wil->connect_timer);
+ _wil6210_disconnect(wil, bssid);
+}
+
+void wil_priv_deinit(struct wil6210_priv *wil)
+{
+ cancel_work_sync(&wil->disconnect_worker);
+ wil6210_disconnect(wil, NULL);
+ wmi_event_flush(wil);
+ destroy_workqueue(wil->wmi_wq_conn);
+ destroy_workqueue(wil->wmi_wq);
+}
+
+static void wil_target_reset(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "Resetting...\n");
+
+ /* register write */
+#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
+ /* register set = read, OR, write */
+#define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \
+ wil->csr + HOSTADDR(a))
+
+ /* hpal_perst_from_pad_src_n_mask */
+ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
+ /* car_perst_rst_src_n_mask */
+ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
+
+ W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */
+ W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
+
+ msleep(100);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
+
+ msleep(100);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+ W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+ msleep(2000);
+
+ W(RGF_USER_USER_CPU_0, BIT(0)); /* user_cpu_man_de_rst */
+
+ msleep(2000);
+
+ wil_dbg_misc(wil, "Reset completed\n");
+
+#undef W
+#undef S
+}
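+
+/*
+ * For reference, the W()/S() helpers used in wil_target_reset() expand to
+ * plain MMIO accesses, e.g.
+ *
+ *	W(RGF_USER_MAC_CPU_0, BIT(1))
+ *
+ * becomes
+ *
+ *	iowrite32(BIT(1), wil->csr + HOSTADDR(RGF_USER_MAC_CPU_0));
+ *
+ * while S() does the same as a read-OR-write, so only the named bits are
+ * set and the rest of the register is preserved.
+ */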
+
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
+{
+ le32_to_cpus(&r->base);
+ le16_to_cpus(&r->entry_size);
+ le16_to_cpus(&r->size);
+ le32_to_cpus(&r->tail);
+ le32_to_cpus(&r->head);
+}
+
+static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
+{
+ ulong to = msecs_to_jiffies(1000);
+ ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
+ if (0 == left) {
+ wil_err(wil, "Firmware not ready\n");
+ return -ETIME;
+ } else {
+ wil_dbg_misc(wil, "FW ready after %d ms\n",
+ jiffies_to_msecs(to-left));
+ }
+ return 0;
+}
+
+/*
+ * We reset all the structures, and we reset the UMAC.
+ * After calling this routine, you're expected to reload
+ * the firmware.
+ */
+int wil_reset(struct wil6210_priv *wil)
+{
+ int rc;
+
+ cancel_work_sync(&wil->disconnect_worker);
+ wil6210_disconnect(wil, NULL);
+
+ wil6210_disable_irq(wil);
+ wil->status = 0;
+
+ wmi_event_flush(wil);
+
+ flush_workqueue(wil->wmi_wq_conn);
+ flush_workqueue(wil->wmi_wq);
+
+ /* TODO: put MAC in reset */
+ wil_target_reset(wil);
+
+ /* init after reset */
+ wil->pending_connect_cid = -1;
+ INIT_COMPLETION(wil->wmi_ready);
+
+ wil_cache_mbox_regs(wil);
+
+ /* TODO: release MAC reset */
+ wil6210_enable_irq(wil);
+
+ /* we just started MAC, wait for FW ready */
+ rc = wil_wait_for_fw_ready(wil);
+
+ return rc;
+}
+
+
+void wil_link_on(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+}
+
+void wil_link_off(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+}
+
+static int __wil_up(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct ieee80211_channel *channel = wdev->preset_chandef.chan;
+ int rc;
+ int bi;
+ u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+ rc = wil_reset(wil);
+ if (rc)
+ return rc;
+
+ /* FIXME Firmware works now in PBSS mode (ToDS=0, FromDS=0) */
+ wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ wil_dbg_misc(wil, "type: STATION\n");
+ bi = 0;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_AP:
+ wil_dbg_misc(wil, "type: AP\n");
+ bi = 100;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wil_dbg_misc(wil, "type: P2P_CLIENT\n");
+ bi = 0;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ wil_dbg_misc(wil, "type: P2P_GO\n");
+ bi = 100;
+ ndev->type = ARPHRD_ETHER;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ wil_dbg_misc(wil, "type: Monitor\n");
+ bi = 0;
+ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Apply profile in the following order: */
+ /* SSID and channel for the AP */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (wdev->ssid_len == 0) {
+ wil_err(wil, "SSID not set\n");
+ return -EINVAL;
+ }
+ wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
+ if (channel)
+ wmi_set_channel(wil, channel->hw_value);
+ break;
+ default:
+ break;
+ }
+
+ /* MAC address - pre-requisite for other commands */
+ wmi_set_mac_address(wil, ndev->dev_addr);
+
+ /* Set up beaconing if required. */
+ rc = wmi_set_bcon(wil, bi, wmi_nettype);
+ if (rc)
+ return rc;
+
+ /* Rx VRING. After MAC and beacon */
+ wil_rx_init(wil);
+
+ return 0;
+}
+
+int wil_up(struct wil6210_priv *wil)
+{
+ int rc;
+
+ mutex_lock(&wil->mutex);
+ rc = __wil_up(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
+
+static int __wil_down(struct wil6210_priv *wil)
+{
+ if (wil->scan_request) {
+ cfg80211_scan_done(wil->scan_request, true);
+ wil->scan_request = NULL;
+ }
+
+ wil6210_disconnect(wil, NULL);
+ wil_rx_fini(wil);
+
+ return 0;
+}
+
+int wil_down(struct wil6210_priv *wil)
+{
+ int rc;
+
+ mutex_lock(&wil->mutex);
+ rc = __wil_down(wil);
+ mutex_unlock(&wil->mutex);
+
+ return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
new file mode 100644
index 000000000000..8ce2e33dce20
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+
+#include "wil6210.h"
+
+static int wil_open(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ return wil_up(wil);
+}
+
+static int wil_stop(struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ return wil_down(wil);
+}
+
+static const struct net_device_ops wil_netdev_ops = {
+ .ndo_open = wil_open,
+ .ndo_stop = wil_stop,
+ .ndo_start_xmit = wil_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr)
+{
+ struct net_device *ndev;
+ struct wireless_dev *wdev;
+ struct wil6210_priv *wil;
+ struct ieee80211_channel *ch;
+ int rc = 0;
+
+ wdev = wil_cfg80211_init(dev);
+ if (IS_ERR(wdev)) {
+ dev_err(dev, "wil_cfg80211_init failed\n");
+ return wdev;
+ }
+
+ wil = wdev_to_wil(wdev);
+ wil->csr = csr;
+ wil->wdev = wdev;
+
+ rc = wil_priv_init(wil);
+ if (rc) {
+ dev_err(dev, "wil_priv_init failed\n");
+ goto out_wdev;
+ }
+
+ wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
+ /* default monitor channel */
+ ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
+ cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+
+ ndev = alloc_netdev(0, "wlan%d", ether_setup);
+ if (!ndev) {
+ dev_err(dev, "alloc_netdev_mqs failed\n");
+ rc = -ENOMEM;
+ goto out_priv;
+ }
+
+ ndev->netdev_ops = &wil_netdev_ops;
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+
+ wil_link_off(wil);
+
+ return wil;
+
+ out_priv:
+ wil_priv_deinit(wil);
+
+ out_wdev:
+ wil_wdev_free(wil);
+
+ return ERR_PTR(rc);
+}
+
+void wil_if_free(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ if (!ndev)
+ return;
+
+ free_netdev(ndev);
+ wil_priv_deinit(wil);
+ wil_wdev_free(wil);
+}
+
+int wil_if_add(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ int rc;
+
+ rc = register_netdev(ndev);
+ if (rc < 0) {
+ dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
+ return rc;
+ }
+
+ wil_link_off(wil);
+
+ return 0;
+}
+
+void wil_if_remove(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ unregister_netdev(ndev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
new file mode 100644
index 000000000000..81c35c6e3832
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+
+static int use_msi = 1;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi,
+ " Use MSI interrupt: "
+ "0 - don't, 1 - (default) - single, or 3");
+
+/* Bus ops */
+static int wil_if_pcie_enable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+ int rc;
+
+ pci_set_master(pdev);
+
+ /*
+ * how many MSI interrupts to request?
+ */
+ switch (use_msi) {
+ case 3:
+ case 1:
+ case 0:
+ break;
+ default:
+ wil_err(wil, "Invalid use_msi=%d, default to 1\n",
+ use_msi);
+ use_msi = 1;
+ }
+ wil->n_msi = use_msi;
+ if (wil->n_msi) {
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
+ rc = pci_enable_msi_block(pdev, wil->n_msi);
+ if (rc && (wil->n_msi == 3)) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ wil->n_msi = 1;
+ rc = pci_enable_msi_block(pdev, wil->n_msi);
+ }
+ if (rc) {
+ wil_err(wil, "pci_enable_msi failed, use INTx\n");
+ wil->n_msi = 0;
+ }
+ } else {
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+ }
+
+ rc = wil6210_init_irq(wil, pdev->irq);
+ if (rc)
+ goto stop_master;
+
+ /* need reset here to obtain MAC */
+ rc = wil_reset(wil);
+ if (rc)
+ goto release_irq;
+
+ return 0;
+
+ release_irq:
+ wil6210_fini_irq(wil, pdev->irq);
+ /* safe to call if no MSI */
+ pci_disable_msi(pdev);
+ stop_master:
+ pci_clear_master(pdev);
+ return rc;
+}
+
+static int wil_if_pcie_disable(struct wil6210_priv *wil)
+{
+ struct pci_dev *pdev = wil->pdev;
+
+ pci_clear_master(pdev);
+ /* disable and release IRQ */
+ wil6210_fini_irq(wil, pdev->irq);
+ /* safe to call if no MSI */
+ pci_disable_msi(pdev);
+ /* TODO: disable HW */
+
+ return 0;
+}
+
+static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct wil6210_priv *wil;
+ struct device *dev = &pdev->dev;
+ void __iomem *csr;
+ int rc;
+
+ /* check HW */
+ dev_info(&pdev->dev, WIL_NAME " device found [%04x:%04x] (rev %x)\n",
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+
+ if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
+ dev_err(&pdev->dev, "Not " WIL_NAME "? "
+ "BAR0 size is %lu while expecting %lu\n",
+ (ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
+ return -ENODEV;
+ }
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ return -ENODEV;
+ }
+ /* rollback to err_disable_pdev */
+
+ rc = pci_request_region(pdev, 0, WIL_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_region failed\n");
+ goto err_disable_pdev;
+ }
+ /* rollback to err_release_reg */
+
+ csr = pci_ioremap_bar(pdev, 0);
+ if (!csr) {
+ dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+ rc = -ENODEV;
+ goto err_release_reg;
+ }
+ /* rollback to err_iounmap */
+ dev_info(&pdev->dev, "CSR at %pR -> %p\n", &pdev->resource[0], csr);
+
+ wil = wil_if_alloc(dev, csr);
+ if (IS_ERR(wil)) {
+ rc = (int)PTR_ERR(wil);
+ dev_err(dev, "wil_if_alloc failed: %d\n", rc);
+ goto err_iounmap;
+ }
+ /* rollback to if_free */
+
+ pci_set_drvdata(pdev, wil);
+ wil->pdev = pdev;
+
+ /* FW should raise IRQ when ready */
+ rc = wil_if_pcie_enable(wil);
+ if (rc) {
+ wil_err(wil, "Enable device failed\n");
+ goto if_free;
+ }
+ /* rollback to bus_disable */
+
+ rc = wil_if_add(wil);
+ if (rc) {
+ wil_err(wil, "wil_if_add failed: %d\n", rc);
+ goto bus_disable;
+ }
+
+ wil6210_debugfs_init(wil);
+
+ /* check FW is alive */
+ wmi_echo(wil);
+
+ return 0;
+
+ bus_disable:
+ wil_if_pcie_disable(wil);
+ if_free:
+ wil_if_free(wil);
+ err_iounmap:
+ pci_iounmap(pdev, csr);
+ err_release_reg:
+ pci_release_region(pdev, 0);
+ err_disable_pdev:
+ pci_disable_device(pdev);
+
+ return rc;
+}
+
+static void wil_pcie_remove(struct pci_dev *pdev)
+{
+ struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+ wil6210_debugfs_remove(wil);
+ wil_if_pcie_disable(wil);
+ wil_if_remove(wil);
+ wil_if_free(wil);
+ pci_iounmap(pdev, wil->csr);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
+ { PCI_DEVICE(0x1ae9, 0x0301) },
+ { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
+
+static struct pci_driver wil6210_driver = {
+ .probe = wil_pcie_probe,
+ .remove = wil_pcie_remove,
+ .id_table = wil6210_pcie_ids,
+ .name = WIL_NAME,
+};
+
+module_pci_driver(wil6210_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Qualcomm Atheros <wil6210@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Driver for 60g WiFi WIL6210 card");
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
new file mode 100644
index 000000000000..d1315b442375
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/hardirq.h>
+#include <net/ieee80211_radiotap.h>
+#include <linux/if_arp.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+#include "txrx.h"
+
+static bool rtap_include_phy_info;
+module_param(rtap_include_phy_info, bool, S_IRUGO);
+MODULE_PARM_DESC(rtap_include_phy_info,
+ " Include PHY info in the radiotap header, default - no");
+
+static inline int wil_vring_is_empty(struct vring *vring)
+{
+ return vring->swhead == vring->swtail;
+}
+
+static inline u32 wil_vring_next_tail(struct vring *vring)
+{
+ return (vring->swtail + 1) % vring->size;
+}
+
+static inline void wil_vring_advance_head(struct vring *vring, int n)
+{
+ vring->swhead = (vring->swhead + n) % vring->size;
+}
+
+static inline int wil_vring_is_full(struct vring *vring)
+{
+ return wil_vring_next_tail(vring) == vring->swhead;
+}
+/*
+ * Available space in Tx Vring
+ */
+static inline int wil_vring_avail_tx(struct vring *vring)
+{
+ u32 swhead = vring->swhead;
+ u32 swtail = vring->swtail;
+ int used = (vring->size + swhead - swtail) % vring->size;
+
+ return vring->size - used - 1;
+}
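+
+/*
+ * Illustration (editorial example, not part of the original code): with
+ * size 128, swhead 5 and swtail 3, used = (128 + 5 - 3) % 128 = 2, so
+ * wil_vring_avail_tx() returns 128 - 2 - 1 = 125. One slot is always kept
+ * free so that a full ring can be distinguished from an empty one.
+ */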
+
+static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+ uint i;
+
+ BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
+
+ vring->swhead = 0;
+ vring->swtail = 0;
+ vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
+ if (!vring->ctx) {
+ vring->va = NULL;
+ return -ENOMEM;
+ }
+ /*
+	 * vring->va should be aligned on its size, rounded up to a power of 2.
+	 * This is guaranteed by dma_alloc_coherent.
+ */
+ vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
+ if (!vring->va) {
+ wil_err(wil, "vring_alloc [%d] failed to alloc DMA mem\n",
+ vring->size);
+ kfree(vring->ctx);
+ vring->ctx = NULL;
+ return -ENOMEM;
+ }
+	/* Initially, all descriptors are SW owned.
+	 * For Tx and Rx, the ownership bit is at the same location, thus
+	 * either descriptor type can be used here.
+ */
+ for (i = 0; i < vring->size; i++) {
+ volatile struct vring_tx_desc *d = &(vring->va[i].tx);
+ d->dma.status = TX_DMA_STATUS_DU;
+ }
+
+ wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
+ vring->va, (unsigned long long)vring->pa, vring->ctx);
+
+ return 0;
+}
+
+static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
+ int tx)
+{
+ struct device *dev = wil_to_dev(wil);
+ size_t sz = vring->size * sizeof(vring->va[0]);
+
+ while (!wil_vring_is_empty(vring)) {
+ if (tx) {
+ volatile struct vring_tx_desc *d =
+ &vring->va[vring->swtail].tx;
+ dma_addr_t pa = d->dma.addr_low |
+ ((u64)d->dma.addr_high << 32);
+ struct sk_buff *skb = vring->ctx[vring->swtail];
+ if (skb) {
+ dma_unmap_single(dev, pa, d->dma.length,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ vring->ctx[vring->swtail] = NULL;
+ } else {
+ dma_unmap_page(dev, pa, d->dma.length,
+ DMA_TO_DEVICE);
+ }
+ vring->swtail = wil_vring_next_tail(vring);
+ } else { /* rx */
+ volatile struct vring_rx_desc *d =
+ &vring->va[vring->swtail].rx;
+ dma_addr_t pa = d->dma.addr_low |
+ ((u64)d->dma.addr_high << 32);
+ struct sk_buff *skb = vring->ctx[vring->swhead];
+ dma_unmap_single(dev, pa, d->dma.length,
+ DMA_FROM_DEVICE);
+ kfree_skb(skb);
+ wil_vring_advance_head(vring, 1);
+ }
+ }
+ dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
+ kfree(vring->ctx);
+ vring->pa = 0;
+ vring->va = NULL;
+ vring->ctx = NULL;
+}
+
+/**
+ * Allocate one skb for Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
+ u32 i, int headroom)
+{
+ struct device *dev = wil_to_dev(wil);
+ unsigned int sz = RX_BUF_LEN;
+ volatile struct vring_rx_desc *d = &(vring->va[i].rx);
+ dma_addr_t pa;
+
+ /* TODO align */
+ struct sk_buff *skb = dev_alloc_skb(sz + headroom);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, sz);
+
+ pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa))) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+ d->dma.addr_low = lower_32_bits(pa);
+ d->dma.addr_high = (u16)upper_32_bits(pa);
+ /* ip_length don't care */
+ /* b11 don't care */
+ /* error don't care */
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = sz;
+ vring->ctx[i] = skb;
+
+ return 0;
+}
+
+/**
+ * Adds the radiotap header
+ *
+ * Any error is indicated as "Bad FCS"
+ *
+ * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
+ * - Rx descriptor: 32 bytes
+ * - Phy info
+ */
+static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
+ struct sk_buff *skb,
+ volatile struct vring_rx_desc *d)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ struct wil6210_rtap {
+ struct ieee80211_radiotap_header rthdr;
+ /* fields should be in the order of bits in rthdr.it_present */
+ /* flags */
+ u8 flags;
+ /* channel */
+ __le16 chnl_freq __aligned(2);
+ __le16 chnl_flags;
+ /* MCS */
+ u8 mcs_present;
+ u8 mcs_flags;
+ u8 mcs_index;
+ } __packed;
+ struct wil6210_rtap_vendor {
+ struct wil6210_rtap rtap;
+ /* vendor */
+ u8 vendor_oui[3] __aligned(2);
+ u8 vendor_ns;
+ __le16 vendor_skip;
+ u8 vendor_data[0];
+ } __packed;
+ struct wil6210_rtap_vendor *rtap_vendor;
+ int rtap_len = sizeof(struct wil6210_rtap);
+ int phy_length = 0; /* phy info header size, bytes */
+ static char phy_data[128];
+ struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+ if (rtap_include_phy_info) {
+ rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
+ /* calculate additional length */
+ if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
+ /**
+			 * PHY info starts at an 8-byte boundary and is laid
+			 * out in 8-byte lines; the last line may be partially
+			 * written (HW bug), so the FW configures one excessive
+			 * last line. The driver skips this last line.
+ */
+ int len = min_t(int, 8 + sizeof(phy_data),
+ wil_rxdesc_phy_length(d));
+ if (len > 8) {
+ void *p = skb_tail_pointer(skb);
+ void *pa = PTR_ALIGN(p, 8);
+ if (skb_tailroom(skb) >= len + (pa - p)) {
+ phy_length = len - 8;
+ memcpy(phy_data, pa, phy_length);
+ }
+ }
+ }
+ rtap_len += phy_length;
+ }
+
+ if (skb_headroom(skb) < rtap_len &&
+ pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
+ wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
+ return;
+ }
+
+ rtap_vendor = (void *)skb_push(skb, rtap_len);
+ memset(rtap_vendor, 0, rtap_len);
+
+ rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
+ rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
+ rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
+ (1 << IEEE80211_RADIOTAP_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_CHANNEL) |
+ (1 << IEEE80211_RADIOTAP_MCS));
+ if (d->dma.status & RX_DMA_STATUS_ERROR)
+ rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
+
+ rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
+ rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
+
+ rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
+ rtap_vendor->rtap.mcs_flags = 0;
+ rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
+
+ if (rtap_include_phy_info) {
+ rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
+ /* OUI for Wilocity 04:ce:14 */
+ rtap_vendor->vendor_oui[0] = 0x04;
+ rtap_vendor->vendor_oui[1] = 0xce;
+ rtap_vendor->vendor_oui[2] = 0x14;
+ rtap_vendor->vendor_ns = 1;
+ /* Rx descriptor + PHY data */
+ rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
+ phy_length);
+ memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
+ memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
+ phy_length);
+ }
+}
+
+/*
+ * Fast in-place swap of two u16 values, with no temporary needed
+ */
+static void wil_swap_u16(u16 *a, u16 *b)
+{
+ *a ^= *b;
+ *b ^= *a;
+ *a ^= *b;
+}
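+
+/*
+ * Editorial note: the XOR trick above swaps without a temporary, but it
+ * would zero both values if @a and @b pointed to the same location.
+ * wil_swap_ethaddr() below always passes distinct source/destination
+ * pointers, so it is safe here.
+ */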
+
+static void wil_swap_ethaddr(void *data)
+{
+ struct ethhdr *eth = data;
+ u16 *s = (u16 *)eth->h_source;
+ u16 *d = (u16 *)eth->h_dest;
+
+ wil_swap_u16(s++, d++);
+ wil_swap_u16(s++, d++);
+ wil_swap_u16(s, d);
+}
+
+/**
+ * reap 1 frame from @swhead
+ *
+ * Safe to call from IRQ
+ */
+static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
+ struct vring *vring)
+{
+ struct device *dev = wil_to_dev(wil);
+ struct net_device *ndev = wil_to_ndev(wil);
+ volatile struct vring_rx_desc *d;
+ struct sk_buff *skb;
+ dma_addr_t pa;
+ unsigned int sz = RX_BUF_LEN;
+ u8 ftype;
+ u8 ds_bits;
+
+ if (wil_vring_is_empty(vring))
+ return NULL;
+
+ d = &(vring->va[vring->swhead].rx);
+ if (!(d->dma.status & RX_DMA_STATUS_DU)) {
+		/* it is not an error; we just reached the end of the Rx done area */
+ return NULL;
+ }
+
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ skb = vring->ctx[vring->swhead];
+ dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+ skb_trim(skb, d->dma.length);
+
+ wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
+
+ /* use radiotap header only if required */
+ if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
+ wil_rx_add_radiotap_header(wil, skb, d);
+
+ wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ wil_vring_advance_head(vring, 1);
+
+ /* no extra checks if in sniffer mode */
+ if (ndev->type != ARPHRD_ETHER)
+ return skb;
+ /*
+	 * Non-data frames may be delivered through the Rx DMA channel (e.g. BAR).
+	 * The driver should recognize them by the frame type found in the Rx
+	 * descriptor. If the type is not data, it is an 802.11 frame as-is.
+ */
+ ftype = wil_rxdesc_ftype(d) << 2;
+ if (ftype != IEEE80211_FTYPE_DATA) {
+ wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
+ /* TODO: process it */
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ if (skb->len < ETH_HLEN) {
+ wil_err(wil, "Short frame, len = %d\n", skb->len);
+ /* TODO: process it (i.e. BAR) */
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ ds_bits = wil_rxdesc_ds_bits(d);
+ if (ds_bits == 1) {
+ /*
+ * HW bug - in ToDS mode, i.e. Rx on AP side,
+ * addresses get swapped
+ */
+ wil_swap_ethaddr(skb->data);
+ }
+
+ return skb;
+}
+
+/**
+ * Allocate and fill up to @count buffers in the Rx ring.
+ * Buffers are posted at @swtail.
+ */
+static int wil_rx_refill(struct wil6210_priv *wil, int count)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring *v = &wil->vring_rx;
+ u32 next_tail;
+ int rc = 0;
+ int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
+ WIL6210_RTAP_SIZE : 0;
+
+ for (; next_tail = wil_vring_next_tail(v),
+ (next_tail != v->swhead) && (count-- > 0);
+ v->swtail = next_tail) {
+ rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
+ if (rc) {
+ wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
+ rc, v->swtail);
+ break;
+ }
+ }
+ iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+
+ return rc;
+}
+
+/*
+ * Pass Rx packet to the netif. Update statistics.
+ */
+static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+{
+ int rc;
+ unsigned int len = skb->len;
+
+ skb_orphan(skb);
+
+ if (in_interrupt())
+ rc = netif_rx(skb);
+ else
+ rc = netif_rx_ni(skb);
+
+ if (likely(rc == NET_RX_SUCCESS)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+}
+
+/**
+ * Process all completed skb's from the Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_rx_handle(struct wil6210_priv *wil)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct vring *v = &wil->vring_rx;
+ struct sk_buff *skb;
+
+ if (!v->va) {
+ wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+ return;
+ }
+ wil_dbg_txrx(wil, "%s()\n", __func__);
+ while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
+ wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ skb->dev = ndev;
+ skb_reset_mac_header(skb);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+
+ } else {
+ skb->protocol = eth_type_trans(skb, ndev);
+ }
+
+ wil_netif_rx_any(skb, ndev);
+ }
+ wil_rx_refill(wil, v->size);
+}
+
+int wil_rx_init(struct wil6210_priv *wil)
+{
+ struct vring *vring = &wil->vring_rx;
+ int rc;
+
+ vring->size = WIL6210_RX_RING_SIZE;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ return rc;
+
+ rc = wmi_rx_chain_add(wil, vring);
+ if (rc)
+ goto err_free;
+
+ rc = wil_rx_refill(wil, vring->size);
+ if (rc)
+ goto err_free;
+
+ return 0;
+ err_free:
+ wil_vring_free(wil, vring, 0);
+
+ return rc;
+}
+
+void wil_rx_fini(struct wil6210_priv *wil)
+{
+ struct vring *vring = &wil->vring_rx;
+
+ if (vring->va)
+ wil_vring_free(wil, vring, 0);
+}
+
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+ int cid, int tid)
+{
+ int rc;
+ struct wmi_vring_cfg_cmd cmd = {
+ .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+ .vring_cfg = {
+ .tx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
+ },
+ .ringid = id,
+ .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .mac_ctrl = 0,
+ .to_resolution = 0,
+ .agg_max_wsize = 16,
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ },
+ },
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_vring_cfg_done_event cmd;
+ } __packed reply;
+ struct vring *vring = &wil->vring_tx[id];
+
+ if (vring->va) {
+ wil_err(wil, "Tx ring [%d] already allocated\n", id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ vring->size = size;
+ rc = wil_vring_alloc(wil, vring);
+ if (rc)
+ goto out;
+
+ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+ cmd.vring_cfg.tx_sw_ring.ring_size = cpu_to_le16(vring->size);
+
+ rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ goto out_free;
+
+ if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) {
+ wil_err(wil, "Tx config failed, status 0x%02x\n",
+ reply.cmd.status);
+ rc = -EINVAL;
+ goto out_free;
+ }
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+
+ return 0;
+ out_free:
+ wil_vring_free(wil, vring, 1);
+ out:
+
+ return rc;
+}
+
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
+{
+ struct vring *vring = &wil->vring_tx[id];
+
+ if (!vring->va)
+ return;
+
+ wil_vring_free(wil, vring, 1);
+}
+
+static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
+ struct sk_buff *skb)
+{
+ struct vring *v = &wil->vring_tx[0];
+
+ if (v->va)
+ return v;
+
+ return NULL;
+}
+
+static int wil_tx_desc_map(volatile struct vring_tx_desc *d,
+ dma_addr_t pa, u32 len)
+{
+ d->dma.addr_low = lower_32_bits(pa);
+ d->dma.addr_high = (u16)upper_32_bits(pa);
+ d->dma.ip_length = 0;
+	/* 0..6: mac_length; 7: ip_version 0-IP6 1-IP4 */
+ d->dma.b11 = 0/*14 | BIT(7)*/;
+ d->dma.error = 0;
+ d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+ d->dma.length = len;
+ d->dma.d0 = 0;
+ d->mac.d[0] = 0;
+ d->mac.d[1] = 0;
+ d->mac.d[2] = 0;
+ d->mac.ucode_cmd = 0;
+ /* use dst index 0 */
+ d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
+ (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
+ /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
+ d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+ (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+ return 0;
+}
+
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+ struct sk_buff *skb)
+{
+ struct device *dev = wil_to_dev(wil);
+ volatile struct vring_tx_desc *d;
+ u32 swhead = vring->swhead;
+ int avail = wil_vring_avail_tx(vring);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ uint f;
+ int vring_index = vring - wil->vring_tx;
+ uint i = swhead;
+ dma_addr_t pa;
+
+ wil_dbg_txrx(wil, "%s()\n", __func__);
+
+ if (avail < vring->size/8)
+ netif_tx_stop_all_queues(wil_to_ndev(wil));
+ if (avail < 1 + nr_frags) {
+ wil_err(wil, "Tx ring full. No space for %d fragments\n",
+ 1 + nr_frags);
+ return -ENOMEM;
+ }
+ d = &(vring->va[i].tx);
+
+ /* FIXME FW can accept only unicast frames for the peer */
+ memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
+
+ pa = dma_map_single(dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
+ skb->data, (unsigned long long)pa);
+ wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb_headlen(skb), false);
+
+ if (unlikely(dma_mapping_error(dev, pa)))
+ return -EINVAL;
+ /* 1-st segment */
+ wil_tx_desc_map(d, pa, skb_headlen(skb));
+ d->mac.d[2] |= ((nr_frags + 1) <<
+ MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+ /* middle segments */
+ for (f = 0; f < nr_frags; f++) {
+ const struct skb_frag_struct *frag =
+ &skb_shinfo(skb)->frags[f];
+ int len = skb_frag_size(frag);
+ i = (swhead + f + 1) % vring->size;
+ d = &(vring->va[i].tx);
+ pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, pa)))
+ goto dma_error;
+ wil_tx_desc_map(d, pa, len);
+ vring->ctx[i] = NULL;
+ }
+ /* for the last seg only */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
+ d->dma.d0 |= BIT(9); /* BUG: undocumented bit */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+ d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+
+ wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ /* advance swhead */
+ wil_vring_advance_head(vring, nr_frags + 1);
+ wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
+ iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ vring->ctx[i] = skb_get(skb);
+
+ return 0;
+ dma_error:
+ /* unmap what we have mapped */
+ /* Note: increment @f to operate with positive index */
+ for (f++; f > 0; f--) {
+ i = (swhead + f) % vring->size;
+ d = &(vring->va[i].tx);
+ d->dma.status = TX_DMA_STATUS_DU;
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ if (vring->ctx[i])
+ dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ }
+
+ return -EINVAL;
+}
+
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+ struct vring *vring;
+ int rc;
+
+ wil_dbg_txrx(wil, "%s()\n", __func__);
+ if (!test_bit(wil_status_fwready, &wil->status)) {
+ wil_err(wil, "FW not ready\n");
+ goto drop;
+ }
+ if (!test_bit(wil_status_fwconnected, &wil->status)) {
+ wil_err(wil, "FW not connected\n");
+ goto drop;
+ }
+ if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ wil_err(wil, "Xmit in monitor mode not supported\n");
+ goto drop;
+ }
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+ rc = wmi_tx_eapol(wil, skb);
+ } else {
+ /* find vring */
+ vring = wil_find_tx_vring(wil, skb);
+ if (!vring) {
+ wil_err(wil, "No Tx VRING available\n");
+ goto drop;
+ }
+ /* set up vring entry */
+ rc = wil_tx_vring(wil, vring, skb);
+ }
+ switch (rc) {
+ case 0:
+ /* statistics will be updated on the tx_complete */
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ case -ENOMEM:
+ return NETDEV_TX_BUSY;
+ default:
+ break; /* goto drop; */
+ }
+ drop:
+ netif_tx_stop_all_queues(ndev);
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+
+ return NET_XMIT_DROP;
+}
+
+/**
+ * Clean up transmitted skb's from the Tx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_tx_complete(struct wil6210_priv *wil, int ringid)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct device *dev = wil_to_dev(wil);
+ struct vring *vring = &wil->vring_tx[ringid];
+
+ if (!vring->va) {
+ wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
+ return;
+ }
+
+ wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
+
+ while (!wil_vring_is_empty(vring)) {
+ volatile struct vring_tx_desc *d = &vring->va[vring->swtail].tx;
+ dma_addr_t pa;
+ struct sk_buff *skb;
+ if (!(d->dma.status & TX_DMA_STATUS_DU))
+ break;
+
+ wil_dbg_txrx(wil,
+ "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+ vring->swtail, d->dma.length, d->dma.status,
+ d->dma.error);
+ wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
+ (const void *)d, sizeof(*d), false);
+
+ pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+ skb = vring->ctx[vring->swtail];
+ if (skb) {
+ if (d->dma.error == 0) {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ } else {
+ ndev->stats.tx_errors++;
+ }
+
+ dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ vring->ctx[vring->swtail] = NULL;
+ } else {
+ dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+ }
+ d->dma.addr_low = 0;
+ d->dma.addr_high = 0;
+ d->dma.length = 0;
+ d->dma.status = TX_DMA_STATUS_DU;
+ vring->swtail = wil_vring_next_tail(vring);
+ }
+ if (wil_vring_avail_tx(vring) > vring->size/4)
+ netif_tx_wake_all_queues(wil_to_ndev(wil));
+}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
new file mode 100644
index 000000000000..45a61f597c5c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WIL6210_TXRX_H
+#define WIL6210_TXRX_H
+
+#define BUF_SW_OWNED (1)
+#define BUF_HW_OWNED (0)
+
+/* size of max. Rx packet */
+#define RX_BUF_LEN (2048)
+#define TX_BUF_LEN (2048)
+/* how many bytes to reserve for rtap header? */
+#define WIL6210_RTAP_SIZE (128)
+
+/* Tx/Rx path */
+/*
+ * Tx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 9 : lifetime_expiry_value:10
+ * bit 10 : interrup_en:1
+ * bit 11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit 14 : timestamp_insertion:1
+ * bit 15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit 27 : mcs_en:1
+ * bit 28..29 : reserved1:2
+ * bit 30 : reserved2:1
+ * bit 31 : sn_preserved:1
+ * [dword 1]
+ * bit 0.. 3 : pkt_mode:4
+ * bit 4 : pkt_mode_en:1
+ * bit 5.. 7 : reserved0:3
+ * bit 8..13 : reserved1:6
+ * bit 14 : reserved2:1
+ * bit 15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit 20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit 23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit 31 : max_retry_en:1
+ * [dword 2]
+ * bit 0.. 7 : num_of_descriptors:8
+ * bit 8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2
+ * bit 20 : snap_hdr_insertion_en:1
+ * bit 21 : vlan_removal_en:1
+ * bit 22..31 : reserved0:10
+ * [dword 3]
+ * bit 0.. 31: ucode_cmd:32
+ */
+struct vring_tx_mac {
+ u32 d[3];
+ u32 ucode_cmd;
+} __packed;
+
+/* TX MAC Dword 0 */
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_POS 0
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_LEN 10
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_MSK 0x3FF
+
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_POS 10
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_MSK 0x400
+
+#define MAC_CFG_DESC_TX_0_STATUS_EN_POS 11
+#define MAC_CFG_DESC_TX_0_STATUS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_STATUS_EN_MSK 0x800
+
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_POS 12
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_LEN 2
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_MSK 0x3000
+
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_POS 14
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_LEN 1
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_MSK 0x4000
+
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_POS 15
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_LEN 1
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_POS 22
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_LEN 5
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_MSK 0x7C00000
+
+#define MAC_CFG_DESC_TX_0_MCS_EN_POS 27
+#define MAC_CFG_DESC_TX_0_MCS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_MCS_EN_MSK 0x8000000
+
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_POS 31
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_LEN 1
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_MSK 0x80000000
+
+/* TX MAC Dword 1 */
+#define MAC_CFG_DESC_TX_1_PKT_MODE_POS 0
+#define MAC_CFG_DESC_TX_1_PKT_MODE_LEN 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_MSK 0xF
+
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_MSK 0x10
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_POS 15
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_POS 16
+#define MAC_CFG_DESC_TX_1_DST_INDEX_LEN 4
+#define MAC_CFG_DESC_TX_1_DST_INDEX_MSK 0xF0000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS 20
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_POS 21
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_LEN 2
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_MSK 0x600000
+
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_POS 23
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_MSK 0x800000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_POS 24
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_LEN 7
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_MSK 0x7F000000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_POS 31
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_MSK 0x80000000
+
+/* TX MAC Dword 2 */
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS 0
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_LEN 8
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_MSK 0xFF
+
+#define MAC_CFG_DESC_TX_2_RESERVED_POS 8
+#define MAC_CFG_DESC_TX_2_RESERVED_LEN 10
+#define MAC_CFG_DESC_TX_2_RESERVED_MSK 0x3FF00
+
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS 18
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_LEN 2
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_MSK 0xC0000
+
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS 20
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_POS 21
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_MSK 0x200000
+
+/* TX MAC Dword 3 */
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_POS 0
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_LEN 32
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_MSK 0xFFFFFFFF
+
+/* TX DMA Dword 0 */
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_POS 0
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_LEN 8
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_MSK 0xFF
+
+#define DMA_CFG_DESC_TX_0_CMD_EOP_POS 8
+#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
+
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
+
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS 11
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_LEN 2
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_MSK 0x1800
+
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS 13
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_MSK 0x2000
+
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS 14
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_MSK 0x4000
+
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS 15
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_MSK 0x8000
+
+#define DMA_CFG_DESC_TX_0_QID_POS 16
+#define DMA_CFG_DESC_TX_0_QID_LEN 5
+#define DMA_CFG_DESC_TX_0_QID_MSK 0x1F0000
+
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS 21
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_MSK 0x200000
+
+#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
+#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
+#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000
+
+
+#define TX_DMA_STATUS_DU BIT(0)
+
+struct vring_tx_dma {
+ u32 d0;
+ u32 addr_low;
+ u16 addr_high;
+ u8 ip_length;
+ u8 b11; /* 0..6: mac_length; 7:ip_version */
+ u8 error; /* 0..2: err; 3..7: reserved; */
+ u8 status; /* 0: used; 1..7; reserved */
+ u16 length;
+} __packed;
+
+/*
+ * Rx descriptor - MAC part
+ * [dword 0]
+ * bit 0.. 3 : tid:4 The QoS (b3-0) TID Field
+ * bit 4.. 6 : connection_id:3 : The source index that was found during
+ *		parsing of the TA. This field is used to define the source of the packet
+ * bit 7 : reserved:1
+ * bit 8.. 9 : mac_id:2 : The MAC virtual Ring number (always zero)
+ * bit 10..11 : frame_type:2 : The FC Control (b3-2) - MPDU Type
+ * (management, data, control and extension)
+ * bit 12..15 : frame_subtype:4 : The FC Control (b7-4) - Frame Subtype
+ * bit 16..27 : seq_number:12 The received Sequence number field
+ * bit 28..31 : extended:4 extended subtype
+ * [dword 1]
+ * bit 0.. 3 : reserved
+ * bit 4.. 5 : key_id:2
+ * bit 6 : decrypt_bypass:1
+ * bit 7 : security:1
+ * bit 8.. 9 : ds_bits:2
+ * bit 10 : a_msdu_present:1 from qos header
+ * bit 11 : a_msdu_type:1 from qos header
+ * bit 12 : a_mpdu:1 part of AMPDU aggregation
+ * bit 13 : broadcast:1
+ * bit 14 : multicast:1
+ * bit 15 : reserved:1
+ * bit 16..20 : rx_mac_qid:5 The Queue Identifier that the packet
+ * is received from
+ * bit 21..24 : mcs:4
+ * bit 25..28 : mic_icr:4
+ * bit 29..31 : reserved:3
+ * [dword 2]
+ * bit 0.. 2 : time_slot:3 The timeslot that the MPDU is received
+ * bit 3 : fc_protocol_ver:1 The FC Control (b0) - Protocol Version
+ * bit 4 : fc_order:1 The FC Control (b15) -Order
+ * bit 5.. 7 : qos_ack_policy:3 The QoS (b6-5) ack policy Field
+ * bit 8 : esop:1 The QoS (b4) ESOP field
+ * bit 9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG field
+ * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved field
+ * bit 15 : qos_ac_constraint:1
+ * bit 16..31 : pn_15_0:16 low 2 bytes of PN
+ * [dword 3]
+ * bit 0..31 : pn_47_16:32 high 4 bytes of PN
+ */
+struct vring_rx_mac {
+ u32 d0;
+ u32 d1;
+ u16 w4;
+ u16 pn_15_0;
+ u32 pn_47_16;
+} __packed;
+
+/*
+ * Rx descriptor - DMA part
+ * [dword 0]
+ * bit 0.. 7 : l4_length:8 layer 4 length
+ * bit 8.. 9 : reserved:2
+ * bit 10 : cmd_dma_it:1
+ * bit 11..15 : reserved:5
+ * bit 16..29 : phy_info_length:14
+ * bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
+ * [dword 1]
+ * bit 0..31 : addr_low:32 The payload buffer low address
+ * [dword 2]
+ * bit 0..15 : addr_high:16 The payload buffer high address
+ * bit 16..23 : ip_length:8
+ * bit 24..30 : mac_length:7
+ * bit 31 : ip_version:1
+ * [dword 3]
+ * [byte 12] error
+ * [byte 13] status
+ * bit 0 : du:1
+ * bit 1 : eop:1
+ * bit 2 : error:1
+ * bit 3 : mi:1
+ * bit 4 : l3_identified:1
+ * bit 5 : l4_identified:1
+ * bit 6 : phy_info_included:1
+ * bit 7 : reserved:1
+ * [word 7] length
+ *
+ */
+
+#define RX_DMA_D0_CMD_DMA_IT BIT(10)
+
+#define RX_DMA_STATUS_DU BIT(0)
+#define RX_DMA_STATUS_ERROR BIT(2)
+#define RX_DMA_STATUS_PHY_INFO BIT(6)
+
+struct vring_rx_dma {
+ u32 d0;
+ u32 addr_low;
+ u16 addr_high;
+ u8 ip_length;
+ u8 b11;
+ u8 error;
+ u8 status;
+ u16 length;
+} __packed;
+
+struct vring_tx_desc {
+ struct vring_tx_mac mac;
+ struct vring_tx_dma dma;
+} __packed;
+
+struct vring_rx_desc {
+ struct vring_rx_mac mac;
+ struct vring_rx_dma dma;
+} __packed;
+
+union vring_desc {
+ struct vring_tx_desc tx;
+ struct vring_rx_desc rx;
+} __packed;
+
+static inline int wil_rxdesc_phy_length(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->dma.d0, 16, 29);
+}
+
+static inline int wil_rxdesc_mcs(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 21, 24);
+}
+
+static inline int wil_rxdesc_ds_bits(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 8, 9);
+}
+
+static inline int wil_rxdesc_ftype(volatile struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d0, 10, 11);
+}
+
+#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
new file mode 100644
index 000000000000..aea961ff8f08
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WIL6210_H__
+#define __WIL6210_H__
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+
+#include "dbg_hexdump.h"
+
+#define WIL_NAME "wil6210"
+
+/**
+ * Extract bits [@b0:@b1] (inclusive) from the value @x.
+ * It is assumed that @b0 <= @b1, otherwise the result is incorrect.
+ */
+static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
+{
+ return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
+}
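+
+/*
+ * Editorial example: WIL_GET_BITS(0xABCD1234, 8, 15) == 0x12 -
+ * the value is shifted right by @b0 (8) and masked with (1 << 8) - 1.
+ */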
+
+#define WIL6210_MEM_SIZE (2*1024*1024UL)
+
+#define WIL6210_RX_RING_SIZE (128)
+#define WIL6210_TX_RING_SIZE (128)
+#define WIL6210_MAX_TX_RINGS (24)
+
+/* Hardware definitions begin */
+
+/*
+ * Mapping
+ * RGF File | Host addr | FW addr
+ * | |
+ * user_rgf | 0x000000 | 0x880000
+ * dma_rgf | 0x001000 | 0x881000
+ * pcie_rgf | 0x002000 | 0x882000
+ * | |
+ */
+
+/* Where various structures placed in host address space */
+#define WIL6210_FW_HOST_OFF (0x880000UL)
+
+#define HOSTADDR(fwaddr) (fwaddr - WIL6210_FW_HOST_OFF)
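+
+/*
+ * Editorial example: RGF_DMA_PSEUDO_CAUSE below is FW address 0x881c68, so
+ * HOSTADDR(RGF_DMA_PSEUDO_CAUSE) = 0x881c68 - 0x880000 = 0x1c68 - the
+ * offset within BAR0 that is added to wil->csr for ioread32/iowrite32.
+ */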
+
+/*
+ * Interrupt control registers block
+ *
+ * each interrupt is controlled by the same bit in all registers
+ */
+struct RGF_ICR {
+ u32 ICC; /* Cause Control, RW: 0 - W1C, 1 - COR */
+ u32 ICR; /* Cause, W1C/COR depending on ICC */
+ u32 ICM; /* Cause masked (ICR & ~IMV), W1C/COR depending on ICC */
+ u32 ICS; /* Cause Set, WO */
+ u32 IMV; /* Mask, RW+S/C */
+ u32 IMS; /* Mask Set, write 1 to set */
+ u32 IMC; /* Mask Clear, write 1 to clear */
+} __packed;
+
+/* registers - FW addresses */
+#define RGF_USER_USER_SCRATCH_PAD (0x8802bc)
+#define RGF_USER_USER_ICR (0x880b4c) /* struct RGF_ICR */
+ #define BIT_USER_USER_ICR_SW_INT_2 BIT(18)
+#define RGF_USER_CLKS_CTL_SW_RST_MASK_0 (0x880b14)
+#define RGF_USER_MAC_CPU_0 (0x8801fc)
+#define RGF_USER_USER_CPU_0 (0x8801e0)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
+
+#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
+ #define BIT_DMA_PSEUDO_CAUSE_RX BIT(0)
+ #define BIT_DMA_PSEUDO_CAUSE_TX BIT(1)
+ #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2)
+
+#define RGF_DMA_EP_TX_ICR (0x881bb4) /* struct RGF_ICR */
+ #define BIT_DMA_EP_TX_ICR_TX_DONE BIT(0)
+ #define BIT_DMA_EP_TX_ICR_TX_DONE_N(n) BIT(n+1) /* n = [0..23] */
+#define RGF_DMA_EP_RX_ICR (0x881bd0) /* struct RGF_ICR */
+ #define BIT_DMA_EP_RX_ICR_RX_DONE BIT(0)
+#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
+ #define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
+ #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
+ #define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */
+
+/* Interrupt moderation control */
+#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
+#define RGF_DMA_ITR_CNT_DATA (0x881c60)
+#define RGF_DMA_ITR_CNT_CRL (0x881C64)
+ #define BIT_DMA_ITR_CNT_CRL_EN BIT(0)
+ #define BIT_DMA_ITR_CNT_CRL_EXT_TICK BIT(1)
+ #define BIT_DMA_ITR_CNT_CRL_FOREVER BIT(2)
+ #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
+ #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
+
+/* popular locations */
+#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
+#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
+ offsetof(struct RGF_ICR, ICS))
+#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
+
+/* ISR register bits */
+#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT(0)
+#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1)
+#define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3)
+
+/* Hardware definitions end */
+
+struct wil6210_mbox_ring {
+ u32 base;
+ u16 entry_size; /* max. size of mbox entry, incl. all headers */
+ u16 size;
+ u32 tail;
+ u32 head;
+} __packed;
+
+struct wil6210_mbox_ring_desc {
+ __le32 sync;
+ __le32 addr;
+} __packed;
+
+/* at HOST_OFF_WIL6210_MBOX_CTL */
+struct wil6210_mbox_ctl {
+ struct wil6210_mbox_ring tx;
+ struct wil6210_mbox_ring rx;
+} __packed;
+
+struct wil6210_mbox_hdr {
+ __le16 seq;
+ __le16 len; /* payload, bytes after this header */
+ __le16 type;
+ u8 flags;
+ u8 reserved;
+} __packed;
+
+#define WIL_MBOX_HDR_TYPE_WMI (0)
+
+/* max. value for wil6210_mbox_hdr.len */
+#define MAX_MBOXITEM_SIZE (240)
+
+struct wil6210_mbox_hdr_wmi {
+ u8 reserved0[2];
+ __le16 id;
+ __le16 info1; /* bits [0..3] - device_id, rest - unused */
+ u8 reserved1[2];
+} __packed;
+
+struct pending_wmi_event {
+ struct list_head list;
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_hdr_wmi wmi;
+ u8 data[0];
+ } __packed event;
+};
+
+union vring_desc;
+
+struct vring {
+ dma_addr_t pa;
+ volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
+ u16 size; /* number of vring_desc elements */
+ u32 swtail;
+ u32 swhead;
+ u32 hwtail; /* write here to inform hw */
+ void **ctx; /* void *ctx[size] - software context */
+};
+
+enum { /* for wil6210_priv.status */
+ wil_status_fwready = 0,
+ wil_status_fwconnected,
+ wil_status_dontscan,
+ wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+};
+
+struct pci_dev;
+
+struct wil6210_stats {
+ u64 tsf;
+ u32 snr;
+ u16 last_mcs_rx;
+ u16 bf_mcs; /* last BF, used for Tx */
+ u16 my_rx_sector;
+ u16 my_tx_sector;
+ u16 peer_rx_sector;
+ u16 peer_tx_sector;
+};
+
+struct wil6210_priv {
+ struct pci_dev *pdev;
+ int n_msi;
+ struct wireless_dev *wdev;
+ void __iomem *csr;
+ ulong status;
+ /* profile */
+ u32 monitor_flags;
+ u32 secure_pcp; /* create secure PCP? */
+ int sinfo_gen;
+ /* cached ISR registers */
+ u32 isr_misc;
+ /* mailbox related */
+ struct mutex wmi_mutex;
+ struct wil6210_mbox_ctl mbox_ctl;
+ struct completion wmi_ready;
+ u16 wmi_seq;
+ u16 reply_id; /**< wait for this WMI event */
+ void *reply_buf;
+ u16 reply_size;
+ struct workqueue_struct *wmi_wq; /* for deferred calls */
+ struct work_struct wmi_event_worker;
+ struct workqueue_struct *wmi_wq_conn; /* for connect worker */
+ struct work_struct wmi_connect_worker;
+ struct work_struct disconnect_worker;
+ struct timer_list connect_timer;
+ int pending_connect_cid;
+ struct list_head pending_wmi_ev;
+ /*
+ * protect pending_wmi_ev
+ * - fill in IRQ from wil6210_irq_misc,
+ * - consumed in thread by wmi_event_worker
+ */
+ spinlock_t wmi_ev_lock;
+ /* DMA related */
+ struct vring vring_rx;
+ struct vring vring_tx[WIL6210_MAX_TX_RINGS];
+ u8 dst_addr[WIL6210_MAX_TX_RINGS][ETH_ALEN];
+ /* scan */
+ struct cfg80211_scan_request *scan_request;
+
+ struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
+ /* statistics */
+ struct wil6210_stats stats;
+ /* debugfs */
+ struct dentry *debug;
+ struct debugfs_blob_wrapper fw_code_blob;
+ struct debugfs_blob_wrapper fw_data_blob;
+ struct debugfs_blob_wrapper fw_peri_blob;
+ struct debugfs_blob_wrapper uc_code_blob;
+ struct debugfs_blob_wrapper uc_data_blob;
+ struct debugfs_blob_wrapper rgf_blob;
+};
+
+#define wil_to_wiphy(i) (i->wdev->wiphy)
+#define wil_to_dev(i) (wiphy_dev(wil_to_wiphy(i)))
+#define wiphy_to_wil(w) (struct wil6210_priv *)(wiphy_priv(w))
+#define wil_to_wdev(i) (i->wdev)
+#define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w))
+#define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
+#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
+
+#define wil_dbg(wil, fmt, arg...) netdev_dbg(wil_to_ndev(wil), fmt, ##arg)
+#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg)
+#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg)
+
+#define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
+#define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
+#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
+
+#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\
+ prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ size_t count);
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ size_t count);
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr);
+void wil_if_free(struct wil6210_priv *wil);
+int wil_if_add(struct wil6210_priv *wil);
+void wil_if_remove(struct wil6210_priv *wil);
+int wil_priv_init(struct wil6210_priv *wil);
+void wil_priv_deinit(struct wil6210_priv *wil);
+int wil_reset(struct wil6210_priv *wil);
+void wil_link_on(struct wil6210_priv *wil);
+void wil_link_off(struct wil6210_priv *wil);
+int wil_up(struct wil6210_priv *wil);
+int wil_down(struct wil6210_priv *wil);
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
+
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr);
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len);
+void wmi_recv_cmd(struct wil6210_priv *wil);
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+ u16 reply_id, void *reply, u8 reply_size, int to_msec);
+void wmi_connect_worker(struct work_struct *work);
+void wmi_event_worker(struct work_struct *work);
+void wmi_event_flush(struct wil6210_priv *wil);
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
+int wmi_set_channel(struct wil6210_priv *wil, int channel);
+int wmi_get_channel(struct wil6210_priv *wil, int *channel);
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb);
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr);
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr, int key_len, const void *key);
+int wmi_echo(struct wil6210_priv *wil);
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
+void wil6210_disable_irq(struct wil6210_priv *wil);
+void wil6210_enable_irq(struct wil6210_priv *wil);
+
+int wil6210_debugfs_init(struct wil6210_priv *wil);
+void wil6210_debugfs_remove(struct wil6210_priv *wil);
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev);
+void wil_wdev_free(struct wil6210_priv *wil);
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype);
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
+
+int wil_rx_init(struct wil6210_priv *wil);
+void wil_rx_fini(struct wil6210_priv *wil);
+
+/* TX API */
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+ int cid, int tid);
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+void wil_tx_complete(struct wil6210_priv *wil, int ringid);
+
+/* RX API */
+void wil_rx_handle(struct wil6210_priv *wil);
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type);
+
+#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
new file mode 100644
index 000000000000..0bb3b76b4b58
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -0,0 +1,1020 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+#include "wmi.h"
+
+/**
+ * WMI event receiving - theory of operations
+ *
+ * When the firmware is about to report a WMI event, it fills a memory area
+ * in the mailbox and raises the misc. IRQ. The threaded interrupt handler is
+ * invoked for the misc IRQ, and it calls @wmi_recv_cmd.
+ *
+ * @wmi_recv_cmd reads the event, allocates a memory chunk and attaches it to
+ * the event list @wil->pending_wmi_ev. Then, the work queue @wil->wmi_wq wakes
+ * up and handles events within @wmi_event_worker. Every event gets detached
+ * from the list, processed and deleted.
+ *
+ * The purpose of this mechanism is to release the IRQ thread; otherwise,
+ * if WMI event handling involves another WMI command flow, this second flow
+ * would not complete because the IRQ thread is blocked.
+ */
+
+/**
+ * Addressing - theory of operations
+ *
+ * There are several buses present on the WIL6210 card.
+ * The same memory areas are visible at different addresses on
+ * the different buses. There are 3 main bus masters:
+ * - MAC CPU (ucode)
+ * - User CPU (firmware)
+ * - AHB (host)
+ *
+ * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
+ * AHB addresses starting from 0x880000
+ *
+ * Internally, the firmware uses addresses that allow faster access but
+ * are invisible from the host. To read from these addresses, an alternative
+ * AHB address must be used.
+ *
+ * Memory mapping
+ * Linker address PCI/Host address
+ * 0x880000 .. 0xa80000 2Mb BAR0
+ * 0x800000 .. 0x807000 0x900000 .. 0x907000 28k DCCM
+ * 0x840000 .. 0x857000 0x908000 .. 0x91f000 92k PERIPH
+ */
+
+/**
+ * @fw_mapping provides memory remapping table
+ */
+static const struct {
+ u32 from; /* linker address - from, inclusive */
+ u32 to; /* linker address - to, exclusive */
+ u32 host; /* PCI/Host address - BAR0 + 0x880000 */
+} fw_mapping[] = {
+ {0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */
+ {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
+ {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
+ {0x880000, 0x88a000, 0x880000}, /* various RGF */
+ {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */
+ /*
+ * 920000..930000 ucode code RAM
+ * 930000..932000 ucode data RAM
+ */
+};
+
+/**
+ * Return the AHB address for a given firmware/ucode internal (linker) address
+ * @x - internal address
+ * If the address has no valid AHB mapping, return 0
+ */
+static u32 wmi_addr_remap(u32 x)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+ if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
+ return x + fw_mapping[i].host - fw_mapping[i].from;
+ }
+
+ return 0;
+}
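+
+/*
+ * Editorial example: a linker address of 0x800100 falls into the FW data RAM
+ * entry {0x800000, 0x808000, 0x900000} of @fw_mapping, so it remaps to
+ * 0x800100 + 0x900000 - 0x800000 = 0x900100 on the AHB/host side.
+ */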
+
+/**
+ * Check address validity for WMI buffer; remap if needed
+ * @ptr - internal (linker) fw/ucode address
+ *
+ * A valid buffer should be DWORD aligned
+ *
+ * Return the address for accessing the buffer from the host;
+ * if the buffer is not valid, return NULL.
+ */
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+ u32 off;
+ u32 ptr = le32_to_cpu(ptr_);
+
+ if (ptr % 4)
+ return NULL;
+
+ ptr = wmi_addr_remap(ptr);
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > WIL6210_MEM_SIZE - 4)
+ return NULL;
+
+ return wil->csr + off;
+}
+
+/**
+ * Check address validity
+ */
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
+{
+ u32 off;
+
+ if (ptr % 4)
+ return NULL;
+
+ if (ptr < WIL6210_FW_HOST_OFF)
+ return NULL;
+
+ off = HOSTADDR(ptr);
+ if (off > WIL6210_MEM_SIZE - 4)
+ return NULL;
+
+ return wil->csr + off;
+}
+
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+ struct wil6210_mbox_hdr *hdr)
+{
+ void __iomem *src = wmi_buffer(wil, ptr);
+ if (!src)
+ return -EINVAL;
+
+ wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));
+
+ return 0;
+}
+
+static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+ struct {
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_hdr_wmi wmi;
+ } __packed cmd = {
+ .hdr = {
+ .type = WIL_MBOX_HDR_TYPE_WMI,
+ .flags = 0,
+ .len = cpu_to_le16(sizeof(cmd.wmi) + len),
+ },
+ .wmi = {
+ .id = cpu_to_le16(cmdid),
+ .info1 = 0,
+ },
+ };
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
+ struct wil6210_mbox_ring_desc d_head;
+ u32 next_head;
+ void __iomem *dst;
+ void __iomem *head = wmi_addr(wil, r->head);
+ uint retry;
+
+ if (sizeof(cmd) + len > r->entry_size) {
+ wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
+ (int)(sizeof(cmd) + len), r->entry_size);
+ return -ERANGE;
+ }
+
+ might_sleep();
+
+ if (!test_bit(wil_status_fwready, &wil->status)) {
+ wil_err(wil, "FW not ready\n");
+ return -EAGAIN;
+ }
+
+ if (!head) {
+ wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
+ return -EINVAL;
+ }
+ /* read Tx head till it is not busy */
+ for (retry = 5; retry > 0; retry--) {
+ wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
+ if (d_head.sync == 0)
+ break;
+ msleep(20);
+ }
+ if (d_head.sync != 0) {
+ wil_err(wil, "WMI head busy\n");
+ return -EBUSY;
+ }
+ /* next head */
+ next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
+ wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
+	/* wait till FW finishes with the previous command */
+ for (retry = 5; retry > 0; retry--) {
+ r->tail = ioread32(wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.tail));
+ if (next_head != r->tail)
+ break;
+ msleep(20);
+ }
+ if (next_head == r->tail) {
+ wil_err(wil, "WMI ring full\n");
+ return -EBUSY;
+ }
+ dst = wmi_buffer(wil, d_head.addr);
+ if (!dst) {
+ wil_err(wil, "invalid WMI buffer: 0x%08x\n",
+ le32_to_cpu(d_head.addr));
+ return -EINVAL;
+ }
+ cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
+ /* set command */
+ wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+ wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
+ sizeof(cmd), true);
+ wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+ wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
+ wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
+ /* mark entry as full */
+ iowrite32(1, wil->csr + HOSTADDR(r->head) +
+ offsetof(struct wil6210_mbox_ring_desc, sync));
+ /* advance next ptr */
+ iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, tx.head));
+
+ /* interrupt to FW */
+ iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+
+ return 0;
+}
+
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+ int rc;
+
+ mutex_lock(&wil->wmi_mutex);
+ rc = __wmi_send(wil, cmdid, buf, len);
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
+
+/*=== Event handlers ===*/
+static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct wmi_ready_event *evt = d;
+ u32 ver = le32_to_cpu(evt->sw_version);
+
+ wil_dbg_wmi(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
+ memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
+ }
+ snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
+ "%d", ver);
+}
+
+static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
+ int len)
+{
+ wil_dbg_wmi(wil, "WMI: FW ready\n");
+
+ set_bit(wil_status_fwready, &wil->status);
+ /* reuse wmi_ready for the firmware ready indication */
+ complete(&wil->wmi_ready);
+}
+
+static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_rx_mgmt_packet_event *data = d;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct ieee80211_mgmt *rx_mgmt_frame =
+ (struct ieee80211_mgmt *)data->payload;
+ int ch_no = data->info.channel+1;
+ u32 freq = ieee80211_channel_to_frequency(ch_no,
+ IEEE80211_BAND_60GHZ);
+ struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
+ /* TODO convert LE to CPU */
+ s32 signal = 0; /* TODO */
+ __le16 fc = rx_mgmt_frame->frame_control;
+ u32 d_len = le32_to_cpu(data->info.len);
+ u16 d_status = le16_to_cpu(data->info.status);
+
+ wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
+ data->info.channel, data->info.mcs, data->info.snr);
+ wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
+ le16_to_cpu(data->info.stype));
+ wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
+ data->info.qid, data->info.mid, data->info.cid);
+
+ if (!channel) {
+ wil_err(wil, "Frame on unsupported channel\n");
+ return;
+ }
+
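+ /* report beacons and probe responses to cfg80211 as scan results */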
+ if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
+ struct cfg80211_bss *bss;
+ u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp);
+ u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info);
+ u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int);
+ const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
+ size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+
+ bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid,
+ tsf, cap, bi, ie_buf, ie_len,
+ signal, GFP_KERNEL);
+ if (bss) {
+ wil_dbg_wmi(wil, "Added BSS %pM\n",
+ rx_mgmt_frame->bssid);
+ cfg80211_put_bss(wiphy, bss);
+ } else {
+ wil_err(wil, "cfg80211_inform_bss() failed\n");
+ }
+ }
+}
+
+static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ if (wil->scan_request) {
+ struct wmi_scan_complete_event *data = d;
+ bool aborted = (data->status != 0);
+
+ wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+ cfg80211_scan_done(wil->scan_request, aborted);
+ wil->scan_request = NULL;
+ } else {
+ wil_err(wil, "SCAN_COMPLETE while not scanning\n");
+ }
+}
+
+static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil->wdev;
+ struct wmi_connect_event *evt = d;
+ int ch; /* channel number */
+ struct station_info sinfo;
+ u8 *assoc_req_ie, *assoc_resp_ie;
+ size_t assoc_req_ielen, assoc_resp_ielen;
+ /* capinfo(u16) + listen_interval(u16) + IEs */
+ const size_t assoc_req_ie_offset = sizeof(u16) * 2;
+ /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+ const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "Connect event too short : %d bytes\n", len);
+ return;
+ }
+ if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
+ evt->assoc_resp_len) {
+ wil_err(wil,
+ "Connect event corrupted : %d != %d + %d + %d + %d\n",
+ len, (int)sizeof(*evt), evt->beacon_ie_len,
+ evt->assoc_req_len, evt->assoc_resp_len);
+ return;
+ }
+ ch = evt->channel + 1;
+ wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
+ evt->bssid, ch, evt->cid);
+ wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+ evt->assoc_info, len - sizeof(*evt), true);
+
+ /* figure out the IEs */
+ assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
+ assoc_req_ie_offset];
+ assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
+ if (evt->assoc_req_len <= assoc_req_ie_offset) {
+ assoc_req_ie = NULL;
+ assoc_req_ielen = 0;
+ }
+
+ assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
+ evt->assoc_req_len +
+ assoc_resp_ie_offset];
+ assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
+ if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
+ assoc_resp_ie = NULL;
+ assoc_resp_ielen = 0;
+ }
+
+ if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+ if (wdev->sme_state != CFG80211_SME_CONNECTING) {
+ wil_err(wil, "Not in connecting state\n");
+ return;
+ }
+ del_timer_sync(&wil->connect_timer);
+ cfg80211_connect_result(ndev, evt->bssid,
+ assoc_req_ie, assoc_req_ielen,
+ assoc_resp_ie, assoc_resp_ielen,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL);
+
+ } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ sinfo.generation = wil->sinfo_gen++;
+
+ if (assoc_req_ie) {
+ sinfo.assoc_req_ies = assoc_req_ie;
+ sinfo.assoc_req_ies_len = assoc_req_ielen;
+ sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
+ }
+
+ cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
+ }
+ set_bit(wil_status_fwconnected, &wil->status);
+
+ /* FIXME FW can transmit only ucast frames to peer */
+ /* FIXME real ring_id instead of hard coded 0 */
+ memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
+
+ wil->pending_connect_cid = evt->cid;
+ queue_work(wil->wmi_wq_conn, &wil->wmi_connect_worker);
+}
+
+static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct wmi_disconnect_event *evt = d;
+
+ wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n",
+ evt->bssid,
+ evt->protocol_reason_status, evt->disconnect_reason);
+
+ wil->sinfo_gen++;
+
+ wil6210_disconnect(wil, evt->bssid);
+}
+
+static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct wmi_notify_req_done_event *evt = d;
+
+ if (len < sizeof(*evt)) {
+ wil_err(wil, "Short NOTIFY event\n");
+ return;
+ }
+
+ wil->stats.tsf = le64_to_cpu(evt->tsf);
+ wil->stats.snr = le32_to_cpu(evt->snr_val);
+ wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs);
+ wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector);
+ wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector);
+ wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
+ wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
+ wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n"
+ "BF status 0x%08x SNR 0x%08x\n"
+ "Tx Tpt %d goodput %d Rx goodput %d\n"
+ "Sectors(rx:tx) my %d:%d peer %d:%d\n",
+ wil->stats.bf_mcs, wil->stats.tsf, evt->status,
+ wil->stats.snr, le32_to_cpu(evt->tx_tpt),
+ le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
+ wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+ wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+}
+
+/*
+ * Firmware reports an EAPOL frame using a WMI event.
+ * Reconstruct the Ethernet frame and deliver it via the normal Rx path.
+ */
+static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_eapol_rx_event *evt = d;
+ u16 eapol_len = le16_to_cpu(evt->eapol_len);
+ int sz = eapol_len + ETH_HLEN;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+
+ wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
+ evt->src_mac);
+
+ if (eapol_len > 196) { /* TODO: revisit size limit */
+ wil_err(wil, "EAPOL too large\n");
+ return;
+ }
+
+ skb = alloc_skb(sz, GFP_KERNEL);
+ if (!skb) {
+ wil_err(wil, "Failed to allocate skb\n");
+ return;
+ }
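+ /* rebuild an Ethernet header in front of the EAPOL payload */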
+ eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+ memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
+ memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
+ eth->h_proto = cpu_to_be16(ETH_P_PAE);
+ memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+ } else {
+ ndev->stats.rx_dropped++;
+ }
+}
+
+static const struct {
+ int eventid;
+ void (*handler)(struct wil6210_priv *wil, int eventid,
+ void *data, int data_len);
+} wmi_evt_handlers[] = {
+ {WMI_READY_EVENTID, wmi_evt_ready},
+ {WMI_FW_READY_EVENTID, wmi_evt_fw_ready},
+ {WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
+ {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
+ {WMI_CONNECT_EVENTID, wmi_evt_connect},
+ {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
+ {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify},
+ {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
+};
+
+/*
+ * Runs in IRQ context.
+ * Extract a WMI message from the mailbox and queue it to @wil->pending_wmi_ev;
+ * it will eventually be handled by @wmi_event_worker in the context of the
+ * "wil6210_wmi" thread.
+ */
+void wmi_recv_cmd(struct wil6210_priv *wil)
+{
+ struct wil6210_mbox_ring_desc d_tail;
+ struct wil6210_mbox_hdr hdr;
+ struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+ struct pending_wmi_event *evt;
+ u8 *cmd;
+ void __iomem *src;
+ ulong flags;
+
+ for (;;) {
+ u16 len;
+
+ r->head = ioread32(wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.head));
+ if (r->tail == r->head)
+ return;
+
+ /* read cmd from tail */
+ wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
+ sizeof(struct wil6210_mbox_ring_desc));
+ if (d_tail.sync == 0) {
+ wil_err(wil, "Mbox evt not owned by FW?\n");
+ return;
+ }
+
+ if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
+ wil_err(wil, "Mbox evt at 0x%08x?\n",
+ le32_to_cpu(d_tail.addr));
+ return;
+ }
+
+ len = le16_to_cpu(hdr.len);
+ src = wmi_buffer(wil, d_tail.addr) +
+ sizeof(struct wil6210_mbox_hdr);
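+ /* round the allocation up to a 4-byte multiple for the 32-bit copy below */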
+ evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
+ event.wmi) + len, 4),
+ GFP_KERNEL);
+ if (!evt)
+ return;
+
+ evt->event.hdr = hdr;
+ cmd = (void *)&evt->event.wmi;
+ wil_memcpy_fromio_32(cmd, src, len);
+ /* mark entry as empty */
+ iowrite32(0, wil->csr + HOSTADDR(r->tail) +
+ offsetof(struct wil6210_mbox_ring_desc, sync));
+ /* indicate */
+ wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
+ hdr.flags);
+ if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+ wil_dbg_wmi(wil, "WMI event 0x%04x\n",
+ evt->event.wmi.id);
+ }
+ wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
+ &evt->event.hdr, sizeof(hdr) + len, true);
+
+ /* advance tail */
+ r->tail = r->base + ((r->tail - r->base +
+ sizeof(struct wil6210_mbox_ring_desc)) % r->size);
+ iowrite32(r->tail, wil->csr + HOST_MBOX +
+ offsetof(struct wil6210_mbox_ctl, rx.tail));
+
+ /* add to the pending list */
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+ list_add_tail(&evt->list, &wil->pending_wmi_ev);
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+ {
+ int q = queue_work(wil->wmi_wq,
+ &wil->wmi_event_worker);
+ wil_dbg_wmi(wil, "queue_work -> %d\n", q);
+ }
+ }
+}
+
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+ u16 reply_id, void *reply, u8 reply_size, int to_msec)
+{
+ int rc;
+ int remain;
+
+ mutex_lock(&wil->wmi_mutex);
+
+ rc = __wmi_send(wil, cmdid, buf, len);
+ if (rc)
+ goto out;
+
+ wil->reply_id = reply_id;
+ wil->reply_buf = reply;
+ wil->reply_size = reply_size;
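+ /* wmi_event_handle() completes wmi_ready when the matching reply_id arrives */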
+ remain = wait_for_completion_timeout(&wil->wmi_ready,
+ msecs_to_jiffies(to_msec));
+ if (0 == remain) {
+ wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
+ cmdid, reply_id, to_msec);
+ rc = -ETIME;
+ } else {
+ wil_dbg_wmi(wil,
+ "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
+ cmdid, reply_id,
+ to_msec - jiffies_to_msecs(remain));
+ }
+ wil->reply_id = 0;
+ wil->reply_buf = NULL;
+ wil->reply_size = 0;
+ out:
+ mutex_unlock(&wil->wmi_mutex);
+
+ return rc;
+}
+
+int wmi_echo(struct wil6210_priv *wil)
+{
+ struct wmi_echo_cmd cmd = {
+ .value = cpu_to_le32(0x12345678),
+ };
+
+ return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
+ WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
+}
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
+{
+ struct wmi_set_mac_address_cmd cmd;
+
+ memcpy(cmd.mac, addr, ETH_ALEN);
+
+ wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
+
+ return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype)
+{
+ struct wmi_bcon_ctrl_cmd cmd = {
+ .bcon_interval = cpu_to_le16(bi),
+ .network_type = wmi_nettype,
+ .disable_sec_offload = 1,
+ };
+
+ if (!wil->secure_pcp)
+ cmd.disable_sec = 1;
+
+ return wmi_send(wil, WMI_BCON_CTRL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
+{
+ struct wmi_set_ssid_cmd cmd = {
+ .ssid_len = cpu_to_le32(ssid_len),
+ };
+
+ if (ssid_len > sizeof(cmd.ssid))
+ return -EINVAL;
+
+ memcpy(cmd.ssid, ssid, ssid_len);
+
+ return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_set_ssid_cmd cmd;
+ } __packed reply;
+ int len; /* reply.cmd.ssid_len in CPU order */
+
+ rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID,
+ &reply, sizeof(reply), 20);
+ if (rc)
+ return rc;
+
+ len = le32_to_cpu(reply.cmd.ssid_len);
+ if (len > sizeof(reply.cmd.ssid))
+ return -EINVAL;
+
+ *ssid_len = len;
+ memcpy(ssid, reply.cmd.ssid, len);
+
+ return 0;
+}
+
+int wmi_set_channel(struct wil6210_priv *wil, int channel)
+{
+ struct wmi_set_pcp_channel_cmd cmd = {
+ .channel = channel - 1,
+ };
+
+ return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_channel(struct wil6210_priv *wil, int *channel)
+{
+ int rc;
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_set_pcp_channel_cmd cmd;
+ } __packed reply;
+
+ rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0,
+ WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
+ if (rc)
+ return rc;
+
+ if (reply.cmd.channel > 3)
+ return -EINVAL;
+
+ *channel = reply.cmd.channel + 1;
+
+ return 0;
+}
+
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct wmi_eapol_tx_cmd *cmd;
+ struct ethhdr *eth;
+ u16 eapol_len = skb->len - ETH_HLEN;
+ void *eapol = skb->data + ETH_HLEN;
+ uint i;
+ int rc;
+
+ skb_set_mac_header(skb, 0);
+ eth = eth_hdr(skb);
+ wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
+ for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+ if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
+ goto found_dest;
+ }
+
+ return -EINVAL;
+
+ found_dest:
+ /* build the WMI command carrying the EAPOL frame */
+ cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
+ cmd->eapol_len = cpu_to_le16(eapol_len);
+ memcpy(cmd->eapol, eapol, eapol_len);
+ rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
+ kfree(cmd);
+
+ return rc;
+}
+
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr)
+{
+ struct wmi_delete_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ };
+
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+ const void *mac_addr, int key_len, const void *key)
+{
+ struct wmi_add_cipher_key_cmd cmd = {
+ .key_index = key_index,
+ .key_usage = WMI_KEY_USE_PAIRWISE,
+ .key_len = key_len,
+ };
+
+ if (!key || (key_len > sizeof(cmd.key)))
+ return -EINVAL;
+
+ memcpy(cmd.key, key, key_len);
+ if (mac_addr)
+ memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+ return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
+{
+ int rc;
+ u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
+ struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->mgmt_frm_type = type;
+ /* BUG: FW API defines ieLen as u8. Will fix FW */
+ cmd->ie_len = cpu_to_le16(ie_len);
+ memcpy(cmd->ie_info, ie, ie_len);
+ rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
+ kfree(cmd);
+
+ return rc;
+}
+
+int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_cfg_rx_chain_cmd cmd = {
+ .action = WMI_RX_CHAIN_ADD,
+ .rx_sw_ring = {
+ .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+ .ring_mem_base = cpu_to_le64(vring->pa),
+ .ring_size = cpu_to_le16(vring->size),
+ },
+ .mid = 0, /* TODO - what is it? */
+ .decap_trans_type = WMI_DECAP_TYPE_802_3,
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cfg_rx_chain_done_event evt;
+ } __packed evt;
+ int rc;
+
+ if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+ cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
+ if (ch)
+ cmd.sniffer_cfg.channel = ch->hw_value - 1;
+ cmd.sniffer_cfg.phy_info_mode =
+ cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
+ cmd.sniffer_cfg.phy_support =
+ cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
+ ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+ }
+ /* typical time for secure PCP is 840ms */
+ rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+ WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+ if (rc)
+ return rc;
+
+ vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
+
+ wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n",
+ le32_to_cpu(evt.evt.status), vring->hwtail);
+
+ if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS)
+ rc = -EINVAL;
+
+ return rc;
+}
+
+void wmi_event_flush(struct wil6210_priv *wil)
+{
+ struct pending_wmi_event *evt, *t;
+
+ wil_dbg_wmi(wil, "%s()\n", __func__);
+
+ list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+}
+
+static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ uint i;
+
+ for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
+ if (wmi_evt_handlers[i].eventid == id) {
+ wmi_evt_handlers[i].handler(wil, id, d, len);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void wmi_event_handle(struct wil6210_priv *wil,
+ struct wil6210_mbox_hdr *hdr)
+{
+ u16 len = le16_to_cpu(hdr->len);
+
+ if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
+ (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+ struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
+ void *evt_data = (void *)(&wmi[1]);
+ u16 id = le16_to_cpu(wmi->id);
+ /* check if someone waits for this event */
+ if (wil->reply_id && wil->reply_id == id) {
+ if (wil->reply_buf) {
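+ /* caller supplied a reply buffer - copy the raw WMI reply into it */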
+ memcpy(wil->reply_buf, wmi,
+ min(len, wil->reply_size));
+ } else {
+ wmi_evt_call_handler(wil, id, evt_data,
+ len - sizeof(*wmi));
+ }
+ wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id);
+ complete(&wil->wmi_ready);
+ return;
+ }
+ /* unsolicited event */
+ /* search for handler */
+ if (!wmi_evt_call_handler(wil, id, evt_data,
+ len - sizeof(*wmi))) {
+ wil_err(wil, "Unhandled event 0x%04x\n", id);
+ }
+ } else {
+ wil_err(wil, "Unknown event type\n");
+ print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr, sizeof(*hdr) + len, true);
+ }
+}
+
+/*
+ * Retrieve next WMI event from the pending list
+ */
+static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
+{
+ ulong flags;
+ struct list_head *ret = NULL;
+
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+ if (!list_empty(&wil->pending_wmi_ev)) {
+ ret = wil->pending_wmi_ev.next;
+ list_del(ret);
+ }
+
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Handler for the WMI events
+ */
+void wmi_event_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ wmi_event_worker);
+ struct pending_wmi_event *evt;
+ struct list_head *lh;
+
+ while ((lh = next_wmi_ev(wil)) != NULL) {
+ evt = list_entry(lh, struct pending_wmi_event, list);
+ wmi_event_handle(wil, &evt->event.hdr);
+ kfree(evt);
+ }
+}
+
+void wmi_connect_worker(struct work_struct *work)
+{
+ int rc;
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ wmi_connect_worker);
+
+ if (wil->pending_connect_cid < 0) {
+ wil_err(wil, "No connection pending\n");
+ return;
+ }
+
+ wil_dbg_wmi(wil, "Configure for connection CID %d\n",
+ wil->pending_connect_cid);
+
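+ /* Tx vring 0 is hard-coded; see FIXME in wmi_evt_connect() */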
+ rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE,
+ wil->pending_connect_cid, 0);
+ wil->pending_connect_cid = -1;
+ if (rc == 0)
+ wil_link_on(wil);
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
new file mode 100644
index 000000000000..3bbf87572b07
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -0,0 +1,1116 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file contains the definitions of the Wireless Module Interface (WMI)
+ * protocol for the Wilocity MARLON 60 Gigabit wireless solution.
+ * It includes definitions of all the commands and events.
+ * Commands are messages from the host to the WM.
+ * Events are messages from the WM to the host.
+ */
+
+#ifndef __WILOCITY_WMI_H__
+#define __WILOCITY_WMI_H__
+
+/* General */
+
+#define WMI_MAC_LEN (6)
+#define WMI_PROX_RANGE_NUM (3)
+
+/* List of Commands */
+enum wmi_command_id {
+ WMI_CONNECT_CMDID = 0x0001,
+ WMI_DISCONNECT_CMDID = 0x0003,
+ WMI_START_SCAN_CMDID = 0x0007,
+ WMI_SET_BSS_FILTER_CMDID = 0x0009,
+ WMI_SET_PROBED_SSID_CMDID = 0x000a,
+ WMI_SET_LISTEN_INT_CMDID = 0x000b,
+ WMI_BCON_CTRL_CMDID = 0x000f,
+ WMI_ADD_CIPHER_KEY_CMDID = 0x0016,
+ WMI_DELETE_CIPHER_KEY_CMDID = 0x0017,
+ WMI_SET_APPIE_CMDID = 0x003f,
+ WMI_GET_APPIE_CMDID = 0x0040,
+ WMI_SET_WSC_STATUS_CMDID = 0x0041,
+ WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
+ WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
+ WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300,
+ WMI_MEM_READ_CMDID = 0x0800,
+ WMI_MEM_WR_CMDID = 0x0801,
+ WMI_ECHO_CMDID = 0x0803,
+ WMI_DEEP_ECHO_CMDID = 0x0804,
+ WMI_CONFIG_MAC_CMDID = 0x0805,
+ WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806,
+ WMI_ADD_STATION_CMDID = 0x0807,
+ WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808,
+ WMI_PHY_GET_STATISTICS_CMDID = 0x0809,
+ WMI_FS_TUNE_CMDID = 0x080a,
+ WMI_CORR_MEASURE_CMDID = 0x080b,
+ WMI_TEMP_SENSE_CMDID = 0x080e,
+ WMI_DC_CALIB_CMDID = 0x080f,
+ WMI_SEND_TONE_CMDID = 0x0810,
+ WMI_IQ_TX_CALIB_CMDID = 0x0811,
+ WMI_IQ_RX_CALIB_CMDID = 0x0812,
+ WMI_SET_UCODE_IDLE_CMDID = 0x0813,
+ WMI_SET_WORK_MODE_CMDID = 0x0815,
+ WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816,
+ WMI_MARLON_R_ACTIVATE_CMDID = 0x0817,
+ WMI_MARLON_R_READ_CMDID = 0x0818,
+ WMI_MARLON_R_WRITE_CMDID = 0x0819,
+ WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a,
+ MAC_IO_STATIC_PARAMS_CMDID = 0x081b,
+ MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c,
+ WMI_SILENT_RSSI_CALIB_CMDID = 0x081d,
+ WMI_CFG_RX_CHAIN_CMDID = 0x0820,
+ WMI_VRING_CFG_CMDID = 0x0821,
+ WMI_RX_ON_CMDID = 0x0822,
+ WMI_VRING_BA_EN_CMDID = 0x0823,
+ WMI_VRING_BA_DIS_CMDID = 0x0824,
+ WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
+ WMI_RCP_DELBA_CMDID = 0x0826,
+ WMI_SET_SSID_CMDID = 0x0827,
+ WMI_GET_SSID_CMDID = 0x0828,
+ WMI_SET_PCP_CHANNEL_CMDID = 0x0829,
+ WMI_GET_PCP_CHANNEL_CMDID = 0x082a,
+ WMI_SW_TX_REQ_CMDID = 0x082b,
+ WMI_RX_OFF_CMDID = 0x082c,
+ WMI_READ_MAC_RXQ_CMDID = 0x0830,
+ WMI_READ_MAC_TXQ_CMDID = 0x0831,
+ WMI_WRITE_MAC_RXQ_CMDID = 0x0832,
+ WMI_WRITE_MAC_TXQ_CMDID = 0x0833,
+ WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x0834,
+ WMI_MLME_PUSH_CMDID = 0x0835,
+ WMI_BEAMFORMING_MGMT_CMDID = 0x0836,
+ WMI_BF_TXSS_MGMT_CMDID = 0x0837,
+ WMI_BF_SM_MGMT_CMDID = 0x0838,
+ WMI_BF_RXSS_MGMT_CMDID = 0x0839,
+ WMI_SET_SECTORS_CMDID = 0x0849,
+ WMI_MAINTAIN_PAUSE_CMDID = 0x0850,
+ WMI_MAINTAIN_RESUME_CMDID = 0x0851,
+ WMI_RS_MGMT_CMDID = 0x0852,
+ WMI_RF_MGMT_CMDID = 0x0853,
+ /* Performance monitoring commands */
+ WMI_BF_CTRL_CMDID = 0x0862,
+ WMI_NOTIFY_REQ_CMDID = 0x0863,
+ WMI_GET_STATUS_CMDID = 0x0864,
+ WMI_UNIT_TEST_CMDID = 0x0900,
+ WMI_HICCUP_CMDID = 0x0901,
+ WMI_FLASH_READ_CMDID = 0x0902,
+ WMI_FLASH_WRITE_CMDID = 0x0903,
+ WMI_SECURITY_UNIT_TEST_CMDID = 0x0904,
+
+ WMI_SET_MAC_ADDRESS_CMDID = 0xf003,
+ WMI_ABORT_SCAN_CMDID = 0xf007,
+ WMI_SET_PMK_CMDID = 0xf028,
+
+ WMI_SET_PROMISCUOUS_MODE_CMDID = 0xf041,
+ WMI_GET_PMK_CMDID = 0xf048,
+ WMI_SET_PASSPHRASE_CMDID = 0xf049,
+ WMI_SEND_ASSOC_RES_CMDID = 0xf04a,
+ WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xf04b,
+ WMI_EAPOL_TX_CMDID = 0xf04c,
+ WMI_MAC_ADDR_REQ_CMDID = 0xf04d,
+ WMI_FW_VER_CMDID = 0xf04e,
+};
+
+/*
+ * Commands data structures
+ */
+
+/*
+ * Frame Types
+ */
+enum wmi_mgmt_frame_type {
+ WMI_FRAME_BEACON = 0,
+ WMI_FRAME_PROBE_REQ = 1,
+ WMI_FRAME_PROBE_RESP = 2,
+ WMI_FRAME_ASSOC_REQ = 3,
+ WMI_FRAME_ASSOC_RESP = 4,
+ WMI_NUM_MGMT_FRAME,
+};
+
+/*
+ * WMI_CONNECT_CMDID
+ */
+enum wmi_network_type {
+ WMI_NETTYPE_INFRA = 0x01,
+ WMI_NETTYPE_ADHOC = 0x02,
+ WMI_NETTYPE_ADHOC_CREATOR = 0x04,
+ WMI_NETTYPE_AP = 0x10,
+ WMI_NETTYPE_P2P = 0x20,
+ WMI_NETTYPE_WBE = 0x40, /* PCIE over 60g */
+};
+
+enum wmi_dot11_auth_mode {
+ WMI_AUTH11_OPEN = 0x01,
+ WMI_AUTH11_SHARED = 0x02,
+ WMI_AUTH11_LEAP = 0x04,
+ WMI_AUTH11_WSC = 0x08,
+};
+
+enum wmi_auth_mode {
+ WMI_AUTH_NONE = 0x01,
+ WMI_AUTH_WPA = 0x02,
+ WMI_AUTH_WPA2 = 0x04,
+ WMI_AUTH_WPA_PSK = 0x08,
+ WMI_AUTH_WPA2_PSK = 0x10,
+ WMI_AUTH_WPA_CCKM = 0x20,
+ WMI_AUTH_WPA2_CCKM = 0x40,
+};
+
+enum wmi_crypto_type {
+ WMI_CRYPT_NONE = 0x01,
+ WMI_CRYPT_WEP = 0x02,
+ WMI_CRYPT_TKIP = 0x04,
+ WMI_CRYPT_AES = 0x08,
+ WMI_CRYPT_AES_GCMP = 0x20,
+};
+
+
+enum wmi_connect_ctrl_flag_bits {
+ WMI_CONNECT_ASSOC_POLICY_USER = 0x0001,
+ WMI_CONNECT_SEND_REASSOC = 0x0002,
+ WMI_CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
+ WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008,
+ WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010,
+ WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020,
+ WMI_CONNECT_DO_WPA_OFFLOAD = 0x0040,
+ WMI_CONNECT_DO_NOT_DEAUTH = 0x0080,
+};
+
+#define WMI_MAX_SSID_LEN (32)
+
+struct wmi_connect_cmd {
+ u8 network_type;
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 pairwise_crypto_type;
+ u8 pairwise_crypto_len;
+ u8 group_crypto_type;
+ u8 group_crypto_len;
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ u8 channel;
+ u8 reserved0;
+ u8 bssid[WMI_MAC_LEN];
+ __le32 ctrl_flags;
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 reserved1[2];
+} __packed;
+
+
+/*
+ * WMI_RECONNECT_CMDID
+ */
+struct wmi_reconnect_cmd {
+ u8 channel; /* hint */
+ u8 reserved;
+ u8 bssid[WMI_MAC_LEN]; /* mandatory if set */
+} __packed;
+
+
+/*
+ * WMI_SET_PMK_CMDID
+ */
+
+#define WMI_MIN_KEY_INDEX (0)
+#define WMI_MAX_KEY_INDEX (3)
+#define WMI_MAX_KEY_LEN (32)
+#define WMI_PASSPHRASE_LEN (64)
+#define WMI_PMK_LEN (32)
+
+struct wmi_set_pmk_cmd {
+ u8 pmk[WMI_PMK_LEN];
+} __packed;
+
+
+/*
+ * WMI_SET_PASSPHRASE_CMDID
+ */
+struct wmi_set_passphrase_cmd {
+ u8 ssid[WMI_MAX_SSID_LEN];
+ u8 passphrase[WMI_PASSPHRASE_LEN];
+ u8 ssid_len;
+ u8 passphrase_len;
+} __packed;
+
+/*
+ * WMI_ADD_CIPHER_KEY_CMDID
+ */
+enum wmi_key_usage {
+ WMI_KEY_USE_PAIRWISE = 0,
+ WMI_KEY_USE_GROUP = 1,
+ WMI_KEY_USE_TX = 2, /* default Tx Key - Static WEP only */
+};
+
+struct wmi_add_cipher_key_cmd {
+ u8 key_index;
+ u8 key_type;
+ u8 key_usage; /* enum wmi_key_usage */
+ u8 key_len;
+ u8 key_rsc[8]; /* key replay sequence counter */
+ u8 key[WMI_MAX_KEY_LEN];
+ u8 key_op_ctrl; /* Additional Key Control information */
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+/*
+ * WMI_DELETE_CIPHER_KEY_CMDID
+ */
+struct wmi_delete_cipher_key_cmd {
+ u8 key_index;
+ u8 mac[WMI_MAC_LEN];
+} __packed;
+
+
+/*
+ * WMI_START_SCAN_CMDID
+ *
+ * Start L1 scan operation
+ *
+ * Returned events:
+ * - WMI_RX_MGMT_PACKET_EVENTID - for every probe resp.
+ * - WMI_SCAN_COMPLETE_EVENTID
+ */
+enum wmi_scan_type {
+ WMI_LONG_SCAN = 0,
+ WMI_SHORT_SCAN = 1,
+};
+
+struct wmi_start_scan_cmd {
+ u8 reserved[8];
+ __le32 home_dwell_time; /* Max duration in the home channel (ms) */
+ __le32 force_scan_interval; /* Time interval between scans (ms) */
+ u8 scan_type; /* wmi_scan_type */
+ u8 num_channels; /* how many channels follow */
+ struct {
+ u8 channel;
+ u8 reserved;
+ } channel_list[0]; /* channel IDs */
+ /* 0 - 58320 MHz */
+ /* 1 - 60480 MHz */
+ /* 2 - 62640 MHz */
+} __packed;
+
+/*
+ * WMI_SET_PROBED_SSID_CMDID
+ */
+#define MAX_PROBED_SSID_INDEX (15)
+
+enum wmi_ssid_flag {
+ WMI_SSID_FLAG_DISABLE = 0, /* disables entry */
+ WMI_SSID_FLAG_SPECIFIC = 1, /* probes specified ssid */
+ WMI_SSID_FLAG_ANY = 2, /* probes for any ssid */
+};
+
+struct wmi_probed_ssid_cmd {
+ u8 entry_index; /* 0 to MAX_PROBED_SSID_INDEX */
+ u8 flag; /* enum wmi_ssid_flag */
+ u8 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_APPIE_CMDID
+ * Add Application specified IE to a management frame
+ */
+struct wmi_set_appie_cmd {
+ u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
+ u8 reserved;
+ __le16 ie_len; /* Length of the IE to be added to MGMT frame */
+ u8 ie_info[0];
+} __packed;
+
+#define WMI_MAX_IE_LEN (1024)
+
+struct wmi_pxmt_range_cfg_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 range;
+} __packed;
+
+struct wmi_pxmt_snr2_range_cfg_cmd {
+ s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
+} __packed;
+
+/*
+ * WMI_RF_MGMT_CMDID
+ */
+enum wmi_rf_mgmt_type {
+ WMI_RF_MGMT_W_DISABLE = 0,
+ WMI_RF_MGMT_W_ENABLE = 1,
+ WMI_RF_MGMT_GET_STATUS = 2,
+};
+
+struct wmi_rf_mgmt_cmd {
+ __le32 rf_mgmt_type;
+} __packed;
+
+/*
+ * WMI_SET_SSID_CMDID
+ */
+struct wmi_set_ssid_cmd {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_PCP_CHANNEL_CMDID
+ */
+struct wmi_set_pcp_channel_cmd {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_BCON_CTRL_CMDID
+ */
+struct wmi_bcon_ctrl_cmd {
+ __le16 bcon_interval;
+ __le16 frag_num;
+ __le64 ss_mask;
+ u8 network_type;
+ u8 reserved;
+ u8 disable_sec_offload;
+ u8 disable_sec;
+} __packed;
+
+/*
+ * WMI_SW_TX_REQ_CMDID
+ */
+struct wmi_sw_tx_req_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 len;
+ u8 payload[0];
+} __packed;
+
+/*
+ * WMI_VRING_CFG_CMDID
+ */
+
+struct wmi_sw_ring_cfg {
+ __le64 ring_mem_base;
+ __le16 ring_size;
+ __le16 max_mpdu_size;
+} __packed;
+
+struct wmi_vring_cfg_schd {
+ __le16 priority;
+ __le16 timeslot_us;
+} __packed;
+
+enum wmi_vring_cfg_encap_trans_type {
+ WMI_VRING_ENC_TYPE_802_3 = 0,
+ WMI_VRING_ENC_TYPE_NATIVE_WIFI = 1,
+};
+
+enum wmi_vring_cfg_ds_cfg {
+ WMI_VRING_DS_PBSS = 0,
+ WMI_VRING_DS_STATION = 1,
+ WMI_VRING_DS_AP = 2,
+ WMI_VRING_DS_ADDR4 = 3,
+};
+
+enum wmi_vring_cfg_nwifi_ds_trans_type {
+ WMI_NWIFI_TX_TRANS_MODE_NO = 0,
+ WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 1,
+ WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 2,
+};
+
+enum wmi_vring_cfg_schd_params_priority {
+ WMI_SCH_PRIO_REGULAR = 0,
+ WMI_SCH_PRIO_HIGH = 1,
+};
+
+struct wmi_vring_cfg {
+ struct wmi_sw_ring_cfg tx_sw_ring;
+ u8 ringid; /* 0-23 vrings */
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 encap_trans_type;
+ u8 ds_cfg; /* 802.3 DS cfg */
+ u8 nwifi_ds_trans_type;
+
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
+ #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
+ #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
+ u8 mac_ctrl;
+
+ #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
+ #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
+ #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
+ u8 to_resolution;
+ u8 agg_max_wsize;
+ struct wmi_vring_cfg_schd schd_params;
+} __packed;
+
+enum wmi_vring_cfg_cmd_action {
+ WMI_VRING_CMD_ADD = 0,
+ WMI_VRING_CMD_MODIFY = 1,
+ WMI_VRING_CMD_DELETE = 2,
+};
+
+struct wmi_vring_cfg_cmd {
+ __le32 action;
+ struct wmi_vring_cfg vring_cfg;
+} __packed;
+
+/*
+ * WMI_VRING_BA_EN_CMDID
+ */
+struct wmi_vring_ba_en_cmd {
+ u8 ringid;
+ u8 agg_max_wsize;
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_VRING_BA_DIS_CMDID
+ */
+struct wmi_vring_ba_dis_cmd {
+ u8 ringid;
+ u8 reserved;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_CMDID
+ */
+struct wmi_notify_req_cmd {
+ u8 cid;
+ u8 reserved[3];
+ __le32 interval_usec;
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_CMDID
+ */
+enum wmi_sniffer_cfg_mode {
+ WMI_SNIFFER_OFF = 0,
+ WMI_SNIFFER_ON = 1,
+};
+
+enum wmi_sniffer_cfg_phy_info_mode {
+ WMI_SNIFFER_PHY_INFO_DISABLED = 0,
+ WMI_SNIFFER_PHY_INFO_ENABLED = 1,
+};
+
+enum wmi_sniffer_cfg_phy_support {
+ WMI_SNIFFER_CP = 0,
+ WMI_SNIFFER_DP = 1,
+ WMI_SNIFFER_BOTH_PHYS = 2,
+};
+
+struct wmi_sniffer_cfg {
+ __le32 mode; /* enum wmi_sniffer_cfg_mode */
+ __le32 phy_info_mode; /* enum wmi_sniffer_cfg_phy_info_mode */
+ __le32 phy_support; /* enum wmi_sniffer_cfg_phy_support */
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+enum wmi_cfg_rx_chain_cmd_action {
+ WMI_RX_CHAIN_ADD = 0,
+ WMI_RX_CHAIN_DEL = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_decap_trans_type {
+ WMI_DECAP_TYPE_802_3 = 0,
+ WMI_DECAP_TYPE_NATIVE_WIFI = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
+ WMI_NWIFI_RX_TRANS_MODE_NO = 0,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 1,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2,
+};
+
+struct wmi_cfg_rx_chain_cmd {
+ __le32 action;
+ struct wmi_sw_ring_cfg rx_sw_ring;
+ u8 mid;
+ u8 decap_trans_type;
+
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
+ #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+ u8 l2_802_3_offload_ctrl;
+
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
+ #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
+ u8 l2_nwifi_offload_ctrl;
+
+ u8 vlan_id;
+ u8 nwifi_ds_trans_type;
+
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
+ #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
+ #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
+ u8 l3_l4_ctrl;
+
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
+ #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
+ u8 ring_ctrl;
+
+ __le16 prefetch_thrsh;
+ __le16 wb_thrsh;
+ __le32 itr_value;
+ __le16 host_thrsh;
+ u8 reserved[2];
+ struct wmi_sniffer_cfg sniffer_cfg;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_RESP_CMDID
+ */
+struct wmi_rcp_addba_resp_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ __le16 status_code;
+ __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_RCP_DELBA_CMDID
+ */
+struct wmi_rcp_delba_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 reserved;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_CMDID
+ */
+struct wmi_rcp_addba_req_cmd {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ /* ieee80211_ba_parameterset field as it was received */
+ __le16 ba_param_set;
+ __le16 ba_timeout;
+ /* ieee80211_ba_seqstrl field as it was received */
+ __le16 ba_seq_ctrl;
+} __packed;
+
+/*
+ * WMI_SET_MAC_ADDRESS_CMDID
+ */
+struct wmi_set_mac_address_cmd {
+ u8 mac[WMI_MAC_LEN];
+ u8 reserved[2];
+} __packed;
+
+
+/*
+* WMI_EAPOL_TX_CMDID
+*/
+struct wmi_eapol_tx_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 eapol_len;
+ u8 eapol[0];
+} __packed;
+
+/*
+ * WMI_ECHO_CMDID
+ *
+ * Check FW is alive
+ *
+ * WMI_DEEP_ECHO_CMDID
+ *
+ * Check FW and ucode are alive
+ *
+ * Returned event: WMI_ECHO_RSP_EVENTID
+ * same event for both commands
+ */
+struct wmi_echo_cmd {
+ __le32 value;
+} __packed;
+
+/*
+ * WMI Events
+ */
+
+/*
+ * List of Events (target to host)
+ */
+enum wmi_event_id {
+ WMI_IMM_RSP_EVENTID = 0x0000,
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID = 0x1002,
+ WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_SCAN_COMPLETE_EVENTID = 0x100a,
+ WMI_REPORT_STATISTICS_EVENTID = 0x100b,
+ WMI_RD_MEM_RSP_EVENTID = 0x1800,
+ WMI_FW_READY_EVENTID = 0x1801,
+ WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200,
+ WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_CONFIG_MAC_DONE_EVENTID = 0x1805,
+ WMI_CONFIG_PHY_DEBUG_DONE_EVENTID = 0x1806,
+ WMI_ADD_STATION_DONE_EVENTID = 0x1807,
+ WMI_ADD_DEBUG_TX_PCKT_DONE_EVENTID = 0x1808,
+ WMI_PHY_GET_STATISTICS_EVENTID = 0x1809,
+ WMI_FS_TUNE_DONE_EVENTID = 0x180a,
+ WMI_CORR_MEASURE_DONE_EVENTID = 0x180b,
+ WMI_TEMP_SENSE_DONE_EVENTID = 0x180e,
+ WMI_DC_CALIB_DONE_EVENTID = 0x180f,
+ WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
+ WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
+ WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
+ WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
+ WMI_MARLON_R_ACTIVATE_DONE_EVENTID = 0x1817,
+ WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
+ WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
+ WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
+ WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d,
+
+ WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
+ WMI_VRING_CFG_DONE_EVENTID = 0x1821,
+ WMI_RX_ON_DONE_EVENTID = 0x1822,
+ WMI_BA_STATUS_EVENTID = 0x1823,
+ WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
+ WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
+ WMI_DELBA_EVENTID = 0x1826,
+ WMI_GET_SSID_EVENTID = 0x1828,
+ WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
+ WMI_SW_TX_COMPLETE_EVENTID = 0x182b,
+ WMI_RX_OFF_DONE_EVENTID = 0x182c,
+
+ WMI_READ_MAC_RXQ_EVENTID = 0x1830,
+ WMI_READ_MAC_TXQ_EVENTID = 0x1831,
+ WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
+ WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
+ WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
+
+ WMI_BEAFORMING_MGMT_DONE_EVENTID = 0x1836,
+ WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
+ WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
+ WMI_RS_MGMT_DONE_EVENTID = 0x1852,
+ WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
+ WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
+ WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+
+ /* Performance monitoring events */
+ WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
+ WMI_WBE_LINKDOWN_EVENTID = 0x1861,
+
+ WMI_BF_CTRL_DONE_EVENTID = 0x1862,
+ WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
+ WMI_GET_STATUS_DONE_EVENTID = 0x1864,
+
+ WMI_UNIT_TEST_EVENTID = 0x1900,
+ WMI_FLASH_READ_DONE_EVENTID = 0x1902,
+ WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+
+ WMI_SET_CHANNEL_EVENTID = 0x9000,
+ WMI_ASSOC_REQ_EVENTID = 0x9001,
+ WMI_EAPOL_RX_EVENTID = 0x9002,
+ WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
+ WMI_FW_VER_EVENTID = 0x9004,
+};
+
+/*
+ * Events data structures
+ */
+
+/*
+ * WMI_RF_MGMT_STATUS_EVENTID
+ */
+enum wmi_rf_status {
+ WMI_RF_ENABLED = 0,
+ WMI_RF_DISABLED_HW = 1,
+ WMI_RF_DISABLED_SW = 2,
+ WMI_RF_DISABLED_HW_SW = 3,
+};
+
+struct wmi_rf_mgmt_status_event {
+ __le32 rf_status;
+} __packed;
+
+/*
+ * WMI_GET_STATUS_DONE_EVENTID
+ */
+struct wmi_get_status_done_event {
+ __le32 is_associated;
+ u8 cid;
+ u8 reserved0[3];
+ u8 bssid[WMI_MAC_LEN];
+ u8 channel;
+ u8 reserved1;
+ u8 network_type;
+ u8 reserved2[3];
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+ __le32 rf_status;
+ __le32 is_secured;
+} __packed;
+
+/*
+ * WMI_FW_VER_EVENTID
+ */
+struct wmi_fw_ver_event {
+ u8 major;
+ u8 minor;
+ __le16 subminor;
+ __le16 build;
+} __packed;
+
+/*
+* WMI_MAC_ADDR_RESP_EVENTID
+*/
+struct wmi_mac_addr_resp_event {
+ u8 mac[WMI_MAC_LEN];
+ u8 auth_mode;
+ u8 crypt_mode;
+ __le32 offload_mode;
+} __packed;
+
+/*
+* WMI_EAPOL_RX_EVENTID
+*/
+struct wmi_eapol_rx_event {
+ u8 src_mac[WMI_MAC_LEN];
+ __le16 eapol_len;
+ u8 eapol[0];
+} __packed;
+
+/*
+* WMI_READY_EVENTID
+*/
+enum wmi_phy_capability {
+ WMI_11A_CAPABILITY = 1,
+ WMI_11G_CAPABILITY = 2,
+ WMI_11AG_CAPABILITY = 3,
+ WMI_11NA_CAPABILITY = 4,
+ WMI_11NG_CAPABILITY = 5,
+ WMI_11NAG_CAPABILITY = 6,
+ WMI_11AD_CAPABILITY = 7,
+ WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY,
+};
+
+struct wmi_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+ u8 mac[WMI_MAC_LEN];
+ u8 phy_capability; /* enum wmi_phy_capability */
+ u8 reserved;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_DONE_EVENTID
+ */
+struct wmi_notify_req_done_event {
+ __le32 status;
+ __le64 tsf;
+ __le32 snr_val;
+ __le32 tx_tpt;
+ __le32 tx_goodput;
+ __le32 rx_goodput;
+ __le16 bf_mcs;
+ __le16 my_rx_sector;
+ __le16 my_tx_sector;
+ __le16 other_rx_sector;
+ __le16 other_tx_sector;
+ __le16 range;
+} __packed;
+
+/*
+ * WMI_CONNECT_EVENTID
+ */
+struct wmi_connect_event {
+ u8 channel;
+ u8 reserved0;
+ u8 bssid[WMI_MAC_LEN];
+ __le16 listen_interval;
+ __le16 beacon_interval;
+ u8 network_type;
+ u8 reserved1[3];
+ u8 beacon_ie_len;
+ u8 assoc_req_len;
+ u8 assoc_resp_len;
+ u8 cid;
+ u8 reserved2[3];
+ u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_DISCONNECT_EVENTID
+ */
+enum wmi_disconnect_reason {
+ WMI_DIS_REASON_NO_NETWORK_AVAIL = 1,
+ WMI_DIS_REASON_LOST_LINK = 2, /* bmiss */
+ WMI_DIS_REASON_DISCONNECT_CMD = 3,
+ WMI_DIS_REASON_BSS_DISCONNECTED = 4,
+ WMI_DIS_REASON_AUTH_FAILED = 5,
+ WMI_DIS_REASON_ASSOC_FAILED = 6,
+ WMI_DIS_REASON_NO_RESOURCES_AVAIL = 7,
+ WMI_DIS_REASON_CSERV_DISCONNECT = 8,
+ WMI_DIS_REASON_INVALID_PROFILE = 10,
+ WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 11,
+ WMI_DIS_REASON_PROFILE_MISMATCH = 12,
+ WMI_DIS_REASON_CONNECTION_EVICTED = 13,
+ WMI_DIS_REASON_IBSS_MERGE = 14,
+};
+
+struct wmi_disconnect_event {
+ __le16 protocol_reason_status; /* reason code, see 802.11 spec. */
+ u8 bssid[WMI_MAC_LEN]; /* set if known */
+ u8 disconnect_reason; /* see enum wmi_disconnect_reason */
+ u8 assoc_resp_len;
+ u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_SCAN_COMPLETE_EVENTID
+ */
+struct wmi_scan_complete_event {
+ __le32 status;
+} __packed;
+
+/*
+ * WMI_BA_STATUS_EVENTID
+ */
+enum wmi_vring_ba_status {
+ WMI_BA_AGREED = 0,
+ WMI_BA_NON_AGREED = 1,
+};
+
+struct wmi_vring_ba_status_event {
+ __le16 status;
+ u8 reserved[2];
+ u8 ringid;
+ u8 agg_wsize;
+ __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_DELBA_EVENTID
+ */
+struct wmi_delba_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 from_initiator;
+ __le16 reason;
+} __packed;
+
+/*
+ * WMI_VRING_CFG_DONE_EVENTID
+ */
+enum wmi_vring_cfg_done_event_status {
+ WMI_VRING_CFG_SUCCESS = 0,
+ WMI_VRING_CFG_FAILURE = 1,
+};
+
+struct wmi_vring_cfg_done_event {
+ u8 ringid;
+ u8 status;
+ u8 reserved[2];
+ __le32 tx_vring_tail_ptr;
+} __packed;
+
+/*
+ * WMI_ADDBA_RESP_SENT_EVENTID
+ */
+enum wmi_rcp_addba_resp_sent_event_status {
+ WMI_ADDBA_SUCCESS = 0,
+ WMI_ADDBA_FAIL = 1,
+};
+
+struct wmi_rcp_addba_resp_sent_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 reserved;
+ __le16 status;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_EVENTID
+ */
+struct wmi_rcp_addba_req_event {
+
+ #define CIDXTID_CID_POS (0)
+ #define CIDXTID_CID_LEN (4)
+ #define CIDXTID_CID_MSK (0xF)
+ #define CIDXTID_TID_POS (4)
+ #define CIDXTID_TID_LEN (4)
+ #define CIDXTID_TID_MSK (0xF0)
+ u8 cidxtid;
+
+ u8 dialog_token;
+ __le16 ba_param_set; /* ieee80211_ba_parameterset as it was received */
+ __le16 ba_timeout;
+ __le16 ba_seq_ctrl; /* ieee80211_ba_seqstrl field as it was received */
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_DONE_EVENTID
+ */
+enum wmi_cfg_rx_chain_done_event_status {
+ WMI_CFG_RX_CHAIN_SUCCESS = 1,
+};
+
+struct wmi_cfg_rx_chain_done_event {
+ __le32 rx_ring_tail_ptr; /* Rx V-Ring Tail pointer */
+ __le32 status;
+} __packed;
+
+/*
+ * WMI_WBE_LINKDOWN_EVENTID
+ */
+enum wmi_wbe_link_down_event_reason {
+ WMI_WBE_REASON_USER_REQUEST = 0,
+ WMI_WBE_REASON_RX_DISASSOC = 1,
+ WMI_WBE_REASON_BAD_PHY_LINK = 2,
+};
+
+struct wmi_wbe_link_down_event {
+ u8 cid;
+ u8 reserved[3];
+ __le32 reason;
+} __packed;
+
+/*
+ * WMI_DATA_PORT_OPEN_EVENTID
+ */
+struct wmi_data_port_open_event {
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_PCP_CHANNEL_EVENTID
+ */
+struct wmi_get_pcp_channel_event {
+ u8 channel;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_SW_TX_COMPLETE_EVENTID
+ */
+enum wmi_sw_tx_status {
+ WMI_TX_SW_STATUS_SUCCESS = 0,
+ WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 1,
+ WMI_TX_SW_STATUS_FAILED_TX = 2,
+};
+
+struct wmi_sw_tx_complete_event {
+ u8 status; /* enum wmi_sw_tx_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_SSID_EVENTID
+ */
+struct wmi_get_ssid_event {
+ __le32 ssid_len;
+ u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_RX_MGMT_PACKET_EVENTID
+ */
+struct wmi_rx_mgmt_info {
+ u8 mcs;
+ s8 snr;
+ __le16 range;
+ __le16 stype;
+ __le16 status;
+ __le32 len;
+ u8 qid;
+ u8 mid;
+ u8 cid;
+ u8 channel; /* From Radio MNGR */
+} __packed;
+
+struct wmi_rx_mgmt_packet_event {
+ struct wmi_rx_mgmt_info info;
+ u8 payload[0];
+} __packed;
+
+/*
+ * WMI_ECHO_RSP_EVENTID
+ */
+struct wmi_echo_event {
+ __le32 echoed_value;
+} __packed;
+
+#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index ded03d226a71..b42930f457c2 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -79,10 +79,9 @@ static int atmel_probe(struct pcmcia_device *p_dev)
/* Allocate space for private device-specific data */
local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local) {
- printk(KERN_ERR "atmel_cs: no memory for new device\n");
+ if (!local)
return -ENOMEM;
- }
+
p_dev->priv = local;
return atmel_config(p_dev);
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 7a28d21ac389..287c6b670a36 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -78,8 +78,8 @@ config B43_PCMCIA
If unsure, say N.
config B43_SDIO
- bool "Broadcom 43xx SDIO device support (EXPERIMENTAL)"
- depends on B43 && SSB_SDIOHOST_POSSIBLE && EXPERIMENTAL
+ bool "Broadcom 43xx SDIO device support"
+ depends on B43 && SSB_SDIOHOST_POSSIBLE
select SSB_SDIOHOST
---help---
Broadcom 43xx device support for Soft-MAC SDIO devices.
@@ -109,8 +109,8 @@ config B43_PIO
default y
config B43_PHY_N
- bool "Support for 802.11n (N-PHY) devices (EXPERIMENTAL)"
- depends on B43 && EXPERIMENTAL
+ bool "Support for 802.11n (N-PHY) devices"
+ depends on B43
---help---
Support for the N-PHY.
@@ -130,8 +130,8 @@ config B43_PHY_LP
(802.11a support is optional, and currently disabled).
config B43_PHY_HT
- bool "Support for HT-PHY (high throughput) devices (EXPERIMENTAL)"
- depends on B43 && EXPERIMENTAL
+ bool "Support for HT-PHY (high throughput) devices"
+ depends on B43
---help---
Support for the HT-PHY.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index b298e5d68be2..10e288d470e7 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -7,6 +7,7 @@
#include <linux/hw_random.h>
#include <linux/bcma/bcma.h>
#include <linux/ssb/ssb.h>
+#include <linux/completion.h>
#include <net/mac80211.h>
#include "debugfs.h"
@@ -722,6 +723,10 @@ enum b43_firmware_file_type {
struct b43_request_fw_context {
/* The device we are requesting the fw for. */
struct b43_wldev *dev;
+ /* a completion event structure needed if this call is asynchronous */
+ struct completion fw_load_complete;
+ /* a pointer to the firmware object */
+ const struct firmware *blob;
/* The type of firmware to request. */
enum b43_firmware_file_type req_type;
/* Error messages for each firmware type. */
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 315b96ed1d90..9fdd1983079c 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -169,7 +169,7 @@ struct b43_dmadesc_generic {
/* DMA engine tuning knobs */
#define B43_TXRING_SLOTS 256
-#define B43_RXRING_SLOTS 64
+#define B43_RXRING_SLOTS 256
#define B43_DMA0_RX_FW598_BUFSIZE (B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN)
#define B43_DMA0_RX_FW351_BUFSIZE (B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN)
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 16ab280359bd..806e34c19281 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2088,11 +2088,18 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
b43warn(wl, text);
}
+static void b43_fw_cb(const struct firmware *firmware, void *context)
+{
+ struct b43_request_fw_context *ctx = context;
+
+ ctx->blob = firmware;
+ complete(&ctx->fw_load_complete);
+}
+
int b43_do_request_fw(struct b43_request_fw_context *ctx,
const char *name,
- struct b43_firmware_file *fw)
+ struct b43_firmware_file *fw, bool async)
{
- const struct firmware *blob;
struct b43_fw_header *hdr;
u32 size;
int err;
@@ -2131,11 +2138,31 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
B43_WARN_ON(1);
return -ENOSYS;
}
- err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
+ if (async) {
+ /* do this part asynchronously */
+ init_completion(&ctx->fw_load_complete);
+ err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
+ ctx->dev->dev->dev, GFP_KERNEL,
+ ctx, b43_fw_cb);
+ if (err < 0) {
+ pr_err("Unable to load firmware\n");
+ return err;
+ }
+ /* stall here until fw ready */
+ wait_for_completion(&ctx->fw_load_complete);
+ if (ctx->blob)
+ goto fw_ready;
+ /* On some ARM systems, the async request will fail, but the next
+ * sync request works. For this reason, we fall through here.
+ */
+ }
+ err = request_firmware(&ctx->blob, ctx->fwname,
+ ctx->dev->dev->dev);
if (err == -ENOENT) {
snprintf(ctx->errors[ctx->req_type],
sizeof(ctx->errors[ctx->req_type]),
- "Firmware file \"%s\" not found\n", ctx->fwname);
+ "Firmware file \"%s\" not found\n",
+ ctx->fwname);
return err;
} else if (err) {
snprintf(ctx->errors[ctx->req_type],
@@ -2144,14 +2171,15 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
ctx->fwname, err);
return err;
}
- if (blob->size < sizeof(struct b43_fw_header))
+fw_ready:
+ if (ctx->blob->size < sizeof(struct b43_fw_header))
goto err_format;
- hdr = (struct b43_fw_header *)(blob->data);
+ hdr = (struct b43_fw_header *)(ctx->blob->data);
switch (hdr->type) {
case B43_FW_TYPE_UCODE:
case B43_FW_TYPE_PCM:
size = be32_to_cpu(hdr->size);
- if (size != blob->size - sizeof(struct b43_fw_header))
+ if (size != ctx->blob->size - sizeof(struct b43_fw_header))
goto err_format;
/* fallthrough */
case B43_FW_TYPE_IV:
@@ -2162,7 +2190,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
goto err_format;
}
- fw->data = blob;
+ fw->data = ctx->blob;
fw->filename = name;
fw->type = ctx->req_type;
@@ -2172,7 +2200,7 @@ err_format:
snprintf(ctx->errors[ctx->req_type],
sizeof(ctx->errors[ctx->req_type]),
"Firmware file \"%s\" format error.\n", ctx->fwname);
- release_firmware(blob);
+ release_firmware(ctx->blob);
return -EPROTO;
}
@@ -2223,7 +2251,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
goto err_no_ucode;
}
}
- err = b43_do_request_fw(ctx, filename, &fw->ucode);
+ err = b43_do_request_fw(ctx, filename, &fw->ucode, true);
if (err)
goto err_load;
@@ -2235,7 +2263,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
else
goto err_no_pcm;
fw->pcm_request_failed = false;
- err = b43_do_request_fw(ctx, filename, &fw->pcm);
+ err = b43_do_request_fw(ctx, filename, &fw->pcm, false);
if (err == -ENOENT) {
/* We did not find a PCM file? Not fatal, but
* core rev <= 10 must do without hwcrypto then. */
@@ -2296,7 +2324,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
default:
goto err_no_initvals;
}
- err = b43_do_request_fw(ctx, filename, &fw->initvals);
+ err = b43_do_request_fw(ctx, filename, &fw->initvals, false);
if (err)
goto err_load;
@@ -2355,7 +2383,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
default:
goto err_no_initvals;
}
- err = b43_do_request_fw(ctx, filename, &fw->initvals_band);
+ err = b43_do_request_fw(ctx, filename, &fw->initvals_band, false);
if (err)
goto err_load;
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index 8c684cd33529..abac25ee958d 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -137,9 +137,8 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on);
struct b43_request_fw_context;
-int b43_do_request_fw(struct b43_request_fw_context *ctx,
- const char *name,
- struct b43_firmware_file *fw);
+int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name,
+ struct b43_firmware_file *fw, bool async);
void b43_do_release_fw(struct b43_firmware_file *fw);
#endif /* B43_MAIN_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 97d4e27bf36f..aaca60c6f575 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3226,8 +3226,6 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
{
struct nphy_gain_ctl_workaround_entry *e;
u8 phy_idx;
- u8 tr_iso = ghz5 ? dev->dev->bus_sprom->fem.ghz5.tr_iso :
- dev->dev->bus_sprom->fem.ghz2.tr_iso;
if (!ghz5 && dev->phy.rev >= 6 && dev->phy.radio_rev == 11)
return &nphy_gain_ctl_wa_phy6_radio11_ghz2;
@@ -3249,6 +3247,10 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
!b43_channel_type_is_40mhz(dev->phy.channel_type))
e->cliplo_gain = 0x2d;
} else if (!ghz5 && dev->phy.rev >= 5) {
+ static const int gain_data[] = {0x0062, 0x0064, 0x006a, 0x106a,
+ 0x106c, 0x1074, 0x107c, 0x207c};
+ u8 tr_iso = dev->dev->bus_sprom->fem.ghz2.tr_iso;
+
if (ext_lna) {
e->rfseq_init[0] &= ~0x4000;
e->rfseq_init[1] &= ~0x4000;
@@ -3256,26 +3258,10 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
e->rfseq_init[3] &= ~0x4000;
e->init_gain &= ~0x4000;
}
- switch (tr_iso) {
- case 0:
- e->cliplo_gain = 0x0062;
- case 1:
- e->cliplo_gain = 0x0064;
- case 2:
- e->cliplo_gain = 0x006a;
- case 3:
- e->cliplo_gain = 0x106a;
- case 4:
- e->cliplo_gain = 0x106c;
- case 5:
- e->cliplo_gain = 0x1074;
- case 6:
- e->cliplo_gain = 0x107c;
- case 7:
- e->cliplo_gain = 0x207c;
- default:
- e->cliplo_gain = 0x106a;
- }
+ if (tr_iso > 7)
+ tr_iso = 3;
+ e->cliplo_gain = gain_data[tr_iso];
+
} else if (ghz5 && dev->phy.rev == 4 && ext_lna) {
e->rfseq_init[0] &= ~0x4000;
e->rfseq_init[1] &= ~0x4000;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 1a6661a9f008..756e19fc2795 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -26,6 +26,7 @@ brcmfmac-objs += \
wl_cfg80211.o \
fwil.o \
fweh.o \
+ p2p.o \
dhd_cdc.o \
dhd_common.o \
dhd_linux.o
@@ -37,4 +38,4 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMDBG) += \
- dhd_dbg.o \ No newline at end of file
+ dhd_dbg.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index be35a2f99b1c..11fd1c735589 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -15,8 +15,6 @@
*/
/* ****************** SDIO CARD Interface Functions **************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/export.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index d33e5598611b..d92d373733d7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -14,8 +14,6 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/sdio.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index fd672bf53867..ef6f23be6d32 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -39,6 +39,7 @@
#define BRCMF_C_GET_BSSID 23
#define BRCMF_C_GET_SSID 25
#define BRCMF_C_SET_SSID 26
+#define BRCMF_C_TERMINATED 28
#define BRCMF_C_GET_CHANNEL 29
#define BRCMF_C_SET_CHANNEL 30
#define BRCMF_C_GET_SRL 31
@@ -71,6 +72,7 @@
#define BRCMF_C_SET_WSEC 134
#define BRCMF_C_GET_PHY_NOISE 135
#define BRCMF_C_GET_BSS_INFO 136
+#define BRCMF_C_SET_SCB_TIMEOUT 158
#define BRCMF_C_GET_PHYLIST 180
#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
@@ -148,6 +150,7 @@
#define BRCMF_E_REASON_MINTXRATE 9
#define BRCMF_E_REASON_TXFAIL 10
+#define BRCMF_E_REASON_LINK_BSSCFG_DIS 4
#define BRCMF_E_REASON_FAST_ROAM_FAILED 5
#define BRCMF_E_REASON_DIRECTED_ROAM 6
#define BRCMF_E_REASON_TSPEC_REJECTED 7
@@ -374,6 +377,28 @@ struct brcmf_join_params {
struct brcmf_assoc_params_le params_le;
};
+/* scan params for extended join */
+struct brcmf_join_scan_params_le {
+ u8 scan_type; /* 0 use default, active or passive scan */
+ __le32 nprobes; /* -1 use default, nr of probes per channel */
+ __le32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ __le32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ __le32 home_time; /* -1 use default, dwell time for the home
+ * channel between channel scans
+ */
+};
+
+/* extended join params */
+struct brcmf_ext_join_params_le {
+ struct brcmf_ssid_le ssid_le; /* {0, ""}: wildcard scan */
+ struct brcmf_join_scan_params_le scan_le;
+ struct brcmf_assoc_params_le assoc_le;
+};
+
struct brcmf_wsec_key {
u32 index; /* key index */
u32 len; /* key length */
@@ -450,6 +475,19 @@ struct brcmf_sta_info_le {
__le32 rx_decrypt_failures; /* # of packet decrypted failed */
};
+/*
+ * WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
+struct brcmf_rx_mgmt_data {
+ __be16 version;
+ __be16 chanspec;
+ __be32 rssi;
+ __be32 mactime;
+ __be32 rate;
+};
+
/* Bus independent dongle command */
struct brcmf_dcmd {
uint cmd; /* common dongle cmd definition */
@@ -480,50 +518,20 @@ struct brcmf_pub {
unsigned long drv_version; /* Version of dongle-resident driver */
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
- /* Additional stats for the bus level */
-
/* Multicast data packets sent to dongle */
unsigned long tx_multicast;
- /* Packets flushed due to unscheduled sendup thread */
- unsigned long rx_flushed;
- /* Number of times dpc scheduled by watchdog timer */
- unsigned long wd_dpc_sched;
-
- /* Number of flow control pkts recvd */
- unsigned long fc_packets;
-
- /* Last error return */
- int bcmerror;
-
- /* Last error from dongle */
- int dongle_error;
-
- /* Suspend disable flag flag */
- int suspend_disable_flag; /* "1" to disable all extra powersaving
- during suspend */
- int in_suspend; /* flag set to 1 when early suspend called */
- int dtim_skip; /* dtim skip , default 0 means wake each dtim */
struct brcmf_if *iflist[BRCMF_MAX_IFS];
struct mutex proto_block;
unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
- u8 macvalue[ETH_ALEN];
- atomic_t pend_8021x_cnt;
- wait_queue_head_t pend_8021x_wait;
-
struct brcmf_fweh_info fweh;
#ifdef DEBUG
struct dentry *dbgfs_dir;
#endif
};
-struct bcmevent_name {
- uint event;
- const char *name;
-};
-
struct brcmf_if_event {
u8 ifidx;
u8 action;
@@ -541,9 +549,11 @@ struct brcmf_cfg80211_vif;
* @vif: points to cfg80211 specific interface information.
* @ndev: associated network device.
* @stats: interface specific network statistics.
- * @idx: interface index in device firmware.
+ * @ifidx: interface index in device firmware.
* @bssidx: index of bss associated with this interface.
* @mac_addr: assigned mac address.
+ * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
+ * @pend_8021x_wait: used for signalling change in count.
*/
struct brcmf_if {
struct brcmf_pub *drvr;
@@ -552,18 +562,13 @@ struct brcmf_if {
struct net_device_stats stats;
struct work_struct setmacaddr_work;
struct work_struct multicast_work;
- int idx;
+ int ifidx;
s32 bssidx;
u8 mac_addr[ETH_ALEN];
+ atomic_t pend_8021x_cnt;
+ wait_queue_head_t pend_8021x_wait;
};
-static inline s32 brcmf_ndev_bssidx(struct net_device *ndev)
-{
- struct brcmf_if *ifp = netdev_priv(ndev);
- return ifp->bssidx;
-}
-
-extern const struct bcmevent_name bcmevent_names[];
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
@@ -576,9 +581,14 @@ extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
void *buf, uint len);
-extern int brcmf_net_attach(struct brcmf_if *ifp);
-extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx,
- s32 bssidx, char *name, u8 *mac_addr);
-extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
+/* Remove any protocol-specific data header. */
+extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
+ struct sk_buff *rxp);
+
+extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
+ s32 ifidx, char *name, u8 *mac_addr);
+extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index dd38b78a9726..ad25c3408b59 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -24,18 +24,6 @@ enum brcmf_bus_state {
BRCMF_BUS_DATA /* Ready for frame transfers */
};
-struct dngl_stats {
- unsigned long rx_packets; /* total packets received */
- unsigned long tx_packets; /* total packets transmitted */
- unsigned long rx_bytes; /* total bytes received */
- unsigned long tx_bytes; /* total bytes transmitted */
- unsigned long rx_errors; /* bad packets received */
- unsigned long tx_errors; /* packet transmit problems */
- unsigned long rx_dropped; /* packets dropped by dongle */
- unsigned long tx_dropped; /* packets dropped by dongle */
- unsigned long multicast; /* multicast packets received */
-};
-
struct brcmf_bus_dcmd {
char *name;
char *param;
@@ -72,11 +60,12 @@ struct brcmf_bus_ops {
* @drvr: public driver information.
* @state: operational state of the bus interface.
* @maxctl: maximum size for rxctl request message.
- * @drvr_up: indicates driver up/down status.
* @tx_realloc: number of tx packets realloced for headroom.
* @dstats: dongle-based statistical data.
* @align: alignment requirement for the bus.
* @dcmd_list: bus/device specific dongle initialization commands.
+ * @chip: device identifier of the dongle chip.
+ * @chiprev: revision of the dongle chip.
*/
struct brcmf_bus {
union {
@@ -87,10 +76,10 @@ struct brcmf_bus {
struct brcmf_pub *drvr;
enum brcmf_bus_state state;
uint maxctl;
- bool drvr_up;
unsigned long tx_realloc;
- struct dngl_stats dstats;
u8 align;
+ u32 chip;
+ u32 chiprev;
struct list_head dcmd_list;
struct brcmf_bus_ops *ops;
@@ -130,31 +119,18 @@ int brcmf_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint len)
* interface functions from common layer
*/
-/* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
- struct sk_buff *rxp);
-
extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void brcmf_rx_frame(struct device *dev, u8 ifidx,
- struct sk_buff_head *rxlist);
-static inline void brcmf_rx_packet(struct device *dev, int ifidx,
- struct sk_buff *pkt)
-{
- struct sk_buff_head q;
-
- skb_queue_head_init(&q);
- skb_queue_tail(&q, pkt);
- brcmf_rx_frame(dev, ifidx, &q);
-}
+extern void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist);
/* Indication from bus module regarding presence/insertion of dongle. */
extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
/* Indication from bus module regarding removal/absence of dongle */
extern void brcmf_detach(struct device *dev);
-
+/* Indication from bus module that dongle should be reset */
+extern void brcmf_dev_reset(struct device *dev);
/* Indication from bus module to change flow-control state */
extern void brcmf_txflowblock(struct device *dev, bool state);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index 83923553f1ac..a2354d951dd7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -19,8 +19,6 @@
* For certain dcmd codes, the dongle interprets string data from the host.
******************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/types.h>
#include <linux/netdevice.h>
@@ -94,8 +92,6 @@ struct brcmf_proto_bdc_header {
struct brcmf_proto {
u16 reqid;
- u8 pending;
- u32 lastcmd;
u8 bus_header[BUS_HEADER_LEN];
struct brcmf_proto_cdc_dcmd msg;
unsigned char buf[BRCMF_DCMD_MAXLEN + ROUND_UP_MARGIN];
@@ -107,7 +103,7 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
int len = le32_to_cpu(prot->msg.len) +
sizeof(struct brcmf_proto_cdc_dcmd);
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CDC, "Enter\n");
/* NOTE : cdc->msg.len holds the desired length of the buffer to be
* returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
@@ -125,7 +121,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
int ret;
struct brcmf_proto *prot = drvr->prot;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CDC, "Enter\n");
len += sizeof(struct brcmf_proto_cdc_dcmd);
do {
ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&prot->msg,
@@ -147,20 +143,7 @@ brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
int ret = 0, retries = 0;
u32 id, flags;
- brcmf_dbg(TRACE, "Enter\n");
- brcmf_dbg(CTL, "cmd %d len %d\n", cmd, len);
-
- /* Respond "bcmerror" and "bcmerrorstr" with local cache */
- if (cmd == BRCMF_C_GET_VAR && buf) {
- if (!strcmp((char *)buf, "bcmerrorstr")) {
- strncpy((char *)buf, "bcm_error",
- BCME_STRLEN);
- goto done;
- } else if (!strcmp((char *)buf, "bcmerror")) {
- *(int *)buf = drvr->dongle_error;
- goto done;
- }
- }
+ brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len);
memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
@@ -210,11 +193,8 @@ retry:
}
/* Check the ERROR flag */
- if (flags & CDC_DCMD_ERROR) {
+ if (flags & CDC_DCMD_ERROR)
ret = le32_to_cpu(msg->status);
- /* Cache error from dongle */
- drvr->dongle_error = ret;
- }
done:
return ret;
@@ -228,8 +208,7 @@ int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
int ret = 0;
u32 flags, id;
- brcmf_dbg(TRACE, "Enter\n");
- brcmf_dbg(CTL, "cmd %d len %d\n", cmd, len);
+ brcmf_dbg(CDC, "Enter, cmd %d len %d\n", cmd, len);
memset(msg, 0, sizeof(struct brcmf_proto_cdc_dcmd));
@@ -262,11 +241,8 @@ int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
}
/* Check the ERROR flag */
- if (flags & CDC_DCMD_ERROR) {
+ if (flags & CDC_DCMD_ERROR)
ret = le32_to_cpu(msg->status);
- /* Cache error from dongle */
- drvr->dongle_error = ret;
- }
done:
return ret;
@@ -287,7 +263,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
{
struct brcmf_proto_bdc_header *h;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CDC, "Enter\n");
/* Push BDC header used to convey priority for buses that don't */
@@ -305,14 +281,12 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
BDC_SET_IF_IDX(h, ifidx);
}
-int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
struct sk_buff *pktbuf)
{
struct brcmf_proto_bdc_header *h;
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_pub *drvr = bus_if->drvr;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CDC, "Enter\n");
/* Pop BDC header used to convey priority for buses that don't */
@@ -329,6 +303,14 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
return -EBADE;
}
+ /* The ifidx is the idx to map to matching netdev/ifp. When receiving
+ * events this is easy because it contains the bssidx which maps
+ * 1-on-1 to the netdev/ifp. But for data frames only the ifidx is
+ * received. bssidx 1 is used for p2p0 and no data can be received or
+ * transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0
+ */
+ if (*ifidx)
+ (*ifidx)++;
if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
BDC_PROTO_VER) {
@@ -338,7 +320,7 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
}
if (h->flags & BDC_FLAG_SUM_GOOD) {
- brcmf_dbg(INFO, "%s: BDC packet received with good rx-csum, flags 0x%x\n",
+ brcmf_dbg(CDC, "%s: BDC rcv, good checksum, flags 0x%x\n",
brcmf_ifname(drvr, *ifidx), h->flags);
pkt_set_sum_good(pktbuf, true);
}
@@ -348,6 +330,8 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
skb_pull(pktbuf, BDC_HEADER_LEN);
skb_pull(pktbuf, h->data_offset << 2);
+ if (pktbuf->len == 0)
+ return -ENODATA;
return 0;
}
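The dhd_cdc.c hunk above moves the firmware-to-driver index mapping into brcmf_proto_hdrpull(): bssidx 1 is reserved for p2p0, which never carries data, so data interfaces with ifidx > 0 are shifted up by one slot. A small sketch of that mapping, using a hypothetical helper name rather than driver code:

	#include <stdio.h>

	/* Hypothetical helper (not driver code) mirroring the ifidx
	 * adjustment in brcmf_proto_hdrpull(): ifidx 0 stays at slot 0,
	 * any other ifidx is shifted past the p2p0 slot at index 1.
	 */
	static unsigned char ifidx_to_iflist_slot(unsigned char ifidx)
	{
		return ifidx ? ifidx + 1 : 0;
	}

	int main(void)
	{
		printf("ifidx 0 -> slot %u\n", ifidx_to_iflist_slot(0));
		printf("ifidx 1 -> slot %u\n", ifidx_to_iflist_slot(1));
		return 0;
	}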
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index f8b52e5b941a..4544342a0428 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -14,8 +14,6 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index f2ab01cd7966..bc013cbe06f6 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -18,21 +18,26 @@
#define _BRCMF_DBG_H_
/* message levels */
-#define BRCMF_TRACE_VAL 0x0002
-#define BRCMF_INFO_VAL 0x0004
-#define BRCMF_DATA_VAL 0x0008
-#define BRCMF_CTL_VAL 0x0010
-#define BRCMF_TIMER_VAL 0x0020
-#define BRCMF_HDRS_VAL 0x0040
-#define BRCMF_BYTES_VAL 0x0080
-#define BRCMF_INTR_VAL 0x0100
-#define BRCMF_GLOM_VAL 0x0200
-#define BRCMF_EVENT_VAL 0x0400
-#define BRCMF_BTA_VAL 0x0800
-#define BRCMF_FIL_VAL 0x1000
-#define BRCMF_USB_VAL 0x2000
-#define BRCMF_SCAN_VAL 0x4000
-#define BRCMF_CONN_VAL 0x8000
+#define BRCMF_TRACE_VAL 0x00000002
+#define BRCMF_INFO_VAL 0x00000004
+#define BRCMF_DATA_VAL 0x00000008
+#define BRCMF_CTL_VAL 0x00000010
+#define BRCMF_TIMER_VAL 0x00000020
+#define BRCMF_HDRS_VAL 0x00000040
+#define BRCMF_BYTES_VAL 0x00000080
+#define BRCMF_INTR_VAL 0x00000100
+#define BRCMF_GLOM_VAL 0x00000200
+#define BRCMF_EVENT_VAL 0x00000400
+#define BRCMF_BTA_VAL 0x00000800
+#define BRCMF_FIL_VAL 0x00001000
+#define BRCMF_USB_VAL 0x00002000
+#define BRCMF_SCAN_VAL 0x00004000
+#define BRCMF_CONN_VAL 0x00008000
+#define BRCMF_CDC_VAL 0x00010000
+
+/* set default print format */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Macro for error messages. net_ratelimit() is used when driver
* debugging is not selected. When debugging the driver error
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 74a616b4de8e..c06cea88df0d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -14,8 +14,6 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
@@ -28,6 +26,8 @@
#include "dhd_bus.h"
#include "dhd_proto.h"
#include "dhd_dbg.h"
+#include "fwil_types.h"
+#include "p2p.h"
#include "wl_cfg80211.h"
#include "fwil.h"
@@ -42,6 +42,12 @@ MODULE_LICENSE("Dual BSD/GPL");
int brcmf_msg_level;
module_param(brcmf_msg_level, int, 0);
+/* P2P0 enable */
+static int brcmf_p2p_enable;
+#ifdef CONFIG_BRCMDBG
+module_param_named(p2pon, brcmf_p2p_enable, int, 0);
+MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
+#endif
char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
@@ -72,9 +78,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
u32 buflen;
s32 err;
- brcmf_dbg(TRACE, "enter\n");
-
ifp = container_of(work, struct brcmf_if, multicast_work);
+
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
ndev = ifp->ndev;
/* Determine initial value of allmulti flag */
@@ -131,9 +138,10 @@ _brcmf_set_mac_address(struct work_struct *work)
struct brcmf_if *ifp;
s32 err;
- brcmf_dbg(TRACE, "enter\n");
-
ifp = container_of(work, struct brcmf_if, setmacaddr_work);
+
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+
err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
ETH_ALEN);
if (err < 0) {
@@ -162,28 +170,31 @@ static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
schedule_work(&ifp->multicast_work);
}
-static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
int ret;
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
+ struct ethhdr *eh;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
- /* Reject if down */
- if (!drvr->bus_if->drvr_up ||
- (drvr->bus_if->state != BRCMF_BUS_DATA)) {
- brcmf_err("xmit rejected drvup=%d state=%d\n",
- drvr->bus_if->drvr_up,
- drvr->bus_if->state);
+ /* Can the device send data? */
+ if (drvr->bus_if->state != BRCMF_BUS_DATA) {
+ brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
netif_stop_queue(ndev);
- return -ENODEV;
+ dev_kfree_skb(skb);
+ ret = -ENODEV;
+ goto done;
}
- if (!drvr->iflist[ifp->idx]) {
- brcmf_err("bad ifidx %d\n", ifp->idx);
+ if (!drvr->iflist[ifp->bssidx]) {
+ brcmf_err("bad ifidx %d\n", ifp->bssidx);
netif_stop_queue(ndev);
- return -ENODEV;
+ dev_kfree_skb(skb);
+ ret = -ENODEV;
+ goto done;
}
/* Make sure there's enough room for any header */
@@ -191,44 +202,49 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct sk_buff *skb2;
brcmf_dbg(INFO, "%s: insufficient headroom\n",
- brcmf_ifname(drvr, ifp->idx));
+ brcmf_ifname(drvr, ifp->bssidx));
drvr->bus_if->tx_realloc++;
skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
dev_kfree_skb(skb);
skb = skb2;
if (skb == NULL) {
brcmf_err("%s: skb_realloc_headroom failed\n",
- brcmf_ifname(drvr, ifp->idx));
+ brcmf_ifname(drvr, ifp->bssidx));
ret = -ENOMEM;
goto done;
}
}
- /* Update multicast statistic */
- if (skb->len >= ETH_ALEN) {
- u8 *pktdata = (u8 *)(skb->data);
- struct ethhdr *eh = (struct ethhdr *)pktdata;
-
- if (is_multicast_ether_addr(eh->h_dest))
- drvr->tx_multicast++;
- if (ntohs(eh->h_proto) == ETH_P_PAE)
- atomic_inc(&drvr->pend_8021x_cnt);
+ /* validate length for ether packet */
+ if (skb->len < sizeof(*eh)) {
+ ret = -EINVAL;
+ dev_kfree_skb(skb);
+ goto done;
}
+ /* handle ethernet header */
+ eh = (struct ethhdr *)(skb->data);
+ if (is_multicast_ether_addr(eh->h_dest))
+ drvr->tx_multicast++;
+ if (ntohs(eh->h_proto) == ETH_P_PAE)
+ atomic_inc(&ifp->pend_8021x_cnt);
+
/* If the protocol uses a data header, apply it */
- brcmf_proto_hdrpush(drvr, ifp->idx, skb);
+ brcmf_proto_hdrpush(drvr, ifp->ifidx, skb);
/* Use bus module to send data frame */
ret = brcmf_bus_txdata(drvr->bus_if, skb);
done:
- if (ret)
- drvr->bus_if->dstats.tx_dropped++;
- else
- drvr->bus_if->dstats.tx_packets++;
+ if (ret) {
+ ifp->stats.tx_dropped++;
+ } else {
+ ifp->stats.tx_packets++;
+ ifp->stats.tx_bytes += skb->len;
+ }
/* Return ok: we always eat the packet */
- return 0;
+ return NETDEV_TX_OK;
}
void brcmf_txflowblock(struct device *dev, bool state)
@@ -250,8 +266,7 @@ void brcmf_txflowblock(struct device *dev, bool state)
}
}
-void brcmf_rx_frame(struct device *dev, u8 ifidx,
- struct sk_buff_head *skb_list)
+void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
{
unsigned char *eth;
uint len;
@@ -259,12 +274,25 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx,
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ u8 ifidx;
+ int ret;
brcmf_dbg(TRACE, "Enter\n");
skb_queue_walk_safe(skb_list, skb, pnext) {
skb_unlink(skb, skb_list);
+ /* process and remove protocol-specific header */
+ ret = brcmf_proto_hdrpull(drvr, &ifidx, skb);
+ ifp = drvr->iflist[ifidx];
+
+ if (ret || !ifp || !ifp->ndev) {
+ if ((ret != -ENODATA) && ifp)
+ ifp->stats.rx_errors++;
+ brcmu_pkt_buf_free_skb(skb);
+ continue;
+ }
+
/* Get the protocol, maintain skb around eth_type_trans()
* The main reason for this hack is for the limitation of
* Linux 2.4 where 'eth_type_trans' uses the
@@ -280,21 +308,11 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx,
eth = skb->data;
len = skb->len;
- ifp = drvr->iflist[ifidx];
- if (ifp == NULL)
- ifp = drvr->iflist[0];
-
- if (!ifp || !ifp->ndev ||
- ifp->ndev->reg_state != NETREG_REGISTERED) {
- brcmu_pkt_buf_free_skb(skb);
- continue;
- }
-
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST)
- bus_if->dstats.multicast++;
+ ifp->stats.multicast++;
skb->data = eth;
skb->len = len;
@@ -310,8 +328,13 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx,
ifp->ndev->last_rx = jiffies;
}
- bus_if->dstats.rx_bytes += skb->len;
- bus_if->dstats.rx_packets++; /* Local count */
+ if (!(ifp->ndev->flags & IFF_UP)) {
+ brcmu_pkt_buf_free_skb(skb);
+ continue;
+ }
+
+ ifp->stats.rx_bytes += skb->len;
+ ifp->stats.rx_packets++;
if (in_interrupt())
netif_rx(skb);
@@ -328,41 +351,36 @@ void brcmf_rx_frame(struct device *dev, u8 ifidx,
void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
- uint ifidx;
+ u8 ifidx;
struct ethhdr *eh;
u16 type;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_if *ifp;
+
+ brcmf_proto_hdrpull(drvr, &ifidx, txp);
- brcmf_proto_hdrpull(dev, &ifidx, txp);
+ ifp = drvr->iflist[ifidx];
+ if (!ifp)
+ return;
eh = (struct ethhdr *)(txp->data);
type = ntohs(eh->h_proto);
if (type == ETH_P_PAE) {
- atomic_dec(&drvr->pend_8021x_cnt);
- if (waitqueue_active(&drvr->pend_8021x_wait))
- wake_up(&drvr->pend_8021x_wait);
+ atomic_dec(&ifp->pend_8021x_cnt);
+ if (waitqueue_active(&ifp->pend_8021x_wait))
+ wake_up(&ifp->pend_8021x_wait);
}
+ if (!success)
+ ifp->stats.tx_errors++;
}
static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_bus *bus_if = ifp->drvr->bus_if;
- brcmf_dbg(TRACE, "Enter\n");
-
- /* Copy dongle stats to net device stats */
- ifp->stats.rx_packets = bus_if->dstats.rx_packets;
- ifp->stats.tx_packets = bus_if->dstats.tx_packets;
- ifp->stats.rx_bytes = bus_if->dstats.rx_bytes;
- ifp->stats.tx_bytes = bus_if->dstats.tx_bytes;
- ifp->stats.rx_errors = bus_if->dstats.rx_errors;
- ifp->stats.tx_errors = bus_if->dstats.tx_errors;
- ifp->stats.rx_dropped = bus_if->dstats.rx_dropped;
- ifp->stats.tx_dropped = bus_if->dstats.tx_dropped;
- ifp->stats.multicast = bus_if->dstats.multicast;
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
return &ifp->stats;
}
@@ -395,9 +413,11 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- sprintf(info->driver, KBUILD_MODNAME);
- sprintf(info->version, "%lu", drvr->drv_version);
- sprintf(info->bus_info, "%s", dev_name(drvr->bus_if->dev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ snprintf(info->version, sizeof(info->version), "%lu",
+ drvr->drv_version);
+ strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
+ sizeof(info->bus_info));
}
static const struct ethtool_ops brcmf_ethtool_ops = {
@@ -414,7 +434,7 @@ static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
u32 toe_cmpnt, csum_dir;
int ret;
- brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
/* all ethtool calls start with a cmd word */
if (copy_from_user(&cmd, uaddr, sizeof(u32)))
@@ -437,20 +457,14 @@ static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
sprintf(info.driver, "dhd");
strcpy(info.version, BRCMF_VERSION_STR);
}
-
- /* otherwise, require dongle to be up */
- else if (!drvr->bus_if->drvr_up) {
- brcmf_err("dongle is not up\n");
- return -ENODEV;
- }
- /* finally, report dongle driver type */
+ /* report dongle driver type */
else
sprintf(info.driver, "wl");
sprintf(info.version, "%lu", drvr->drv_version);
if (copy_to_user(uaddr, &info, sizeof(info)))
return -EFAULT;
- brcmf_dbg(CTL, "given %*s, returning %s\n",
+ brcmf_dbg(TRACE, "given %*s, returning %s\n",
(int)sizeof(drvname), drvname, info.driver);
break;
@@ -517,9 +531,9 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd);
+ brcmf_dbg(TRACE, "Enter, idx=%d, cmd=0x%04x\n", ifp->bssidx, cmd);
- if (!drvr->iflist[ifp->idx])
+ if (!drvr->iflist[ifp->bssidx])
return -1;
if (cmd == SIOCETHTOOL)
@@ -531,17 +545,12 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
static int brcmf_netdev_stop(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
-
- brcmf_dbg(TRACE, "Enter\n");
- if (drvr->bus_if->drvr_up == 0)
- return 0;
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
brcmf_cfg80211_down(ndev);
/* Set state and stop OS transmissions */
- drvr->bus_if->drvr_up = false;
netif_stop_queue(ndev);
return 0;
@@ -555,7 +564,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
u32 toe_ol;
s32 ret = 0;
- brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
+ brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
/* If bus is not ready, can't continue */
if (bus_if->state != BRCMF_BUS_DATA) {
@@ -563,25 +572,17 @@ static int brcmf_netdev_open(struct net_device *ndev)
return -EAGAIN;
}
- atomic_set(&drvr->pend_8021x_cnt, 0);
-
- memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
+ atomic_set(&ifp->pend_8021x_cnt, 0);
/* Get current TOE mode from dongle */
if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
&& (toe_ol & TOE_TX_CSUM_OL) != 0)
- drvr->iflist[ifp->idx]->ndev->features |=
- NETIF_F_IP_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM;
else
- drvr->iflist[ifp->idx]->ndev->features &=
- ~NETIF_F_IP_CSUM;
-
- /* make sure RF is ready for work */
- brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
+ ndev->features &= ~NETIF_F_IP_CSUM;
/* Allow transmit calls */
netif_start_queue(ndev);
- drvr->bus_if->drvr_up = true;
if (brcmf_cfg80211_up(ndev)) {
brcmf_err("failed to bring up cfg80211\n");
return -1;
@@ -600,29 +601,18 @@ static const struct net_device_ops brcmf_netdev_ops_pri = {
.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
-static const struct net_device_ops brcmf_netdev_ops_virt = {
- .ndo_open = brcmf_cfg80211_up,
- .ndo_stop = brcmf_cfg80211_down,
- .ndo_get_stats = brcmf_netdev_get_stats,
- .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
- .ndo_start_xmit = brcmf_netdev_start_xmit,
- .ndo_set_mac_address = brcmf_netdev_set_mac_address,
- .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
-};
-
-int brcmf_net_attach(struct brcmf_if *ifp)
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
struct brcmf_pub *drvr = ifp->drvr;
struct net_device *ndev;
+ s32 err;
- brcmf_dbg(TRACE, "ifidx %d mac %pM\n", ifp->idx, ifp->mac_addr);
+ brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
+ ifp->mac_addr);
ndev = ifp->ndev;
/* set appropriate operations */
- if (!ifp->idx)
- ndev->netdev_ops = &brcmf_netdev_ops_pri;
- else
- ndev->netdev_ops = &brcmf_netdev_ops_virt;
+ ndev->netdev_ops = &brcmf_netdev_ops_pri;
ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
@@ -633,7 +623,14 @@ int brcmf_net_attach(struct brcmf_if *ifp)
/* set the mac address */
memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
- if (register_netdev(ndev) != 0) {
+ INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
+ INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
+
+ if (rtnl_locked)
+ err = register_netdevice(ndev);
+ else
+ err = register_netdev(ndev);
+ if (err != 0) {
brcmf_err("couldn't register the net device\n");
goto fail;
}
@@ -647,16 +644,78 @@ fail:
return -EBADE;
}
-struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
- char *name, u8 *addr_mask)
+static int brcmf_net_p2p_open(struct net_device *ndev)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ return brcmf_cfg80211_up(ndev);
+}
+
+static int brcmf_net_p2p_stop(struct net_device *ndev)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ return brcmf_cfg80211_down(ndev);
+}
+
+static int brcmf_net_p2p_do_ioctl(struct net_device *ndev,
+ struct ifreq *ifr, int cmd)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+ return 0;
+}
+
+static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops brcmf_netdev_ops_p2p = {
+ .ndo_open = brcmf_net_p2p_open,
+ .ndo_stop = brcmf_net_p2p_stop,
+ .ndo_do_ioctl = brcmf_net_p2p_do_ioctl,
+ .ndo_start_xmit = brcmf_net_p2p_start_xmit
+};
+
+static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
+{
+ struct net_device *ndev;
+
+ brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
+ ifp->mac_addr);
+ ndev = ifp->ndev;
+
+ ndev->netdev_ops = &brcmf_netdev_ops_p2p;
+
+ /* set the mac address */
+ memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+
+ if (register_netdev(ndev) != 0) {
+ brcmf_err("couldn't register the p2p net device\n");
+ goto fail;
+ }
+
+ brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
+
+ return 0;
+
+fail:
+ return -EBADE;
+}
+
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+ char *name, u8 *mac_addr)
{
struct brcmf_if *ifp;
struct net_device *ndev;
- int i;
- brcmf_dbg(TRACE, "idx %d\n", ifidx);
+ brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);
- ifp = drvr->iflist[ifidx];
+ ifp = drvr->iflist[bssidx];
/*
* Delete the existing interface before overwriting it
* in case we missed the BRCMF_E_IF_DEL event.
@@ -668,7 +727,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
netif_stop_queue(ifp->ndev);
unregister_netdev(ifp->ndev);
free_netdev(ifp->ndev);
- drvr->iflist[ifidx] = NULL;
+ drvr->iflist[bssidx] = NULL;
} else {
brcmf_err("ignore IF event\n");
return ERR_PTR(-EINVAL);
@@ -685,16 +744,15 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
ifp = netdev_priv(ndev);
ifp->ndev = ndev;
ifp->drvr = drvr;
- drvr->iflist[ifidx] = ifp;
- ifp->idx = ifidx;
+ drvr->iflist[bssidx] = ifp;
+ ifp->ifidx = ifidx;
ifp->bssidx = bssidx;
- INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
- INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
- if (addr_mask != NULL)
- for (i = 0; i < ETH_ALEN; i++)
- ifp->mac_addr[i] = drvr->mac[i] ^ addr_mask[i];
+ init_waitqueue_head(&ifp->pend_8021x_wait);
+
+ if (mac_addr != NULL)
+ memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
current->pid, ifp->ndev->name, ifp->mac_addr);
@@ -702,19 +760,18 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
return ifp;
}
-void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
+void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
struct brcmf_if *ifp;
- brcmf_dbg(TRACE, "idx %d\n", ifidx);
-
- ifp = drvr->iflist[ifidx];
+ ifp = drvr->iflist[bssidx];
if (!ifp) {
- brcmf_err("Null interface\n");
+ brcmf_err("Null interface, idx=%d\n", bssidx);
return;
}
+ brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
if (ifp->ndev) {
- if (ifidx == 0) {
+ if (bssidx == 0) {
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
rtnl_lock();
brcmf_netdev_stop(ifp->ndev);
@@ -724,12 +781,14 @@ void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
netif_stop_queue(ifp->ndev);
}
- cancel_work_sync(&ifp->setmacaddr_work);
- cancel_work_sync(&ifp->multicast_work);
+ if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+ cancel_work_sync(&ifp->setmacaddr_work);
+ cancel_work_sync(&ifp->multicast_work);
+ }
unregister_netdev(ifp->ndev);
- drvr->iflist[ifidx] = NULL;
- if (ifidx == 0)
+ drvr->iflist[bssidx] = NULL;
+ if (bssidx == 0)
brcmf_cfg80211_detach(drvr->config);
free_netdev(ifp->ndev);
}
@@ -769,8 +828,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
- init_waitqueue_head(&drvr->pend_8021x_wait);
-
return ret;
fail:
@@ -785,6 +842,7 @@ int brcmf_bus_start(struct device *dev)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
struct brcmf_if *ifp;
+ struct brcmf_if *p2p_ifp;
brcmf_dbg(TRACE, "\n");
@@ -800,6 +858,13 @@ int brcmf_bus_start(struct device *dev)
if (IS_ERR(ifp))
return PTR_ERR(ifp);
+ if (brcmf_p2p_enable)
+ p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
+ else
+ p2p_ifp = NULL;
+ if (IS_ERR(p2p_ifp))
+ p2p_ifp = NULL;
+
/* signal bus ready */
bus_if->state = BRCMF_BUS_DATA;
@@ -818,16 +883,22 @@ int brcmf_bus_start(struct device *dev)
if (ret < 0)
goto fail;
- ret = brcmf_net_attach(ifp);
+ ret = brcmf_net_attach(ifp, false);
fail:
if (ret < 0) {
brcmf_err("failed: %d\n", ret);
if (drvr->config)
brcmf_cfg80211_detach(drvr->config);
- free_netdev(drvr->iflist[0]->ndev);
+ free_netdev(ifp->ndev);
drvr->iflist[0] = NULL;
+ if (p2p_ifp) {
+ free_netdev(p2p_ifp->ndev);
+ drvr->iflist[1] = NULL;
+ }
return ret;
}
+ if ((brcmf_p2p_enable) && (p2p_ifp))
+ brcmf_net_p2p_attach(p2p_ifp);
return 0;
}
@@ -845,9 +916,21 @@ static void brcmf_bus_detach(struct brcmf_pub *drvr)
}
}
+void brcmf_dev_reset(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+
+ if (drvr == NULL)
+ return;
+
+ if (drvr->iflist[0])
+ brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
+}
+
void brcmf_detach(struct device *dev)
{
- int i;
+ s32 i;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
@@ -866,28 +949,26 @@ void brcmf_detach(struct device *dev)
brcmf_bus_detach(drvr);
- if (drvr->prot) {
+ if (drvr->prot)
brcmf_proto_detach(drvr);
- }
brcmf_debugfs_detach(drvr);
bus_if->drvr = NULL;
kfree(drvr);
}
-static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr)
+static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
- return atomic_read(&drvr->pend_8021x_cnt);
+ return atomic_read(&ifp->pend_8021x_cnt);
}
int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_pub *drvr = ifp->drvr;
int err;
- err = wait_event_timeout(drvr->pend_8021x_wait,
- !brcmf_get_pend_8021x_cnt(drvr),
+ err = wait_event_timeout(ifp->pend_8021x_wait,
+ !brcmf_get_pend_8021x_cnt(ifp),
msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
WARN_ON(!err);
@@ -895,6 +976,16 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
return !err;
}
+/*
+ * return chip id and rev of the device encoded in u32.
+ */
+u32 brcmf_get_chip_info(struct brcmf_if *ifp)
+{
+ struct brcmf_bus *bus = ifp->drvr->bus_if;
+
+ return bus->chip << 4 | bus->chiprev;
+}
+
static void brcmf_driver_init(struct work_struct *work)
{
brcmf_debugfs_init();
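The new brcmf_get_chip_info() in the dhd_linux.c hunk packs the chip id and revision into one u32 as (chip << 4 | chiprev), so the revision lives in the low nibble. A short sketch of the encode/decode round trip; the chip id and revision below are made-up example values, not taken from the driver:

	#include <stdio.h>

	/* Round trip of the (chip << 4 | chiprev) packing used by
	 * brcmf_get_chip_info(); values here are illustrative only.
	 */
	int main(void)
	{
		unsigned int chip = 0x4330, chiprev = 3;
		unsigned int info = chip << 4 | chiprev;

		printf("info=0x%x chip=0x%x rev=%u\n",
		       info, info >> 4, info & 0xf);
		return 0;
	}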
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index cf857f1edf8c..4469321c0eb3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -14,8 +14,6 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
@@ -1098,7 +1096,6 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
type != BRCMF_SDIO_FT_SUPER) {
brcmf_err("HW header length too long\n");
- bus->sdiodev->bus_if->dstats.rx_errors++;
bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
rd->len = 0;
@@ -1169,7 +1166,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
int errcode;
u8 doff, sfdoff;
- int ifidx = 0;
bool usechain = bus->use_rxchain;
struct brcmf_sdio_read rd_new;
@@ -1301,7 +1297,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (errcode < 0) {
brcmf_err("glom read of %d bytes failed: %d\n",
dlen, errcode);
- bus->sdiodev->bus_if->dstats.rx_errors++;
sdio_claim_host(bus->sdiodev->func[1]);
if (bus->glomerr++ < 3) {
@@ -1388,13 +1383,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
continue;
- } else if (brcmf_proto_hdrpull(bus->sdiodev->dev,
- &ifidx, pfirst) != 0) {
- brcmf_err("rx protocol error\n");
- bus->sdiodev->bus_if->dstats.rx_errors++;
- skb_unlink(pfirst, &bus->glom);
- brcmu_pkt_buf_free_skb(pfirst);
- continue;
}
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
@@ -1407,7 +1395,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
/* sent any remaining packets up */
if (bus->glom.qlen)
- brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom);
+ brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
bus->sdcnt.rxglomframes++;
bus->sdcnt.rxglompkts += bus->glom.qlen;
@@ -1455,10 +1443,9 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if (bus->rxblen)
buf = vzalloc(bus->rxblen);
- if (!buf) {
- brcmf_err("no memory for control frame\n");
+ if (!buf)
goto done;
- }
+
rbuf = bus->rxbuf;
pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
if (pad)
@@ -1488,7 +1475,6 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
rdlen, bus->sdiodev->bus_if->maxctl);
- bus->sdiodev->bus_if->dstats.rx_errors++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
}
@@ -1496,7 +1482,6 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
len, len - doff, bus->sdiodev->bus_if->maxctl);
- bus->sdiodev->bus_if->dstats.rx_errors++;
bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
@@ -1558,10 +1543,10 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt; /* Packet for event or data frames */
+ struct sk_buff_head pktlist; /* needed for bus interface */
u16 pad; /* Number of pad bytes to read */
uint rxleft = 0; /* Remaining number of frames allowed */
int sdret; /* Return code from calls */
- int ifidx = 0;
uint rxcount = 0; /* Total frames read */
struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
u8 head_read = 0;
@@ -1644,7 +1629,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (!pkt) {
/* Give up on data, request rtx of events */
brcmf_err("brcmu_pkt_buf_get_skb failed\n");
- bus->sdiodev->bus_if->dstats.rx_dropped++;
brcmf_sdbrcm_rxfail(bus, false,
RETRYCHAN(rd->channel));
sdio_release_host(bus->sdiodev->func[1]);
@@ -1662,7 +1646,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
brcmf_err("read %d bytes from channel %d failed: %d\n",
rd->len, rd->channel, sdret);
brcmu_pkt_buf_free_skb(pkt);
- bus->sdiodev->bus_if->dstats.rx_errors++;
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_rxfail(bus, true,
RETRYCHAN(rd->channel));
@@ -1760,15 +1743,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (pkt->len == 0) {
brcmu_pkt_buf_free_skb(pkt);
continue;
- } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx,
- pkt) != 0) {
- brcmf_err("rx protocol error\n");
- brcmu_pkt_buf_free_skb(pkt);
- bus->sdiodev->bus_if->dstats.rx_errors++;
- continue;
}
- brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
+ skb_queue_head_init(&pktlist);
+ skb_queue_tail(&pktlist, pkt);
+ brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
}
rxcount = maxframes - rxleft;
@@ -1954,10 +1933,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
datalen = pkt->len - SDPCM_HDRLEN;
ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
- if (ret)
- bus->sdiodev->bus_if->dstats.tx_errors++;
- else
- bus->sdiodev->bus_if->dstats.tx_bytes += datalen;
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
@@ -1976,8 +1951,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
}
/* Deflow-control stack if needed */
- if (bus->sdiodev->bus_if->drvr_up &&
- (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
+ if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
bus->txoff = false;
brcmf_txflowblock(bus->sdiodev->dev, false);
@@ -2724,9 +2698,10 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
* address of sdpcm_shared structure
*/
sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
(u8 *)&addr_le, 4);
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
@@ -2745,10 +2720,8 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
}
/* Read hndrte_shared structure */
- sdio_claim_host(bus->sdiodev->func[1]);
rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
sizeof(struct sdpcm_shared_le));
- sdio_release_host(bus->sdiodev->func[1]);
if (rv < 0)
return rv;
@@ -2850,14 +2823,12 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
return 0;
- sdio_claim_host(bus->sdiodev->func[1]);
error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
sizeof(struct brcmf_trap_info));
if (error < 0)
return error;
nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
- sdio_release_host(bus->sdiodev->func[1]);
if (nbytes < 0)
return nbytes;
@@ -3322,9 +3293,6 @@ static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
int ret;
- if (bus->sdiodev->bus_if->drvr_up)
- return -EISCONN;
-
ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
@@ -3955,6 +3923,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
/* Assign bus interface call back */
bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
+ bus->sdiodev->bus_if->chip = bus->ci->chip;
+ bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
/* Attach to the brcmf/OS/network interface */
ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index ba0b22512f12..e9d6f91a1f2b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -189,24 +189,24 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
return;
}
- ifp = drvr->iflist[ifevent->ifidx];
+ ifp = drvr->iflist[ifevent->bssidx];
if (ifevent->action == BRCMF_E_IF_ADD) {
brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname,
emsg->addr);
- ifp = brcmf_add_if(drvr, ifevent->ifidx, ifevent->bssidx,
+ ifp = brcmf_add_if(drvr, ifevent->bssidx, ifevent->ifidx,
emsg->ifname, emsg->addr);
if (IS_ERR(ifp))
return;
if (!drvr->fweh.evt_handler[BRCMF_E_IF])
- err = brcmf_net_attach(ifp);
+ err = brcmf_net_attach(ifp, false);
}
err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
if (ifevent->action == BRCMF_E_IF_DEL)
- brcmf_del_if(drvr, ifevent->ifidx);
+ brcmf_del_if(drvr, ifevent->bssidx);
}
/**
@@ -250,8 +250,6 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
drvr = container_of(fweh, struct brcmf_pub, fweh);
while ((event = brcmf_fweh_dequeue_event(fweh))) {
- ifp = drvr->iflist[event->ifidx];
-
brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n",
brcmf_fweh_event_name(event->code), event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
@@ -283,6 +281,7 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
goto event_free;
}
+ ifp = drvr->iflist[emsg.bsscfgidx];
err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg,
event->data);
if (err) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index 36901f76a3b5..8c39b51dcccf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -83,6 +83,7 @@ struct brcmf_event;
BRCMF_ENUM_DEF(MULTICAST_DECODE_ERROR, 51) \
BRCMF_ENUM_DEF(TRACE, 52) \
BRCMF_ENUM_DEF(IF, 54) \
+ BRCMF_ENUM_DEF(P2P_DISC_LISTEN_COMPLETE, 55) \
BRCMF_ENUM_DEF(RSSI, 56) \
BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \
BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \
@@ -96,8 +97,11 @@ struct brcmf_event;
BRCMF_ENUM_DEF(DFS_AP_RESUME, 66) \
BRCMF_ENUM_DEF(ESCAN_RESULT, 69) \
BRCMF_ENUM_DEF(ACTION_FRAME_OFF_CHAN_COMPLETE, 70) \
+ BRCMF_ENUM_DEF(PROBERESP_MSG, 71) \
+ BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \
BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
- BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74)
+ BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
+ BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index d8d8b6549dc5..8d1def935b8d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -45,9 +45,10 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
if (data != NULL)
len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
if (set)
- err = brcmf_proto_cdc_set_dcmd(drvr, ifp->idx, cmd, data, len);
+ err = brcmf_proto_cdc_set_dcmd(drvr, ifp->ifidx, cmd, data,
+ len);
else
- err = brcmf_proto_cdc_query_dcmd(drvr, ifp->idx, cmd, data,
+ err = brcmf_proto_cdc_query_dcmd(drvr, ifp->ifidx, cmd, data,
len);
if (err >= 0)
@@ -100,6 +101,7 @@ brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
__le32 data_le = cpu_to_le32(data);
mutex_lock(&ifp->drvr->proto_block);
+ brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, data);
err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
mutex_unlock(&ifp->drvr->proto_block);
@@ -116,6 +118,7 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
mutex_unlock(&ifp->drvr->proto_block);
*data = le32_to_cpu(data_le);
+ brcmf_dbg(FIL, "cmd=%d, value=%d\n", cmd, *data);
return err;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
new file mode 100644
index 000000000000..0f2c83bc95dc
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef FWIL_TYPES_H_
+#define FWIL_TYPES_H_
+
+#include <linux/if_ether.h>
+
+
+#define BRCMF_FIL_ACTION_FRAME_SIZE 1800
+
+
+enum brcmf_fil_p2p_if_types {
+ BRCMF_FIL_P2P_IF_CLIENT,
+ BRCMF_FIL_P2P_IF_GO,
+ BRCMF_FIL_P2P_IF_DYNBCN_GO,
+ BRCMF_FIL_P2P_IF_DEV,
+};
+
+struct brcmf_fil_p2p_if_le {
+ u8 addr[ETH_ALEN];
+ __le16 type;
+ __le16 chspec;
+};
+
+struct brcmf_fil_chan_info_le {
+ __le32 hw_channel;
+ __le32 target_channel;
+ __le32 scan_channel;
+};
+
+struct brcmf_fil_action_frame_le {
+ u8 da[ETH_ALEN];
+ __le16 len;
+ __le32 packet_id;
+ u8 data[BRCMF_FIL_ACTION_FRAME_SIZE];
+};
+
+struct brcmf_fil_af_params_le {
+ __le32 channel;
+ __le32 dwell_time;
+ u8 bssid[ETH_ALEN];
+ u8 pad[2];
+ struct brcmf_fil_action_frame_le action_frame;
+};
+
+struct brcmf_fil_bss_enable_le {
+ __le32 bsscfg_idx;
+ __le32 enable;
+};
+
+#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
new file mode 100644
index 000000000000..4166e642068b
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -0,0 +1,2277 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <net/cfg80211.h>
+
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <defs.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include "fwil.h"
+#include "fwil_types.h"
+#include "p2p.h"
+#include "wl_cfg80211.h"
+
+/* parameters used for p2p escan */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+#define WL_SCAN_CONNECT_DWELL_TIME_MS 200
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
+
+#define BRCMF_P2P_WILDCARD_SSID "DIRECT-"
+#define BRCMF_P2P_WILDCARD_SSID_LEN (sizeof(BRCMF_P2P_WILDCARD_SSID) - 1)
+
+#define SOCIAL_CHAN_1 1
+#define SOCIAL_CHAN_2 6
+#define SOCIAL_CHAN_3 11
+#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \
+ (channel == SOCIAL_CHAN_2) || \
+ (channel == SOCIAL_CHAN_3))
+#define SOCIAL_CHAN_CNT 3
+#define AF_PEER_SEARCH_CNT 2
+
+#define BRCMF_SCB_TIMEOUT_VALUE 20
+
+#define P2P_VER 9 /* P2P version: 9=WiFi P2P v1.0 */
+#define P2P_PUB_AF_CATEGORY 0x04
+#define P2P_PUB_AF_ACTION 0x09
+#define P2P_AF_CATEGORY 0x7f
+#define P2P_OUI "\x50\x6F\x9A" /* P2P OUI */
+#define P2P_OUI_LEN 3 /* P2P OUI length */
+
+/* Action Frame Constants */
+#define DOT11_ACTION_HDR_LEN 2 /* action frame category + action */
+#define DOT11_ACTION_CAT_OFF 0 /* category offset */
+#define DOT11_ACTION_ACT_OFF 1 /* action offset */
+
+#define P2P_AF_DWELL_TIME 200
+#define P2P_AF_MIN_DWELL_TIME 100
+#define P2P_AF_MED_DWELL_TIME 400
+#define P2P_AF_LONG_DWELL_TIME 1000
+#define P2P_AF_TX_MAX_RETRY 1
+#define P2P_AF_MAX_WAIT_TIME 2000
+#define P2P_INVALID_CHANNEL -1
+#define P2P_CHANNEL_SYNC_RETRY 5
+#define P2P_AF_FRM_SCAN_MAX_WAIT 1500
+#define P2P_DEFAULT_SLEEP_TIME_VSDB 200
+
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */
+#define P2P_PAF_SUBTYPE_INVALID 255 /* Invalid Subtype */
+
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */
+
+/* P2P Service Discovery related */
+#define P2PSD_ACTION_CATEGORY 0x04 /* Public action frame */
+#define P2PSD_ACTION_ID_GAS_IREQ 0x0a /* GAS Initial Request AF */
+#define P2PSD_ACTION_ID_GAS_IRESP 0x0b /* GAS Initial Response AF */
+#define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comeback Request AF */
+#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comeback Response AF */
+
+/**
+ * struct brcmf_p2p_disc_st_le - set discovery state in firmware.
+ *
+ * @state: requested discovery state (see enum brcmf_p2p_disc_state).
+ * @chspec: channel parameter for %WL_P2P_DISC_ST_LISTEN state.
+ * @dwell: dwell time in ms for %WL_P2P_DISC_ST_LISTEN state.
+ */
+struct brcmf_p2p_disc_st_le {
+ u8 state;
+ __le16 chspec;
+ __le16 dwell;
+};
+
+/**
+ * enum brcmf_p2p_disc_state - P2P discovery state values
+ *
+ * @WL_P2P_DISC_ST_SCAN: P2P discovery with wildcard SSID and P2P IE.
+ * @WL_P2P_DISC_ST_LISTEN: P2P discovery off-channel for specified time.
+ * @WL_P2P_DISC_ST_SEARCH: P2P discovery with P2P wildcard SSID and P2P IE.
+ */
+enum brcmf_p2p_disc_state {
+ WL_P2P_DISC_ST_SCAN,
+ WL_P2P_DISC_ST_LISTEN,
+ WL_P2P_DISC_ST_SEARCH
+};
+
+/**
+ * struct brcmf_p2p_scan_le - P2P specific scan request.
+ *
+ * @type: type of scan method requested (values: 'E' or 'S').
+ * @reserved: reserved (ignored).
+ * @eparams: parameters used for type 'E'.
+ * @sparams: parameters used for type 'S'.
+ */
+struct brcmf_p2p_scan_le {
+ u8 type;
+ u8 reserved[3];
+ union {
+ struct brcmf_escan_params_le eparams;
+ struct brcmf_scan_params_le sparams;
+ };
+};
+
+/**
+ * struct brcmf_p2p_pub_act_frame - WiFi P2P Public Action Frame
+ *
+ * @category: P2P_PUB_AF_CATEGORY
+ * @action: P2P_PUB_AF_ACTION
+ * @oui[3]: P2P_OUI
+ * @oui_type: OUI type - P2P_VER
+ * @subtype: OUI subtype - P2P_TYPE_*
+ * @dialog_token: nonzero, identifies req/rsp transaction
+ * @elts[1]: Variable length information elements.
+ */
+struct brcmf_p2p_pub_act_frame {
+ u8 category;
+ u8 action;
+ u8 oui[3];
+ u8 oui_type;
+ u8 subtype;
+ u8 dialog_token;
+ u8 elts[1];
+};
+
+/**
+ * struct brcmf_p2p_action_frame - WiFi P2P Action Frame
+ *
+ * @category: P2P_AF_CATEGORY
+ * @OUI[3]: OUI - P2P_OUI
+ * @type: OUI Type - P2P_VER
+ * @subtype: OUI Subtype - P2P_AF_*
+ * @dialog_token: nonzero, identifies req/resp transaction
+ * @elts[1]: Variable length information elements.
+ */
+struct brcmf_p2p_action_frame {
+ u8 category;
+ u8 oui[3];
+ u8 type;
+ u8 subtype;
+ u8 dialog_token;
+ u8 elts[1];
+};
+
+/**
+ * struct brcmf_p2psd_gas_pub_act_frame - Wi-Fi GAS Public Action Frame
+ *
+ * @category: 0x04 Public Action Frame
+ * @action: 0x6c Advertisement Protocol
+ * @dialog_token: nonzero, identifies req/rsp transaction
+ * @query_data[1]: Query Data (SD GAS ireq / SD GAS iresp)
+ */
+struct brcmf_p2psd_gas_pub_act_frame {
+ u8 category;
+ u8 action;
+ u8 dialog_token;
+ u8 query_data[1];
+};
+
+/**
+ * struct brcmf_config_af_params - Action Frame Parameters for tx.
+ *
+ * @mpc_onoff: to send an action frame successfully, mpc may have to be
+ * turned off. 0: off, 1: on, (-1): do nothing
+ * @search_channel: 1: search peer's channel to send af
+ * @extra_listen: keep the dwell time to get af response frame.
+ */
+struct brcmf_config_af_params {
+ s32 mpc_onoff;
+ bool search_channel;
+ bool extra_listen;
+};
+
+/**
+ * brcmf_p2p_is_pub_action() - true if p2p public type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p public action type
+ */
+static bool brcmf_p2p_is_pub_action(void *frame, u32 frame_len)
+{
+ struct brcmf_p2p_pub_act_frame *pact_frm;
+
+ if (frame == NULL)
+ return false;
+
+ pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+ if (frame_len < sizeof(struct brcmf_p2p_pub_act_frame) - 1)
+ return false;
+
+ if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
+ pact_frm->action == P2P_PUB_AF_ACTION &&
+ pact_frm->oui_type == P2P_VER &&
+ memcmp(pact_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0)
+ return true;
+
+ return false;
+}
+
+/**
+ * brcmf_p2p_is_p2p_action() - true if p2p action type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p action type
+ */
+static bool brcmf_p2p_is_p2p_action(void *frame, u32 frame_len)
+{
+ struct brcmf_p2p_action_frame *act_frm;
+
+ if (frame == NULL)
+ return false;
+
+ act_frm = (struct brcmf_p2p_action_frame *)frame;
+ if (frame_len < sizeof(struct brcmf_p2p_action_frame) - 1)
+ return false;
+
+ if (act_frm->category == P2P_AF_CATEGORY &&
+ act_frm->type == P2P_VER &&
+ memcmp(act_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0)
+ return true;
+
+ return false;
+}
+
+/**
+ * brcmf_p2p_is_gas_action() - true if p2p gas action type frame.
+ *
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Determine if action frame is p2p gas action type
+ */
+static bool brcmf_p2p_is_gas_action(void *frame, u32 frame_len)
+{
+ struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+
+ if (frame == NULL)
+ return false;
+
+ sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+ if (frame_len < sizeof(struct brcmf_p2psd_gas_pub_act_frame) - 1)
+ return false;
+
+ if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+ return false;
+
+ if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+ return true;
+
+ return false;
+}
+
+/**
+ * brcmf_p2p_print_actframe() - debug print routine.
+ *
+ * @tx: Received or to be transmitted
+ * @frame: action frame data.
+ * @frame_len: length of action frame data.
+ *
+ * Print information about the p2p action frame
+ */
+
+#ifdef DEBUG
+
+static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+ struct brcmf_p2p_pub_act_frame *pact_frm;
+ struct brcmf_p2p_action_frame *act_frm;
+ struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+
+ if (!frame || frame_len <= 2)
+ return;
+
+ if (brcmf_p2p_is_pub_action(frame, frame_len)) {
+ pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+ switch (pact_frm->subtype) {
+ case P2P_PAF_GON_REQ:
+ brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Req Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_GON_RSP:
+ brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Rsp Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_GON_CONF:
+ brcmf_dbg(TRACE, "%s P2P Group Owner Negotiation Confirm Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_INVITE_REQ:
+ brcmf_dbg(TRACE, "%s P2P Invitation Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_INVITE_RSP:
+ brcmf_dbg(TRACE, "%s P2P Invitation Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_DEVDIS_REQ:
+ brcmf_dbg(TRACE, "%s P2P Device Discoverability Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_DEVDIS_RSP:
+ brcmf_dbg(TRACE, "%s P2P Device Discoverability Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_PROVDIS_REQ:
+ brcmf_dbg(TRACE, "%s P2P Provision Discovery Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_PAF_PROVDIS_RSP:
+ brcmf_dbg(TRACE, "%s P2P Provision Discovery Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ default:
+ brcmf_dbg(TRACE, "%s Unknown P2P Public Action Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ }
+ } else if (brcmf_p2p_is_p2p_action(frame, frame_len)) {
+ act_frm = (struct brcmf_p2p_action_frame *)frame;
+ switch (act_frm->subtype) {
+ case P2P_AF_NOTICE_OF_ABSENCE:
+ brcmf_dbg(TRACE, "%s P2P Notice of Absence Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_AF_PRESENCE_REQ:
+ brcmf_dbg(TRACE, "%s P2P Presence Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_AF_PRESENCE_RSP:
+ brcmf_dbg(TRACE, "%s P2P Presence Response Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2P_AF_GO_DISC_REQ:
+ brcmf_dbg(TRACE, "%s P2P Discoverability Request Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ default:
+ brcmf_dbg(TRACE, "%s Unknown P2P Action Frame\n",
+ (tx) ? "TX" : "RX");
+ }
+
+ } else if (brcmf_p2p_is_gas_action(frame, frame_len)) {
+ sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+ switch (sd_act_frm->action) {
+ case P2PSD_ACTION_ID_GAS_IREQ:
+ brcmf_dbg(TRACE, "%s P2P GAS Initial Request\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2PSD_ACTION_ID_GAS_IRESP:
+ brcmf_dbg(TRACE, "%s P2P GAS Initial Response\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2PSD_ACTION_ID_GAS_CREQ:
+ brcmf_dbg(TRACE, "%s P2P GAS Comback Request\n",
+ (tx) ? "TX" : "RX");
+ break;
+ case P2PSD_ACTION_ID_GAS_CRESP:
+ brcmf_dbg(TRACE, "%s P2P GAS Comback Response\n",
+ (tx) ? "TX" : "RX");
+ break;
+ default:
+ brcmf_dbg(TRACE, "%s Unknown P2P GAS Frame\n",
+ (tx) ? "TX" : "RX");
+ break;
+ }
+ }
+}
+
+#else
+
+static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len)
+{
+}
+
+#endif
+
+
+/**
+ * brcmf_p2p_chnr_to_chspec() - convert channel number to chanspec.
+ *
+ * @channel: channel number
+ */
+static u16 brcmf_p2p_chnr_to_chspec(u16 channel)
+{
+ u16 chanspec;
+
+ chanspec = channel & WL_CHANSPEC_CHAN_MASK;
+
+ if (channel <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ return chanspec;
+}
+
+
+/**
+ * brcmf_p2p_set_firmware() - prepare firmware for peer-to-peer operation.
+ *
+ * @ifp: ifp to use for iovars (primary).
+ * @p2p_mac: mac address to configure for p2p_da_override
+ */
+static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
+{
+ s32 ret = 0;
+
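+ /* "apsta" allows simultaneous STA and AP operation in firmware */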
+ brcmf_fil_iovar_int_set(ifp, "apsta", 1);
+
+ /* In case of COB type, the firmware comes up with a default mac
+ * address. After initializing the firmware, we have to push the
+ * current mac address to it for use as the P2P device address.
+ */
+ ret = brcmf_fil_iovar_data_set(ifp, "p2p_da_override", p2p_mac,
+ ETH_ALEN);
+ if (ret)
+ brcmf_err("failed to update device address ret %d\n", ret);
+
+ return ret;
+}
+
+/**
+ * brcmf_p2p_generate_bss_mac() - derive mac addresses for P2P.
+ *
+ * @p2p: P2P specific data.
+ *
+ * P2P needs mac addresses for P2P device and interface. These are
+ * derived from the primary net device, ie. the permanent ethernet
+ * address of the device.
+ */
+static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+ struct brcmf_if *p2p_ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp;
+
+ /* Generate the P2P Device Address. This consists of the device's
+ * primary MAC address with the locally administered bit set.
+ */
+ memcpy(p2p->dev_addr, pri_ifp->mac_addr, ETH_ALEN);
+ p2p->dev_addr[0] |= 0x02;
+ memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
+
+ /* Generate the P2P Interface Address. If the discovery and connection
+ * BSSCFGs need to simultaneously co-exist, then this address must be
+ * different from the P2P Device Address, but also locally administered.
+ */
+ memcpy(p2p->int_addr, p2p->dev_addr, ETH_ALEN);
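+ /* flip one bit in the fifth octet so it differs from the device address */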
+ p2p->int_addr[4] ^= 0x80;
+}
+
+/**
+ * brcmf_p2p_scan_is_p2p_request() - is cfg80211 scan request a P2P scan.
+ *
+ * @request: the scan request as received from cfg80211.
+ *
+ * returns true if one of the ssids in the request matches the
+ * P2P wildcard ssid; otherwise returns false.
+ */
+static bool brcmf_p2p_scan_is_p2p_request(struct cfg80211_scan_request *request)
+{
+ struct cfg80211_ssid *ssids = request->ssids;
+ int i;
+
+ for (i = 0; i < request->n_ssids; i++) {
+ if (ssids[i].ssid_len != BRCMF_P2P_WILDCARD_SSID_LEN)
+ continue;
+
+ brcmf_dbg(INFO, "comparing ssid \"%s\"", ssids[i].ssid);
+ if (!memcmp(BRCMF_P2P_WILDCARD_SSID, ssids[i].ssid,
+ BRCMF_P2P_WILDCARD_SSID_LEN))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * brcmf_p2p_set_discover_state - set discover state in firmware.
+ *
+ * @ifp: low-level interface object.
+ * @state: discover state to set.
+ * @chanspec: channel parameters (for state @WL_P2P_DISC_ST_LISTEN only).
+ * @listen_ms: duration to listen (for state @WL_P2P_DISC_ST_LISTEN only).
+ */
+static s32 brcmf_p2p_set_discover_state(struct brcmf_if *ifp, u8 state,
+ u16 chanspec, u16 listen_ms)
+{
+ struct brcmf_p2p_disc_st_le discover_state;
+ s32 ret = 0;
+ brcmf_dbg(TRACE, "enter\n");
+
+ discover_state.state = state;
+ discover_state.chspec = cpu_to_le16(chanspec);
+ discover_state.dwell = cpu_to_le16(listen_ms);
+ ret = brcmf_fil_bsscfg_data_set(ifp, "p2p_state", &discover_state,
+ sizeof(discover_state));
+ return ret;
+}
+
+/**
+ * brcmf_p2p_deinit_discovery() - disable P2P device discovery.
+ *
+ * @p2p: P2P specific data.
+ *
+ * Resets the discovery state and disables it in firmware.
+ */
+static s32 brcmf_p2p_deinit_discovery(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_cfg80211_vif *vif;
+
+ brcmf_dbg(TRACE, "enter\n");
+
+ /* Set the discovery state to SCAN */
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ (void)brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+
+ /* Disable P2P discovery in the firmware */
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+ (void)brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 0);
+
+ return 0;
+}
+
+/**
+ * brcmf_p2p_enable_discovery() - initialize and configure discovery.
+ *
+ * @p2p: P2P specific data.
+ *
+ * Initializes the discovery device and configures the virtual interface.
+ */
+static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_cfg80211_vif *vif;
+ s32 ret = 0;
+
+ brcmf_dbg(TRACE, "enter\n");
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (!vif) {
+ brcmf_err("P2P config device not available\n");
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (test_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status)) {
+ brcmf_dbg(INFO, "P2P config device already configured\n");
+ goto exit;
+ }
+
+ /* Re-initialize P2P Discovery in the firmware */
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+ ret = brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 1);
+ if (ret < 0) {
+ brcmf_err("set p2p_disc error\n");
+ goto exit;
+ }
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ ret = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+ if (ret < 0) {
+ brcmf_err("unable to set WL_P2P_DISC_ST_SCAN\n");
+ goto exit;
+ }
+
+ /*
+ * Set wsec to any non-zero value in the discovery bsscfg
+ * to ensure our P2P probe responses have the privacy bit
+ * set in the 802.11 WPA IE. Some peer devices may not
+ * initiate WPS with us if this bit is not set.
+ */
+ ret = brcmf_fil_bsscfg_int_set(vif->ifp, "wsec", AES_ENABLED);
+ if (ret < 0) {
+ brcmf_err("wsec error %d\n", ret);
+ goto exit;
+ }
+
+ set_bit(BRCMF_P2P_STATUS_ENABLED, &p2p->status);
+exit:
+ return ret;
+}
+
+/**
+ * brcmf_p2p_escan() - initiate a P2P scan.
+ *
+ * @p2p: P2P specific data.
+ * @num_chans: number of channels to scan.
+ * @chanspecs: channel parameters for @num_chans channels.
+ * @search_state: P2P discover state to use.
+ * @action: scan action to pass to firmware.
+ * @bss_type: type of P2P bss.
+ */
+static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
+ u16 chanspecs[], s32 search_state, u16 action,
+ enum p2p_bss_type bss_type)
+{
+ s32 ret = 0;
+ s32 memsize = offsetof(struct brcmf_p2p_scan_le,
+ eparams.params_le.channel_list);
+ s32 nprobes;
+ s32 active;
+ u32 i;
+ u8 *memblk;
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_p2p_scan_le *p2p_params;
+ struct brcmf_scan_params_le *sparams;
+ struct brcmf_ssid ssid;
+
+ memsize += num_chans * sizeof(__le16);
+ memblk = kzalloc(memsize, GFP_KERNEL);
+ if (!memblk)
+ return -ENOMEM;
+
+ vif = p2p->bss_idx[bss_type].vif;
+ if (vif == NULL) {
+ brcmf_err("no vif for bss type %d\n", bss_type);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ switch (search_state) {
+ case WL_P2P_DISC_ST_SEARCH:
+ /*
+ * In SEARCH state we don't need to set the SSID explicitly
+ * because the dongle uses the P2P wildcard SSID internally
+ * by default.
+ */
+ /* use null ssid */
+ ssid.SSID_len = 0;
+ memset(ssid.SSID, 0, sizeof(ssid.SSID));
+ break;
+ case WL_P2P_DISC_ST_SCAN:
+ /*
+ * wpa_supplicant issues the p2p_find command with type social
+ * or progressive. For progressive we need to set the ssid to
+ * the P2P wildcard, because otherwise we would just do a
+ * broadcast scan.
+ */
+ ssid.SSID_len = BRCMF_P2P_WILDCARD_SSID_LEN;
+ memcpy(ssid.SSID, BRCMF_P2P_WILDCARD_SSID, ssid.SSID_len);
+ break;
+ default:
+ brcmf_err(" invalid search state %d\n", search_state);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ brcmf_p2p_set_discover_state(vif->ifp, search_state, 0, 0);
+
+ /*
+ * set p2p scan parameters.
+ */
+ p2p_params = (struct brcmf_p2p_scan_le *)memblk;
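+ /* type 'E' selects the escan parameter set (eparams) */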
+ p2p_params->type = 'E';
+
+ /* determine the scan engine parameters */
+ sparams = &p2p_params->eparams.params_le;
+ sparams->bss_type = DOT11_BSSTYPE_ANY;
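+ /* scan_type: 0 for an active scan, 1 for a passive scan */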
+ if (p2p->cfg->active_scan)
+ sparams->scan_type = 0;
+ else
+ sparams->scan_type = 1;
+
+ memset(&sparams->bssid, 0xFF, ETH_ALEN);
+ if (ssid.SSID_len)
+ memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len);
+ sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len);
+ sparams->home_time = cpu_to_le32(P2PAPI_SCAN_HOME_TIME_MS);
+
+ /*
+ * SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan
+ * supported by the supplicant.
+ */
+ if (num_chans == SOCIAL_CHAN_CNT || num_chans == (SOCIAL_CHAN_CNT + 1))
+ active = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS;
+ else if (num_chans == AF_PEER_SEARCH_CNT)
+ active = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS;
+ else if (wl_get_vif_state_all(p2p->cfg, BRCMF_VIF_STATUS_CONNECTED))
+ active = -1;
+ else
+ active = P2PAPI_SCAN_DWELL_TIME_MS;
+
+ /* Override scan params to find a peer for a connection */
+ if (num_chans == 1) {
+ active = WL_SCAN_CONNECT_DWELL_TIME_MS;
+ /* WAR to sync with presence period of VSDB GO.
+ * send probe request more frequently
+ */
+ nprobes = active / WL_SCAN_JOIN_PROBE_INTERVAL_MS;
+ } else {
+ nprobes = active / P2PAPI_SCAN_NPROBS_TIME_MS;
+ }
+
+ if (nprobes <= 0)
+ nprobes = 1;
+
+ brcmf_dbg(INFO, "nprobes # %d, active_time %d\n", nprobes, active);
+ sparams->active_time = cpu_to_le32(active);
+ sparams->nprobes = cpu_to_le32(nprobes);
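+ /* passive_time of -1 lets the firmware use its default dwell time */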
+ sparams->passive_time = cpu_to_le32(-1);
+ sparams->channel_num = cpu_to_le32(num_chans &
+ BRCMF_SCAN_PARAMS_COUNT_MASK);
+ for (i = 0; i < num_chans; i++)
+ sparams->channel_list[i] = cpu_to_le16(chanspecs[i]);
+
+ /* set the escan specific parameters */
+ p2p_params->eparams.version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
+ p2p_params->eparams.action = cpu_to_le16(action);
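+ /* arbitrary sync id, echoed back in the escan result events */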
+ p2p_params->eparams.sync_id = cpu_to_le16(0x1234);
+ /* perform p2p scan on primary device */
+ ret = brcmf_fil_bsscfg_data_set(vif->ifp, "p2p_scan", memblk, memsize);
+ if (!ret)
+ set_bit(BRCMF_SCAN_STATUS_BUSY, &p2p->cfg->scan_status);
+exit:
+ kfree(memblk);
+ return ret;
+}
+
+/**
+ * brcmf_p2p_run_escan() - escan callback for peer-to-peer.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @ndev: net device for which scan is requested.
+ * @request: scan request from cfg80211.
+ * @action: scan action.
+ *
+ * Determines the P2P discovery state based on the scan request parameters and
+ * validates the channels in the request.
+ */
+static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ u16 action)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ s32 err = 0;
+ s32 search_state = WL_P2P_DISC_ST_SCAN;
+ struct brcmf_cfg80211_vif *vif;
+ struct net_device *dev = NULL;
+ int i, num_nodfs = 0;
+ u16 *chanspecs;
+
+ brcmf_dbg(TRACE, "enter\n");
+
+ if (!request) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (request->n_channels) {
+ chanspecs = kcalloc(request->n_channels, sizeof(*chanspecs),
+ GFP_KERNEL);
+ if (!chanspecs) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+ if (vif)
+ dev = vif->wdev.netdev;
+ if (request->n_channels == 3 &&
+ request->channels[0]->hw_value == SOCIAL_CHAN_1 &&
+ request->channels[1]->hw_value == SOCIAL_CHAN_2 &&
+ request->channels[2]->hw_value == SOCIAL_CHAN_3) {
+ /* SOCIAL CHANNELS 1, 6, 11 */
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
+ } else if (dev != NULL && vif->mode == WL_MODE_AP) {
+ /* If you are already a GO, then do SEARCH only */
+ brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ } else {
+ brcmf_dbg(INFO, "P2P SCAN STATE START\n");
+ }
+
+ /*
+ * no P2P scanning on passive or DFS channels.
+ */
+ for (i = 0; i < request->n_channels; i++) {
+ struct ieee80211_channel *chan = request->channels[i];
+
+ if (chan->flags & (IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_PASSIVE_SCAN))
+ continue;
+
+ chanspecs[i] = channel_to_chanspec(chan);
+ brcmf_dbg(INFO, "%d: chan=%d, channel spec=%x\n",
+ num_nodfs, chan->hw_value, chanspecs[i]);
+ num_nodfs++;
+ }
+ err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state,
+ action, P2PAPI_BSSCFG_DEVICE);
+ }
+exit:
+ if (err)
+ brcmf_err("error (%d)\n", err);
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_find_listen_channel() - find listen channel in ie string.
+ *
+ * @ie: string of information elements.
+ * @ie_len: length of string.
+ *
+ * Scan the IEs for the P2P IE and look for the listen channel attribute
+ * (attribute id 6). If present, determine the channel and return it.
+ */
+static s32 brcmf_p2p_find_listen_channel(const u8 *ie, u32 ie_len)
+{
+ u8 channel_ie[5];
+ s32 listen_channel;
+ s32 err;
+
+ err = cfg80211_get_p2p_attr(ie, ie_len,
+ IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
+ channel_ie, sizeof(channel_ie));
+ if (err < 0)
+ return err;
+
+ /* listen channel subel length format: */
+ /* 3(country) + 1(op. class) + 1(chan num) */
+ listen_channel = (s32)channel_ie[3 + 1];
+
+ if (listen_channel == SOCIAL_CHAN_1 ||
+ listen_channel == SOCIAL_CHAN_2 ||
+ listen_channel == SOCIAL_CHAN_3) {
+ brcmf_dbg(INFO, "Found my Listen Channel %d\n", listen_channel);
+ return listen_channel;
+ }
+
+ return -EPERM;
+}
+
+
+/**
+ * brcmf_p2p_scan_prep() - prepare scan based on request.
+ *
+ * @wiphy: wiphy device.
+ * @request: scan request from cfg80211.
+ * @vif: vif on which scan request is to be executed.
+ *
+ * Prepare the scan appropriately for the type of scan requested. Overrides the
+ * escan .run() callback for peer-to-peer scanning.
+ */
+int brcmf_p2p_scan_prep(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request,
+ struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ int err = 0;
+
+ if (brcmf_p2p_scan_is_p2p_request(request)) {
+ /* find my listen channel */
+ err = brcmf_p2p_find_listen_channel(request->ie,
+ request->ie_len);
+ if (err < 0)
+ return err;
+
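+ /* a non-negative return value is the listen channel itself */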
+ p2p->afx_hdl.my_listen_chan = err;
+
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n");
+
+ err = brcmf_p2p_enable_discovery(p2p);
+ if (err)
+ return err;
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+
+ /* override .run_escan() callback. */
+ cfg->escan_info.run = brcmf_p2p_run_escan;
+ }
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG,
+ request->ie, request->ie_len);
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_discover_listen() - set firmware to discover listen state.
+ *
+ * @p2p: p2p device.
+ * @channel: channel nr for discover listen.
+ * @duration: time in ms to stay on channel.
+ *
+ */
+static s32
+brcmf_p2p_discover_listen(struct brcmf_p2p_info *p2p, u16 channel, u32 duration)
+{
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ u16 chanspec;
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (!vif) {
+ brcmf_err("Discovery is not set, so we have nothing to do\n");
+ err = -EPERM;
+ goto exit;
+ }
+
+ if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status)) {
+ brcmf_err("Previous LISTEN is not completed yet\n");
+ /* WAR: return OK to prevent a cookie mismatch in wpa_supplicant */
+ goto exit;
+ }
+
+ chanspec = brcmf_p2p_chnr_to_chspec(channel);
+ err = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_LISTEN,
+ chanspec, (u16)duration);
+ if (!err) {
+ set_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status);
+ p2p->remain_on_channel_cookie++;
+ }
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_remain_on_channel() - put device on channel and stay there.
+ *
+ * @wiphy: wiphy device.
+ * @wdev: wireless device.
+ * @channel: channel to stay on.
+ * @duration: time in ms to remain on channel.
+ * @cookie: cookie identifying this request towards cfg80211.
+ *
+ */
+int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *channel,
+ unsigned int duration, u64 *cookie)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ s32 err;
+ u16 channel_nr;
+
+ channel_nr = ieee80211_frequency_to_channel(channel->center_freq);
+ brcmf_dbg(TRACE, "Enter, channel: %d, duration ms (%d)\n", channel_nr,
+ duration);
+
+ err = brcmf_p2p_enable_discovery(p2p);
+ if (err)
+ goto exit;
+ err = brcmf_p2p_discover_listen(p2p, channel_nr, duration);
+ if (err)
+ goto exit;
+
+ memcpy(&p2p->remain_on_channel, channel, sizeof(*channel));
+ *cookie = p2p->remain_on_channel_cookie;
+ cfg80211_ready_on_channel(wdev, *cookie, channel, duration, GFP_KERNEL);
+
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_notify_listen_complete() - p2p listen has completed.
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, to make it usable for fweh event dispatcher.
+ * @data: payload of message. Not used.
+ *
+ */
+int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+
+ brcmf_dbg(TRACE, "Enter\n");
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN,
+ &p2p->status)) {
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status)) {
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ &p2p->status);
+ brcmf_dbg(INFO, "Listen DONE, wake up wait_next_af\n");
+ complete(&p2p->wait_next_af);
+ }
+
+ cfg80211_remain_on_channel_expired(&ifp->vif->wdev,
+ p2p->remain_on_channel_cookie,
+ &p2p->remain_on_channel,
+ GFP_KERNEL);
+ }
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_cancel_remain_on_channel() - cancel p2p listen state.
+ *
+ * @ifp: interface control.
+ *
+ */
+void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp)
+{
+ if (!ifp)
+ return;
+ brcmf_p2p_set_discover_state(ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+ brcmf_p2p_notify_listen_complete(ifp, NULL, NULL);
+}
+
+
+/**
+ * brcmf_p2p_act_frm_search() - search function for action frame.
+ *
+ * @p2p: p2p device.
+ * @channel: channel on which the action frame is to be transmitted.
+ *
+ * Search function used to reach a common channel for sending the action
+ * frame. When @channel is 0 all social channels are used to send the af.
+ */
+static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel)
+{
+ s32 err;
+ u32 channel_cnt;
+ u16 *default_chan_list;
+ u32 i;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (channel)
+ channel_cnt = AF_PEER_SEARCH_CNT;
+ else
+ channel_cnt = SOCIAL_CHAN_CNT;
+ default_chan_list = kzalloc(channel_cnt * sizeof(*default_chan_list),
+ GFP_KERNEL);
+ if (default_chan_list == NULL) {
+ brcmf_err("channel list allocation failed\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ if (channel) {
+ /* insert same channel to the chan_list */
+ for (i = 0; i < channel_cnt; i++)
+ default_chan_list[i] =
+ brcmf_p2p_chnr_to_chspec(channel);
+ } else {
+ default_chan_list[0] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_1);
+ default_chan_list[1] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_2);
+ default_chan_list[2] = brcmf_p2p_chnr_to_chspec(SOCIAL_CHAN_3);
+ }
+ err = brcmf_p2p_escan(p2p, channel_cnt, default_chan_list,
+ WL_P2P_DISC_ST_SEARCH, WL_ESCAN_ACTION_START,
+ P2PAPI_BSSCFG_DEVICE);
+ kfree(default_chan_list);
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_afx_handler() - afx worker thread.
+ *
+ * @work: afx work structure embedded in &struct afx_hdl.
+ *
+ */
+static void brcmf_p2p_afx_handler(struct work_struct *work)
+{
+ struct afx_hdl *afx_hdl = container_of(work, struct afx_hdl, afx_work);
+ struct brcmf_p2p_info *p2p = container_of(afx_hdl,
+ struct brcmf_p2p_info,
+ afx_hdl);
+ s32 err;
+
+ if (!afx_hdl->is_active)
+ return;
+
+ if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
+ /* 100ms ~ 300ms */
+ err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
+ 100 * (1 + (random32() % 3)));
+ else
+ err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
+
+ if (err) {
+ brcmf_err("ERROR occurred! value is (%d)\n", err);
+ if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status))
+ complete(&afx_hdl->act_frm_scan);
+ }
+}
+
+
+/**
+ * brcmf_p2p_af_searching_channel() - search channel.
+ *
+ * @p2p: p2p device info struct.
+ *
+ */
+static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
+{
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ struct brcmf_cfg80211_vif *pri_vif;
+ unsigned long duration;
+ s32 retry;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ pri_vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+
+ INIT_COMPLETION(afx_hdl->act_frm_scan);
+ set_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
+ afx_hdl->is_active = true;
+ afx_hdl->peer_chan = P2P_INVALID_CHANNEL;
+
+ /* Loop to wait until we find a peer's channel or the
+ * pending action frame tx is cancelled.
+ */
+ retry = 0;
+ duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT);
+ while ((retry < P2P_CHANNEL_SYNC_RETRY) &&
+ (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) {
+ afx_hdl->is_listen = false;
+ brcmf_dbg(TRACE, "Scheduling action frame for sending.. (%d)\n",
+ retry);
+ /* search peer on peer's listen channel */
+ schedule_work(&afx_hdl->afx_work);
+ wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration);
+ if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+ (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status)))
+ break;
+
+ if (afx_hdl->my_listen_chan) {
+ brcmf_dbg(TRACE, "Scheduling listen peer, channel=%d\n",
+ afx_hdl->my_listen_chan);
+ /* listen on my listen channel */
+ afx_hdl->is_listen = true;
+ schedule_work(&afx_hdl->afx_work);
+ wait_for_completion_timeout(&afx_hdl->act_frm_scan,
+ duration);
+ }
+ if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+ (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status)))
+ break;
+ retry++;
+
+ /* if sta is connected or connecting, sleep for a while before
+ * retry af tx or finding a peer
+ */
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &pri_vif->sme_state) ||
+ test_bit(BRCMF_VIF_STATUS_CONNECTING, &pri_vif->sme_state))
+ msleep(P2P_DEFAULT_SLEEP_TIME_VSDB);
+ }
+
+ brcmf_dbg(TRACE, "Completed search/listen peer_chan=%d\n",
+ afx_hdl->peer_chan);
+ afx_hdl->is_active = false;
+
+ clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status);
+
+ return afx_hdl->peer_chan;
+}
+
+
+/**
+ * brcmf_p2p_scan_finding_common_channel() - check if escan is used to find a common channel.
+ *
+ * @cfg: common configuration struct.
+ * @bi: bss info struct, result from scan.
+ *
+ */
+bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_bss_info_le *bi)
+
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ u8 *ie;
+ s32 err;
+ u8 p2p_dev_addr[ETH_ALEN];
+
+ if (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status))
+ return false;
+
+ if (bi == NULL) {
+ brcmf_dbg(TRACE, "ACTION FRAME SCAN Done\n");
+ if (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)
+ complete(&afx_hdl->act_frm_scan);
+ return true;
+ }
+
+ ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
+ memset(p2p_dev_addr, 0, sizeof(p2p_dev_addr));
+ err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length),
+ IEEE80211_P2P_ATTR_DEVICE_INFO,
+ p2p_dev_addr, sizeof(p2p_dev_addr));
+ if (err < 0)
+ err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length),
+ IEEE80211_P2P_ATTR_DEVICE_ID,
+ p2p_dev_addr, sizeof(p2p_dev_addr));
+ if ((err >= 0) &&
+ (!memcmp(p2p_dev_addr, afx_hdl->tx_dst_addr, ETH_ALEN))) {
+ afx_hdl->peer_chan = bi->ctl_ch ? bi->ctl_ch :
+ CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
+ brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n",
+ afx_hdl->tx_dst_addr, afx_hdl->peer_chan);
+ complete(&afx_hdl->act_frm_scan);
+ }
+ return true;
+}
+
+/**
+ * brcmf_p2p_stop_wait_next_action_frame() - finish scan if af tx complete.
+ *
+ * @cfg: common configuration struct.
+ *
+ */
+static void
+brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct net_device *ndev = cfg->escan_info.ndev;
+
+ if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
+ (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
+ test_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status))) {
+ brcmf_dbg(TRACE, "*** Wake UP ** abort actframe iovar\n");
+ /* if channel is not zero, "actfame" uses off channel scan.
+ * So abort scan for off channel completion.
+ */
+ if (p2p->af_sent_channel)
+ brcmf_notify_escan_complete(cfg, ndev, true, true);
+ } else if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status)) {
+ brcmf_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n");
+ /* So abort scan to cancel listen */
+ brcmf_notify_escan_complete(cfg, ndev, true, true);
+ }
+}
+
+
+/**
+ * brcmf_p2p_gon_req_collision() - check for GO negotiation collision.
+ *
+ * @p2p: p2p device info struct.
+ *
+ * Return true if the received action frame is to be dropped.
+ */
+static bool
+brcmf_p2p_gon_req_collision(struct brcmf_p2p_info *p2p, u8 *mac)
+{
+ struct brcmf_cfg80211_info *cfg = p2p->cfg;
+ struct brcmf_if *ifp;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (!test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) ||
+ !p2p->gon_req_action)
+ return false;
+
+ brcmf_dbg(TRACE, "GO Negotiation Request COLLISION !!!\n");
+ /* if the sa (peer) addr is less than the da (own) addr, this device
+ * processes the peer's gon request and blocks sending its own gon req.
+ * Otherwise (sa addr > da addr) this device keeps its own gon request
+ * and drops the gon request of the peer.
+ */
+ ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp;
+ if (memcmp(mac, ifp->mac_addr, ETH_ALEN) < 0) {
+ brcmf_dbg(INFO, "Block transmit gon req !!!\n");
+ p2p->block_gon_req_tx = true;
+ /* if we are finding a common channel for sending af,
+ * do not scan more to block to send current gon req
+ */
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status))
+ complete(&p2p->afx_hdl.act_frm_scan);
+ if (test_and_clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ &p2p->status))
+ brcmf_p2p_stop_wait_next_action_frame(cfg);
+ return false;
+ }
+
+ /* drop gon request of peer to process gon request by this device. */
+ brcmf_dbg(INFO, "Drop received gon req !!!\n");
+
+ return true;
+}
+
+
+/**
+ * brcmf_p2p_notify_action_frame_rx() - received action frame.
+ *
+ * @ifp: interface control.
+ * @e: event message.
+ * @data: payload of message, containing action frame data.
+ *
+ */
+int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ struct wireless_dev *wdev;
+ u32 mgmt_frame_len = e->datalen - sizeof(struct brcmf_rx_mgmt_data);
+ struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+ u8 *frame = (u8 *)(rxframe + 1);
+ struct brcmf_p2p_pub_act_frame *act_frm;
+ struct brcmf_p2psd_gas_pub_act_frame *sd_act_frm;
+ u16 chanspec = be16_to_cpu(rxframe->chanspec);
+ struct ieee80211_mgmt *mgmt_frame;
+ s32 freq;
+ u16 mgmt_type;
+ u8 action;
+
+ /* Check if wpa_supplicant has registered for this frame */
+ brcmf_dbg(INFO, "ifp->vif->mgmt_rx_reg %04x\n", ifp->vif->mgmt_rx_reg);
+ mgmt_type = (IEEE80211_STYPE_ACTION & IEEE80211_FCTL_STYPE) >> 4;
+ if ((ifp->vif->mgmt_rx_reg & BIT(mgmt_type)) == 0)
+ return 0;
+
+ brcmf_p2p_print_actframe(false, frame, mgmt_frame_len);
+
+ action = P2P_PAF_SUBTYPE_INVALID;
+ if (brcmf_p2p_is_pub_action(frame, mgmt_frame_len)) {
+ act_frm = (struct brcmf_p2p_pub_act_frame *)frame;
+ action = act_frm->subtype;
+ if ((action == P2P_PAF_GON_REQ) &&
+ (brcmf_p2p_gon_req_collision(p2p, (u8 *)e->addr))) {
+ if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status) &&
+ (memcmp(afx_hdl->tx_dst_addr, e->addr,
+ ETH_ALEN) == 0)) {
+ afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec);
+ brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
+ afx_hdl->peer_chan);
+ complete(&afx_hdl->act_frm_scan);
+ }
+ return 0;
+ }
+ /* After complete GO Negotiation, roll back to mpc mode */
+ if ((action == P2P_PAF_GON_CONF) ||
+ (action == P2P_PAF_PROVDIS_RSP))
+ brcmf_set_mpc(ifp->ndev, 1);
+ if (action == P2P_PAF_GON_CONF) {
+ brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n");
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ }
+ } else if (brcmf_p2p_is_gas_action(frame, mgmt_frame_len)) {
+ sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
+ action = sd_act_frm->action;
+ }
+
+ if (test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) &&
+ (p2p->next_af_subtype == action)) {
+ brcmf_dbg(TRACE, "We got a right next frame! (%d)\n", action);
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ &p2p->status);
+ /* Stop waiting for next AF. */
+ brcmf_p2p_stop_wait_next_action_frame(cfg);
+ }
+
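+ /* firmware delivers only the action frame body; rebuild a full
+ * 802.11 management frame around it before handing it to cfg80211
+ */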
+ mgmt_frame = kzalloc(offsetof(struct ieee80211_mgmt, u) +
+ mgmt_frame_len, GFP_KERNEL);
+ if (!mgmt_frame) {
+ brcmf_err("No memory available for action frame\n");
+ return -ENOMEM;
+ }
+ memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN);
+ brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mgmt_frame->bssid,
+ ETH_ALEN);
+ memcpy(mgmt_frame->sa, e->addr, ETH_ALEN);
+ mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
+ memcpy(&mgmt_frame->u, frame, mgmt_frame_len);
+ mgmt_frame_len += offsetof(struct ieee80211_mgmt, u);
+
+ freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec),
+ CHSPEC_IS2G(chanspec) ?
+ IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ);
+ wdev = ifp->ndev->ieee80211_ptr;
+ cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len,
+ GFP_ATOMIC);
+
+ kfree(mgmt_frame);
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_notify_action_tx_complete() - transmit action frame complete
+ *
+ * @ifp: interface control.
+ * @e: event message.
+ * @data: not used.
+ *
+ */
+int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+
+ brcmf_dbg(INFO, "Enter: event %s, status=%d\n",
+ e->event_code == BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE ?
+ "ACTION_FRAME_OFF_CHAN_COMPLETE" : "ACTION_FRAME_COMPLETE",
+ e->status);
+
+ if (!test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status))
+ return 0;
+
+ if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
+ if (e->status == BRCMF_E_STATUS_SUCCESS)
+ set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
+ &p2p->status);
+ else {
+ set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+ /* If there is no ack, we don't need to wait for
+ * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event
+ */
+ brcmf_p2p_stop_wait_next_action_frame(cfg);
+ }
+
+ } else {
+ complete(&p2p->send_af_done);
+ }
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_tx_action_frame() - send action frame over fil.
+ *
+ * @p2p: p2p info struct for vif.
+ * @af_params: action frame data/info.
+ *
+ * Send an action frame immediately without doing channel synchronization.
+ *
+ * This function waits for a completion event before returning.
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ */
+static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
+ struct brcmf_fil_af_params_le *af_params)
+{
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ s32 timeout = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ INIT_COMPLETION(p2p->send_af_done);
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
+ sizeof(*af_params));
+ if (err) {
+ brcmf_err(" sending action frame has failed\n");
+ goto exit;
+ }
+
+ p2p->af_sent_channel = le32_to_cpu(af_params->channel);
+ p2p->af_tx_sent_jiffies = jiffies;
+
+ timeout = wait_for_completion_timeout(&p2p->send_af_done,
+ msecs_to_jiffies(P2P_AF_MAX_WAIT_TIME));
+
+ if (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status)) {
+ brcmf_dbg(TRACE, "TX action frame operation is success\n");
+ } else {
+ err = -EIO;
+ brcmf_dbg(TRACE, "TX action frame operation has failed\n");
+ }
+ /* clear status bit for action tx */
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_pub_af_tx() - public action frame tx routine.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @af_params: action frame data/info.
+ * @config_af_params: configuration data for action frame.
+ *
+ * Routine which transmits a public action frame.
+ */
+static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_fil_af_params_le *af_params,
+ struct brcmf_config_af_params *config_af_params)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_fil_action_frame_le *action_frame;
+ struct brcmf_p2p_pub_act_frame *act_frm;
+ s32 err = 0;
+ u16 ie_len;
+
+ action_frame = &af_params->action_frame;
+ act_frm = (struct brcmf_p2p_pub_act_frame *)(action_frame->data);
+
+ config_af_params->extra_listen = true;
+
+ switch (act_frm->subtype) {
+ case P2P_PAF_GON_REQ:
+ brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status set\n");
+ set_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ config_af_params->mpc_onoff = 0;
+ config_af_params->search_channel = true;
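+ /* the expected response subtype is always the request subtype + 1 */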
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ p2p->gon_req_action = true;
+ /* increase dwell time to wait for RESP frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_GON_RSP:
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* increase dwell time to wait for CONF frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_GON_CONF:
+ /* If we reached till GO Neg confirmation reset the filter */
+ brcmf_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n");
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ /* turn on mpc again if go nego is done */
+ config_af_params->mpc_onoff = 1;
+ /* minimize dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ case P2P_PAF_INVITE_REQ:
+ config_af_params->search_channel = true;
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* increase dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_INVITE_RSP:
+ /* minimize dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ case P2P_PAF_DEVDIS_REQ:
+ config_af_params->search_channel = true;
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* maximize dwell time to wait for RESP frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_LONG_DWELL_TIME);
+ break;
+ case P2P_PAF_DEVDIS_RSP:
+ /* minimize dwell time */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ case P2P_PAF_PROVDIS_REQ:
+ ie_len = le16_to_cpu(action_frame->len) -
+ offsetof(struct brcmf_p2p_pub_act_frame, elts);
+ if (cfg80211_get_p2p_attr(&act_frm->elts[0], ie_len,
+ IEEE80211_P2P_ATTR_GROUP_ID,
+ NULL, 0) < 0)
+ config_af_params->search_channel = true;
+ config_af_params->mpc_onoff = 0;
+ p2p->next_af_subtype = act_frm->subtype + 1;
+ /* increase dwell time to wait for RESP frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ break;
+ case P2P_PAF_PROVDIS_RSP:
+ /* wpa_supplicant sends the go nego req right after prov disc */
+ p2p->next_af_subtype = P2P_PAF_GON_REQ;
+ /* increase dwell time to MED level */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ config_af_params->extra_listen = false;
+ break;
+ default:
+ brcmf_err("Unknown p2p pub act frame subtype: %d\n",
+ act_frm->subtype);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+/**
+ * brcmf_p2p_send_action_frame() - send action frame.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @ndev: net device to transmit on.
+ * @af_params: configuration data for action frame.
+ */
+bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct brcmf_fil_af_params_le *af_params)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_fil_action_frame_le *action_frame;
+ struct brcmf_config_af_params config_af_params;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ u16 action_frame_len;
+ bool ack = false;
+ u8 category;
+ u8 action;
+ s32 tx_retry;
+ s32 extra_listen_time;
+ uint delta_ms;
+
+ action_frame = &af_params->action_frame;
+ action_frame_len = le16_to_cpu(action_frame->len);
+
+ brcmf_p2p_print_actframe(true, action_frame->data, action_frame_len);
+
+ /* Add the default dwell time. Dwell time to stay off-channel */
+ /* to wait for a response action frame after transmitting an */
+ /* GO Negotiation action frame */
+ af_params->dwell_time = cpu_to_le32(P2P_AF_DWELL_TIME);
+
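+ /* category and action id are at fixed offsets in the action frame body */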
+ category = action_frame->data[DOT11_ACTION_CAT_OFF];
+ action = action_frame->data[DOT11_ACTION_ACT_OFF];
+
+ /* initialize variables */
+ p2p->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+ p2p->gon_req_action = false;
+
+ /* config parameters */
+ config_af_params.mpc_onoff = -1;
+ config_af_params.search_channel = false;
+ config_af_params.extra_listen = false;
+
+ if (brcmf_p2p_is_pub_action(action_frame->data, action_frame_len)) {
+ /* p2p public action frame process */
+ if (brcmf_p2p_pub_af_tx(cfg, af_params, &config_af_params)) {
+ /* Just send unknown subtype frame with */
+ /* default parameters. */
+ brcmf_err("P2P Public action frame, unknown subtype.\n");
+ }
+ } else if (brcmf_p2p_is_gas_action(action_frame->data,
+ action_frame_len)) {
+ /* service discovery process */
+ if (action == P2PSD_ACTION_ID_GAS_IREQ ||
+ action == P2PSD_ACTION_ID_GAS_CREQ) {
+ /* configure service discovery query frame */
+ config_af_params.search_channel = true;
+
+ /* save next af subtype to cancel */
+ /* remaining dwell time */
+ p2p->next_af_subtype = action + 1;
+
+ af_params->dwell_time =
+ cpu_to_le32(P2P_AF_MED_DWELL_TIME);
+ } else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
+ action == P2PSD_ACTION_ID_GAS_CRESP) {
+ /* configure service discovery response frame */
+ af_params->dwell_time =
+ cpu_to_le32(P2P_AF_MIN_DWELL_TIME);
+ } else {
+ brcmf_err("Unknown action type: %d\n", action);
+ goto exit;
+ }
+ } else if (brcmf_p2p_is_p2p_action(action_frame->data,
+ action_frame_len)) {
+ /* do not configure anything. it will be */
+ /* sent with a default configuration */
+ } else {
+ brcmf_err("Unknown Frame: category 0x%x, action 0x%x\n",
+ category, action);
+ return false;
+ }
+
+ /* if connecting on primary iface, sleep for a while before sending
+ * af tx for VSDB
+ */
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTING,
+ &p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->sme_state))
+ msleep(50);
+
+ /* if scan is ongoing, abort current scan. */
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+ brcmf_abort_scanning(cfg);
+
+ memcpy(afx_hdl->tx_dst_addr, action_frame->da, ETH_ALEN);
+
+ /* To make sure the action frame is sent successfully, turn off mpc */
+ if (config_af_params.mpc_onoff == 0)
+ brcmf_set_mpc(ndev, 0);
+
+ /* set status and destination address before sending af */
+ if (p2p->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+ /* set status to cancel the remaining dwell time in the rx process */
+ set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
+ }
+
+ p2p->af_sent_channel = 0;
+ set_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status);
+ /* validate channel and p2p ies */
+ if (config_af_params.search_channel &&
+ IS_P2P_SOCIAL_CHANNEL(le32_to_cpu(af_params->channel)) &&
+ p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->saved_ie.probe_req_ie_len) {
+ afx_hdl = &p2p->afx_hdl;
+ afx_hdl->peer_listen_chan = le32_to_cpu(af_params->channel);
+
+ if (brcmf_p2p_af_searching_channel(p2p) ==
+ P2P_INVALID_CHANNEL) {
+ brcmf_err("Couldn't find peer's channel.\n");
+ goto exit;
+ }
+
+ /* Abort the scan even for VSDB scenarios. The scan gets aborted
+ * in firmware, but only after the piggyback algorithm check. To
+ * take care of the current piggyback algorithm, let's abort the
+ * scan here ourselves.
+ */
+ brcmf_notify_escan_complete(cfg, ndev, true, true);
+
+ /* update channel */
+ af_params->channel = cpu_to_le32(afx_hdl->peer_chan);
+ }
+
+ tx_retry = 0;
+ while (!p2p->block_gon_req_tx &&
+ (ack == false) && (tx_retry < P2P_AF_TX_MAX_RETRY)) {
+ ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
+ tx_retry++;
+ }
+ if (ack == false) {
+ brcmf_err("Failed to send Action Frame(retry %d)\n", tx_retry);
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ }
+
+exit:
+ clear_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status);
+
+ /* WAR: sometimes the dongle does not keep the dwell time of 'actframe'.
+ * If we couldn't get the next action response frame and the dongle does
+ * not keep the dwell time, go to listen state again to get the next
+ * action response frame.
+ */
+ if (ack && config_af_params.extra_listen && !p2p->block_gon_req_tx &&
+ test_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) &&
+ p2p->af_sent_channel == afx_hdl->my_listen_chan) {
+ delta_ms = jiffies_to_msecs(jiffies - p2p->af_tx_sent_jiffies);
+ if (le32_to_cpu(af_params->dwell_time) > delta_ms)
+ extra_listen_time = le32_to_cpu(af_params->dwell_time) -
+ delta_ms;
+ else
+ extra_listen_time = 0;
+ if (extra_listen_time > 50) {
+ set_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status);
+ brcmf_dbg(INFO, "Wait more time! actual af time:%d, calculated extra listen:%d\n",
+ le32_to_cpu(af_params->dwell_time),
+ extra_listen_time);
+ extra_listen_time += 100;
+ if (!brcmf_p2p_discover_listen(p2p,
+ p2p->af_sent_channel,
+ extra_listen_time)) {
+ unsigned long duration;
+
+ extra_listen_time += 100;
+ duration = msecs_to_jiffies(extra_listen_time);
+ wait_for_completion_timeout(&p2p->wait_next_af,
+ duration);
+ }
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status);
+ }
+ }
+
+ if (p2p->block_gon_req_tx) {
+ /* if ack is true, the supplicant will wait more time (100ms),
+ * so return success to get that extra time.
+ */
+ p2p->block_gon_req_tx = false;
+ ack = true;
+ }
+
+ clear_bit(BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
+ /* if all done, turn mpc on again */
+ if (config_af_params.mpc_onoff == 1)
+ brcmf_set_mpc(ndev, 1);
+
+ return ack;
+}
+
+/**
+ * brcmf_p2p_notify_rx_mgmt_p2p_probereq() - Event handler for p2p probe req.
+ *
+ * @ifp: interface pointer for which event was received.
+ * @e: event message.
+ * @data: payload of event message (probe request).
+ */
+s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ struct wireless_dev *wdev;
+ struct brcmf_cfg80211_vif *vif = ifp->vif;
+ struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+ u16 chanspec = be16_to_cpu(rxframe->chanspec);
+ u8 *mgmt_frame;
+ u32 mgmt_frame_len;
+ s32 freq;
+ u16 mgmt_type;
+
+ brcmf_dbg(INFO, "Enter: event %d reason %d\n", e->event_code,
+ e->reason);
+
+ if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
+ (memcmp(afx_hdl->tx_dst_addr, e->addr, ETH_ALEN) == 0)) {
+ afx_hdl->peer_chan = CHSPEC_CHANNEL(chanspec);
+ brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
+ afx_hdl->peer_chan);
+ complete(&afx_hdl->act_frm_scan);
+ }
+
+ /* Firmware sends us two probe responses for each idx one. At the */
+ /* moment anything but bsscfgidx 0 is passed up to the supplicant. */
+ if (e->bsscfgidx == 0)
+ return 0;
+
+ /* Filter any P2P probe reqs arriving during the GO-NEG Phase */
+ if (test_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status)) {
+ brcmf_dbg(INFO, "Filtering P2P probe_req in GO-NEG phase\n");
+ return 0;
+ }
+
+ /* Check if wpa_supplicant has registered for this frame */
+ brcmf_dbg(INFO, "vif->mgmt_rx_reg %04x\n", vif->mgmt_rx_reg);
+ mgmt_type = (IEEE80211_STYPE_PROBE_REQ & IEEE80211_FCTL_STYPE) >> 4;
+ if ((vif->mgmt_rx_reg & BIT(mgmt_type)) == 0)
+ return 0;
+
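+ /* the probe request frame follows the rx management metadata */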
+ mgmt_frame = (u8 *)(rxframe + 1);
+ mgmt_frame_len = e->datalen - sizeof(*rxframe);
+ freq = ieee80211_channel_to_frequency(CHSPEC_CHANNEL(chanspec),
+ CHSPEC_IS2G(chanspec) ?
+ IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ);
+ wdev = ifp->ndev->ieee80211_ptr;
+ cfg80211_rx_mgmt(wdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+
+ brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",
+ mgmt_frame_len, e->datalen, chanspec, freq);
+
+ return 0;
+}
+
+
+/**
+ * brcmf_p2p_attach() - attach for P2P.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ */
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_if *pri_ifp;
+ struct brcmf_if *p2p_ifp;
+ struct brcmf_cfg80211_vif *p2p_vif;
+ struct brcmf_p2p_info *p2p;
+ struct brcmf_pub *drvr;
+ s32 bssidx;
+ s32 err = 0;
+
+ p2p = &cfg->p2p;
+ p2p->cfg = cfg;
+
+ drvr = cfg->pub;
+
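+ /* primary interface is at index 0; the P2P device interface
+ * created by firmware, if present, is at index 1
+ */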
+ pri_ifp = drvr->iflist[0];
+ p2p_ifp = drvr->iflist[1];
+
+ p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
+
+ if (p2p_ifp) {
+ p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE,
+ false);
+ if (IS_ERR(p2p_vif)) {
+ brcmf_err("could not create discovery vif\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ p2p_vif->ifp = p2p_ifp;
+ p2p_ifp->vif = p2p_vif;
+ p2p_vif->wdev.netdev = p2p_ifp->ndev;
+ p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev;
+ SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy));
+
+ p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
+
+ brcmf_p2p_generate_bss_mac(p2p);
+ brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
+
+ /* Initialize P2P Discovery in the firmware */
+ err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
+ if (err < 0) {
+ brcmf_err("set p2p_disc error\n");
+ brcmf_free_vif(p2p_vif);
+ goto exit;
+ }
+ /* obtain bsscfg index for P2P discovery */
+ err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
+ if (err < 0) {
+ brcmf_err("retrieving discover bsscfg index failed\n");
+ brcmf_free_vif(p2p_vif);
+ goto exit;
+ }
+ /* Verify that firmware uses same bssidx as driver !! */
+ if (p2p_ifp->bssidx != bssidx) {
+ brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
+ bssidx, p2p_ifp->bssidx);
+ brcmf_free_vif(p2p_vif);
+ goto exit;
+ }
+
+ init_completion(&p2p->send_af_done);
+ INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
+ init_completion(&p2p->afx_hdl.act_frm_scan);
+ init_completion(&p2p->wait_next_af);
+ }
+exit:
+ return err;
+}
+
+
+/**
+ * brcmf_p2p_detach() - detach P2P.
+ *
+ * @p2p: P2P specific data.
+ */
+void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
+{
+ struct brcmf_cfg80211_vif *vif;
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (vif != NULL) {
+ brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+ brcmf_p2p_deinit_discovery(p2p);
+ /* remove discovery interface */
+ brcmf_free_vif(vif);
+ p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+ }
+ /* just set it all to zero */
+ memset(p2p, 0, sizeof(*p2p));
+}
+
+/**
+ * brcmf_p2p_get_current_chanspec() - Get current operation channel.
+ *
+ * @p2p: P2P specific data.
+ * @chanspec: chanspec to be returned.
+ */
+static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p,
+ u16 *chanspec)
+{
+ struct brcmf_if *ifp;
+ struct brcmf_fil_chan_info_le ci;
+ s32 err;
+
+ ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+
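+ /* default to channel 11 in case the firmware query below fails */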
+ *chanspec = 11 & WL_CHANSPEC_CHAN_MASK;
+
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CHANNEL, &ci, sizeof(ci));
+ if (!err) {
+ *chanspec = le32_to_cpu(ci.hw_channel) & WL_CHANSPEC_CHAN_MASK;
+ if (*chanspec < CH_MAX_2G_CHANNEL)
+ *chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ *chanspec |= WL_CHANSPEC_BAND_5G;
+ }
+ *chanspec |= WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
+}
+
+/**
+ * brcmf_p2p_ifchange() - change a P2P role.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @if_type: interface type to change the connection bsscfg to.
+ *
+ * Returns 0 on success.
+ */
+int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
+ enum brcmf_fil_p2p_if_types if_type)
+{
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_cfg80211_vif *vif;
+ struct brcmf_fil_p2p_if_le if_request;
+ s32 err;
+ u16 chanspec;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+ if (!vif) {
+ brcmf_err("vif for P2PAPI_BSSCFG_PRIMARY does not exist\n");
+ return -EPERM;
+ }
+ brcmf_notify_escan_complete(cfg, vif->ifp->ndev, true, true);
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
+ if (!vif) {
+ brcmf_err("vif for P2PAPI_BSSCFG_CONNECTION does not exist\n");
+ return -EPERM;
+ }
+ brcmf_set_mpc(vif->ifp->ndev, 0);
+
+ /* In the concurrency case, the STA may already be associated on a */
+ /* particular channel, so retrieve the current channel of the primary */
+ /* interface and then start the virtual interface on that channel. */
+ brcmf_p2p_get_current_chanspec(p2p, &chanspec);
+
+ if_request.type = cpu_to_le16((u16)if_type);
+ if_request.chspec = cpu_to_le16(chanspec);
+ memcpy(if_request.addr, p2p->int_addr, sizeof(if_request.addr));
+
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+ err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request,
+ sizeof(if_request));
+ if (err) {
+ brcmf_err("p2p_ifupd FAILED, err=%d\n", err);
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ return err;
+ }
+ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_CHANGE,
+ msecs_to_jiffies(1500));
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ if (!err) {
+ brcmf_err("No BRCMF_E_IF_CHANGE event received\n");
+ return -EIO;
+ }
+
+ err = brcmf_fil_cmd_int_set(vif->ifp, BRCMF_C_SET_SCB_TIMEOUT,
+ BRCMF_SCB_TIMEOUT_VALUE);
+
+ return err;
+}
+
+static int brcmf_p2p_request_p2p_if(struct brcmf_p2p_info *p2p,
+ struct brcmf_if *ifp, u8 ea[ETH_ALEN],
+ enum brcmf_fil_p2p_if_types iftype)
+{
+ struct brcmf_fil_p2p_if_le if_request;
+ int err;
+ u16 chanspec;
+
+ /* we need a default channel */
+ brcmf_p2p_get_current_chanspec(p2p, &chanspec);
+
+ /* fill the firmware request */
+ memcpy(if_request.addr, ea, ETH_ALEN);
+ if_request.type = cpu_to_le16((u16)iftype);
+ if_request.chspec = cpu_to_le16(chanspec);
+
+ err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request,
+ sizeof(if_request));
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int brcmf_p2p_disable_p2p_if(struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
+ struct net_device *pri_ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(pri_ndev);
+ u8 *addr = vif->wdev.netdev->dev_addr;
+
+ return brcmf_fil_iovar_data_set(ifp, "p2p_ifdis", addr, ETH_ALEN);
+}
+
+static int brcmf_p2p_release_p2p_if(struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
+ struct net_device *pri_ndev = cfg_to_ndev(cfg);
+ struct brcmf_if *ifp = netdev_priv(pri_ndev);
+ u8 *addr = vif->wdev.netdev->dev_addr;
+
+ return brcmf_fil_iovar_data_set(ifp, "p2p_ifdel", addr, ETH_ALEN);
+}
+
+/**
+ * brcmf_p2p_add_vif() - create a new P2P virtual interface.
+ *
+ * @wiphy: wiphy device of new interface.
+ * @name: name of the new interface.
+ * @type: nl80211 interface type.
+ * @flags: TBD
+ * @params: TBD
+ */
+struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
+ struct brcmf_cfg80211_vif *vif;
+ enum brcmf_fil_p2p_if_types iftype;
+ enum wl_mode mode;
+ int err;
+
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+ return ERR_PTR(-EBUSY);
+
+ brcmf_dbg(INFO, "adding vif \"%s\" (type=%d)\n", name, type);
+
+ switch (type) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ iftype = BRCMF_FIL_P2P_IF_CLIENT;
+ mode = WL_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ iftype = BRCMF_FIL_P2P_IF_GO;
+ mode = WL_MODE_AP;
+ break;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ vif = brcmf_alloc_vif(cfg, type, false);
+ if (IS_ERR(vif))
+ return (struct wireless_dev *)vif;
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+
+ err = brcmf_p2p_request_p2p_if(&cfg->p2p, ifp, cfg->p2p.int_addr,
+ iftype);
+ if (err) {
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ goto fail;
+ }
+
+ /* wait for firmware event */
+ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
+ msecs_to_jiffies(1500));
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ if (!err) {
+ brcmf_err("timeout occurred\n");
+ err = -EIO;
+ goto fail;
+ }
+
+ /* interface created in firmware */
+ ifp = vif->ifp;
+ if (!ifp) {
+ brcmf_err("no if pointer provided\n");
+ err = -ENOENT;
+ goto fail;
+ }
+
+ strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+ err = brcmf_net_attach(ifp, true);
+ if (err) {
+ brcmf_err("Registering netdevice failed\n");
+ goto fail;
+ }
+ cfg->p2p.bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = vif;
+ /* Disable firmware roaming for P2P interface */
+ brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
+ if (iftype == BRCMF_FIL_P2P_IF_GO) {
+ /* set station timeout for p2p */
+ brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCB_TIMEOUT,
+ BRCMF_SCB_TIMEOUT_VALUE);
+ }
+ return &ifp->vif->wdev;
+
+fail:
+ brcmf_free_vif(vif);
+ return ERR_PTR(err);
+}
+
+/**
+ * brcmf_p2p_del_vif() - delete a P2P virtual interface.
+ *
+ * @wiphy: wiphy device of interface.
+ * @wdev: wireless device of interface.
+ *
+ * TODO: not yet supported.
+ */
+int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+ struct brcmf_cfg80211_vif *vif;
+ unsigned long jiffie_timeout = msecs_to_jiffies(1500);
+ bool wait_for_disable = false;
+ int err;
+
+ brcmf_dbg(TRACE, "delete P2P vif\n");
+ vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
+ switch (vif->wdev.iftype) {
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
+ wait_for_disable = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_GO:
+ if (!brcmf_p2p_disable_p2p_if(vif))
+ wait_for_disable = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_DEVICE:
+ default:
+ return -ENOTSUPP;
+ break;
+ }
+
+ clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+ brcmf_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n");
+
+ if (wait_for_disable)
+ wait_for_completion_timeout(&cfg->vif_disabled,
+ msecs_to_jiffies(500));
+
+ brcmf_vif_clear_mgmt_ies(vif);
+
+ brcmf_cfg80211_arm_vif_event(cfg, vif);
+ err = brcmf_p2p_release_p2p_if(vif);
+ if (!err) {
+ /* wait for firmware event */
+ err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL,
+ jiffie_timeout);
+ if (!err)
+ err = -EIO;
+ else
+ err = 0;
+ }
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ brcmf_free_vif(vif);
+ p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
+
+ return err;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
new file mode 100644
index 000000000000..6821b26224be
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef WL_CFGP2P_H_
+#define WL_CFGP2P_H_
+
+#include <net/cfg80211.h>
+
+struct brcmf_cfg80211_info;
+
+/**
+ * enum p2p_bss_type - different type of BSS configurations.
+ *
+ * @P2PAPI_BSSCFG_PRIMARY: maps to driver's primary bsscfg.
+ * @P2PAPI_BSSCFG_DEVICE: maps to driver's P2P device discovery bsscfg.
+ * @P2PAPI_BSSCFG_CONNECTION: maps to driver's P2P connection bsscfg.
+ * @P2PAPI_BSSCFG_MAX: used for range checking.
+ */
+enum p2p_bss_type {
+ P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
+ P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
+ P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+ P2PAPI_BSSCFG_MAX
+};
+
+/**
+ * struct p2p_bss - peer-to-peer bss related information.
+ *
+ * @vif: virtual interface of this P2P bss.
+ * @private_data: TBD
+ */
+struct p2p_bss {
+ struct brcmf_cfg80211_vif *vif;
+ void *private_data;
+};
+
+/**
+ * enum brcmf_p2p_status - P2P specific dongle status.
+ *
+ * @BRCMF_P2P_STATUS_IF_ADD: peer-to-peer vif add sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_DEL: NOT-USED?
+ * @BRCMF_P2P_STATUS_IF_DELETING: peer-to-peer vif delete sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_CHANGING: peer-to-peer vif change sent to dongle.
+ * @BRCMF_P2P_STATUS_IF_CHANGED: peer-to-peer vif change completed on dongle.
+ * @BRCMF_P2P_STATUS_ACTION_TX_COMPLETED: action frame tx completed.
+ * @BRCMF_P2P_STATUS_ACTION_TX_NOACK: action frame tx not acked.
+ * @BRCMF_P2P_STATUS_GO_NEG_PHASE: P2P GO negotiation ongoing.
+ * @BRCMF_P2P_STATUS_DISCOVER_LISTEN: P2P listen, remaining on channel.
+ * @BRCMF_P2P_STATUS_SENDING_ACT_FRAME: In the process of sending action frame.
+ * @BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN: extra listen time for af tx.
+ * @BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME: waiting for action frame response.
+ * @BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL: search channel for AF active.
+ */
+enum brcmf_p2p_status {
+ BRCMF_P2P_STATUS_ENABLED,
+ BRCMF_P2P_STATUS_IF_ADD,
+ BRCMF_P2P_STATUS_IF_DEL,
+ BRCMF_P2P_STATUS_IF_DELETING,
+ BRCMF_P2P_STATUS_IF_CHANGING,
+ BRCMF_P2P_STATUS_IF_CHANGED,
+ BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
+ BRCMF_P2P_STATUS_ACTION_TX_NOACK,
+ BRCMF_P2P_STATUS_GO_NEG_PHASE,
+ BRCMF_P2P_STATUS_DISCOVER_LISTEN,
+ BRCMF_P2P_STATUS_SENDING_ACT_FRAME,
+ BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ BRCMF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL
+};
+
+/**
+ * struct afx_hdl - action frame off channel storage.
+ *
+ * @afx_work: worker thread for searching channel
+ * @act_frm_scan: thread synchronizing struct.
+ * @is_active: channel searching active.
+ * @peer_chan: current channel.
+ * @is_listen: sets mode for afx worker.
+ * @my_listen_chan: this peer's listen channel.
+ * @peer_listen_chan: remote peer's listen channel.
+ * @tx_dst_addr: mac address where tx af should be sent to.
+ */
+struct afx_hdl {
+ struct work_struct afx_work;
+ struct completion act_frm_scan;
+ bool is_active;
+ s32 peer_chan;
+ bool is_listen;
+ u16 my_listen_chan;
+ u16 peer_listen_chan;
+ u8 tx_dst_addr[ETH_ALEN];
+};
+
+/**
+ * struct brcmf_p2p_info - p2p specific driver information.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @status: status of P2P (see enum brcmf_p2p_status).
+ * @dev_addr: P2P device address.
+ * @int_addr: P2P interface address.
+ * @bss_idx: information for the P2P bss types.
+ * @listen_timer: timer for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @ssid: ssid for P2P GO.
+ * @listen_channel: channel for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @remain_on_channel: contains copy of struct used by cfg80211.
+ * @remain_on_channel_cookie: cookie counter for remain on channel cmd
+ * @next_af_subtype: expected action frame subtype.
+ * @send_af_done: indication that action frame tx is complete.
+ * @afx_hdl: action frame search handler info.
+ * @af_sent_channel: channel on which the action frame was sent.
+ * @af_tx_sent_jiffies: jiffies time when af tx was transmitted.
+ * @wait_next_af: thread synchronizing struct.
+ * @gon_req_action: about to send go negotiation request frame.
+ * @block_gon_req_tx: drop tx go negotiation request frame.
+ */
+struct brcmf_p2p_info {
+ struct brcmf_cfg80211_info *cfg;
+ unsigned long status;
+ u8 dev_addr[ETH_ALEN];
+ u8 int_addr[ETH_ALEN];
+ struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
+ struct timer_list listen_timer;
+ struct brcmf_ssid ssid;
+ u8 listen_channel;
+ struct ieee80211_channel remain_on_channel;
+ u32 remain_on_channel_cookie;
+ u8 next_af_subtype;
+ struct completion send_af_done;
+ struct afx_hdl afx_hdl;
+ u32 af_sent_channel;
+ unsigned long af_tx_sent_jiffies;
+ struct completion wait_next_af;
+ bool gon_req_action;
+ bool block_gon_req_tx;
+};
+
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg);
+void brcmf_p2p_detach(struct brcmf_p2p_info *p2p);
+struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params);
+int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
+int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
+ enum brcmf_fil_p2p_if_types if_type);
+int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+int brcmf_p2p_scan_prep(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request,
+ struct brcmf_cfg80211_vif *vif);
+int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *channel,
+ unsigned int duration, u64 *cookie);
+int brcmf_p2p_notify_listen_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp);
+int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct brcmf_fil_af_params_le *af_params);
+bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_bss_info_le *bi);
+s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+#endif /* WL_CFGP2P_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index b1bb46c49799..14be2d5530ce 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -15,8 +15,6 @@
*/
/* ***** SDIO interface chip backplane handle functions ***** */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mmc/card.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 914c56fe6c5f..42289e9ea886 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -354,11 +354,10 @@ brcmf_usbdev_qinit(struct list_head *q, int qsize)
int i;
struct brcmf_usbreq *req, *reqs;
- reqs = kzalloc(sizeof(struct brcmf_usbreq) * qsize, GFP_ATOMIC);
- if (reqs == NULL) {
- brcmf_err("fail to allocate memory!\n");
+ reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC);
+ if (reqs == NULL)
return NULL;
- }
+
req = reqs;
for (i = 0; i < qsize; i++) {
@@ -421,10 +420,6 @@ static void brcmf_usb_tx_complete(struct urb *urb)
brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
req->skb);
brcmf_usb_del_fromq(devinfo, req);
- if (urb->status == 0)
- devinfo->bus_pub.bus->dstats.tx_packets++;
- else
- devinfo->bus_pub.bus->dstats.tx_errors++;
brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
@@ -443,30 +438,25 @@ static void brcmf_usb_rx_complete(struct urb *urb)
struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
struct brcmf_usbdev_info *devinfo = req->devinfo;
struct sk_buff *skb;
- int ifidx = 0;
+ struct sk_buff_head skbq;
brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
brcmf_usb_del_fromq(devinfo, req);
skb = req->skb;
req->skb = NULL;
- if (urb->status == 0) {
- devinfo->bus_pub.bus->dstats.rx_packets++;
- } else {
- devinfo->bus_pub.bus->dstats.rx_errors++;
+	/* zero length packets indicate usb "failure". Do not refill */
+ if (urb->status != 0 || !urb->actual_length) {
brcmu_pkt_buf_free_skb(skb);
brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
return;
}
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
+ skb_queue_head_init(&skbq);
+ skb_queue_tail(&skbq, skb);
skb_put(skb, urb->actual_length);
- if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
- brcmf_err("rx protocol error\n");
- brcmu_pkt_buf_free_skb(skb);
- devinfo->bus_pub.bus->dstats.rx_errors++;
- } else
- brcmf_rx_packet(devinfo->dev, ifidx, skb);
+ brcmf_rx_frames(devinfo->dev, &skbq);
brcmf_usb_rx_refill(devinfo, req);
} else {
brcmu_pkt_buf_free_skb(skb);
@@ -1259,6 +1249,8 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->bus_priv.usb = bus_pub;
dev_set_drvdata(dev, bus);
bus->ops = &brcmf_usb_bus_ops;
+ bus->chip = bus_pub->devid;
+ bus->chiprev = bus_pub->chiprev;
/* Attach to the common driver interface */
ret = brcmf_attach(0, dev);
@@ -1520,10 +1512,23 @@ static void brcmf_release_fw(struct list_head *q)
}
}
+static int brcmf_usb_reset_device(struct device *dev, void *notused)
+{
+	/* the device passed in is the usb interface, so we
+	 * need to use the parent here.
+ */
+ brcmf_dev_reset(dev->parent);
+ return 0;
+}
void brcmf_usb_exit(void)
{
+ struct device_driver *drv = &brcmf_usbdrvr.drvwrap.driver;
+ int ret;
+
brcmf_dbg(USB, "Enter\n");
+ ret = driver_for_each_device(drv, NULL, NULL,
+ brcmf_usb_reset_device);
usb_deregister(&brcmf_usbdrvr);
brcmf_release_fw(&fw_image_list);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 75464ad4fbd1..cecc3eff72e9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -16,8 +16,6 @@
/* Toplevel file. Relies on dhd_linux.c to send commands to the dongle. */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <net/cfg80211.h>
@@ -28,6 +26,8 @@
#include <brcmu_wifi.h>
#include "dhd.h"
#include "dhd_dbg.h"
+#include "fwil_types.h"
+#include "p2p.h"
#include "wl_cfg80211.h"
#include "fwil.h"
@@ -43,16 +43,13 @@
#define BRCMF_PNO_SCAN_COMPLETE 1
#define BRCMF_PNO_SCAN_INCOMPLETE 0
-#define BRCMF_IFACE_MAX_CNT 2
+#define BRCMF_IFACE_MAX_CNT 3
-#define TLV_LEN_OFF 1 /* length offset */
-#define TLV_HDR_LEN 2 /* header length */
-#define TLV_BODY_OFF 2 /* body offset */
-#define TLV_OUI_LEN 3 /* oui id length */
#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
#define WPA_OUI_TYPE 1
#define RSN_OUI "\x00\x0F\xAC" /* RSN OUI */
#define WME_OUI_TYPE 2
+#define WPS_OUI_TYPE 4
#define VS_IE_FIXED_HDR_LEN 6
#define WPA_IE_VERSION_LEN 2
@@ -78,13 +75,15 @@
#define VNDR_IE_PKTFLAG_OFFSET 8
#define VNDR_IE_VSIE_OFFSET 12
#define VNDR_IE_HDR_SIZE 12
-#define VNDR_IE_BEACON_FLAG 0x1
-#define VNDR_IE_PRBRSP_FLAG 0x2
-#define MAX_VNDR_IE_NUMBER 5
+#define VNDR_IE_PARSE_LIMIT 5
#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */
#define DOT11_BCN_PRB_FIXED_LEN 12 /* beacon/probe fixed length */
+#define BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
+#define BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400
+#define BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS 20
+
#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
@@ -273,13 +272,6 @@ static const u32 __wl_cipher_suites[] = {
WLAN_CIPHER_SUITE_AES_CMAC,
};
-/* tag_ID/length/value_buffer tuple */
-struct brcmf_tlv {
- u8 id;
- u8 len;
- u8 data[1];
-};
-
/* Vendor specific ie. id = 221, oui and type defines exact ie */
struct brcmf_vs_tlv {
u8 id;
@@ -296,7 +288,7 @@ struct parsed_vndr_ie_info {
struct parsed_vndr_ies {
u32 count;
- struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER];
+ struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
};
/* Quarter dBm units to mW
@@ -383,7 +375,7 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
return qdbm;
}
-static u16 channel_to_chanspec(struct ieee80211_channel *ch)
+u16 channel_to_chanspec(struct ieee80211_channel *ch)
{
u16 chanspec;
@@ -395,19 +387,92 @@ static u16 channel_to_chanspec(struct ieee80211_channel *ch)
else
chanspec |= WL_CHANSPEC_BAND_5G;
- if (ch->flags & IEEE80211_CHAN_NO_HT40) {
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
- } else {
- chanspec |= WL_CHANSPEC_BW_40;
- if (ch->flags & IEEE80211_CHAN_NO_HT40PLUS)
- chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
- else
- chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
- }
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
return chanspec;
}
+/* Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
+{
+ struct brcmf_tlv *elt;
+ int totlen;
+
+ elt = (struct brcmf_tlv *)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ int len = elt->len;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
+ return elt;
+
+ elt = (struct brcmf_tlv *)((u8 *)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+
+ return NULL;
+}
+
+/* Is any of the tlvs the expected entry? If
+ * not update the tlvs buffer pointer/length.
+ */
+static bool
+brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
+ u8 *oui, u32 oui_len, u8 type)
+{
+ /* If the contents match the OUI and the type */
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+ !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+ type == ie[TLV_BODY_OFF + oui_len]) {
+ return true;
+ }
+
+ if (tlvs == NULL)
+ return false;
+ /* point to the next ie */
+ ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+ /* calculate the length of the rest of the buffer */
+ *tlvs_len -= (int)(ie - *tlvs);
+ /* update the pointer to the start of the buffer */
+ *tlvs = ie;
+
+ return false;
+}
+
+static struct brcmf_vs_tlv *
+brcmf_find_wpaie(u8 *parse, u32 len)
+{
+ struct brcmf_tlv *ie;
+
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+ if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+ WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
+ return (struct brcmf_vs_tlv *)ie;
+ }
+ return NULL;
+}
+
+static struct brcmf_vs_tlv *
+brcmf_find_wpsie(u8 *parse, u32 len)
+{
+ struct brcmf_tlv *ie;
+
+ while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
+ if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+ WPA_OUI, TLV_OUI_LEN, WPS_OUI_TYPE))
+ return (struct brcmf_vs_tlv *)ie;
+ }
+ return NULL;
+}
+
+
static void convert_key_from_CPU(struct brcmf_wsec_key *key,
struct brcmf_wsec_key_le *key_le)
{
@@ -440,11 +505,153 @@ send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key)
return err;
}
+static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
+ const char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
+{
+ brcmf_dbg(TRACE, "enter: %s type %d\n", name, type);
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ return ERR_PTR(-EOPNOTSUPP);
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return brcmf_p2p_add_vif(wiphy, name, type, flags, params);
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
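+/* toggle the firmware minimum power consumption (mpc) feature; it is
+ * switched off around scans and AP setup and restored afterwards
+ */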
+void brcmf_set_mpc(struct net_device *ndev, int mpc)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ s32 err = 0;
+
+ if (check_vif_up(ifp->vif)) {
+ err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
+ if (err) {
+ brcmf_err("fail to set mpc\n");
+ return;
+ }
+ brcmf_dbg(INFO, "MPC : %d\n", mpc);
+ }
+}
+
+s32
+brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ bool aborted, bool fw_abort)
+{
+ struct brcmf_scan_params_le params_le;
+ struct cfg80211_scan_request *scan_request;
+ s32 err = 0;
+
+ brcmf_dbg(SCAN, "Enter\n");
+
+ /* clear scan request, because the FW abort can cause a second call */
+	/* to this function and might cause a double cfg80211_scan_done */
+ scan_request = cfg->scan_request;
+ cfg->scan_request = NULL;
+
+ if (timer_pending(&cfg->escan_timeout))
+ del_timer_sync(&cfg->escan_timeout);
+
+ if (fw_abort) {
+ /* Do a scan abort to stop the driver's scan engine */
+ brcmf_dbg(SCAN, "ABORT scan in firmware\n");
+ memset(&params_le, 0, sizeof(params_le));
+ memset(params_le.bssid, 0xFF, ETH_ALEN);
+ params_le.bss_type = DOT11_BSSTYPE_ANY;
+ params_le.scan_type = 0;
+ params_le.channel_num = cpu_to_le32(1);
+ params_le.nprobes = cpu_to_le32(1);
+ params_le.active_time = cpu_to_le32(-1);
+ params_le.passive_time = cpu_to_le32(-1);
+ params_le.home_time = cpu_to_le32(-1);
+ /* Scan is aborted by setting channel_list[0] to -1 */
+ params_le.channel_list[0] = cpu_to_le16(-1);
+	/* E-Scan (or any other type) can be aborted by SCAN */
+ err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
+ &params_le, sizeof(params_le));
+ if (err)
+ brcmf_err("Scan abort failed\n");
+ }
+ /*
+ * e-scan can be initiated by scheduled scan
+ * which takes precedence.
+ */
+ if (cfg->sched_escan) {
+ brcmf_dbg(SCAN, "scheduled scan completed\n");
+ cfg->sched_escan = false;
+ if (!aborted)
+ cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
+ brcmf_set_mpc(ndev, 1);
+ } else if (scan_request) {
+ brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
+ aborted ? "Aborted" : "Done");
+ cfg80211_scan_done(scan_request, aborted);
+ brcmf_set_mpc(ndev, 1);
+ }
+ if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
+ brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
+
+ return err;
+}
+
+static
+int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = wdev->netdev;
+
+ /* vif event pending in firmware */
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+ return -EBUSY;
+
+ if (ndev) {
+ if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status) &&
+ cfg->escan_info.ndev == ndev)
+ brcmf_notify_escan_complete(cfg, ndev, true,
+ true);
+
+ brcmf_fil_iovar_int_set(netdev_priv(ndev), "mpc", 1);
+ }
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ return -EOPNOTSUPP;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return brcmf_p2p_del_vif(wiphy, wdev);
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ default:
+ return -EINVAL;
+ }
+ return -EOPNOTSUPP;
+}
+
static s32
brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_vif *vif = ifp->vif;
s32 infra = 0;
@@ -464,10 +671,23 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
infra = 0;
break;
case NL80211_IFTYPE_STATION:
+ /* Ignore change for p2p IF. Unclear why supplicant does this */
+ if ((vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) ||
+ (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) {
+ brcmf_dbg(TRACE, "Ignoring cmd for p2p if\n");
+			/* WAR: It is unexpected to get a change of VIF for a
+			 * P2P IF, but it happens. The request cannot be
+			 * handled, but returning EPERM causes a crash.
+			 * Returning 0 without setting ieee80211_ptr->iftype
+			 * causes a trace (WARN_ON) but it works with
+			 * wpa_supplicant.
+			 */
+ return 0;
+ }
vif->mode = WL_MODE_BSS;
infra = 1;
break;
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
vif->mode = WL_MODE_AP;
ap = 1;
break;
@@ -477,8 +697,14 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
}
if (ap) {
- set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
- brcmf_dbg(INFO, "IF Type = AP\n");
+ if (type == NL80211_IFTYPE_P2P_GO) {
+ brcmf_dbg(INFO, "IF Type = P2P GO\n");
+ err = brcmf_p2p_ifchange(cfg, BRCMF_FIL_P2P_IF_GO);
+ }
+ if (!err) {
+ set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
+ brcmf_dbg(INFO, "IF Type = AP\n");
+ }
} else {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, infra);
if (err) {
@@ -497,21 +723,6 @@ done:
return err;
}
-static void brcmf_set_mpc(struct net_device *ndev, int mpc)
-{
- struct brcmf_if *ifp = netdev_priv(ndev);
- s32 err = 0;
-
- if (check_vif_up(ifp->vif)) {
- err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
- if (err) {
- brcmf_err("fail to set mpc\n");
- return;
- }
- brcmf_dbg(INFO, "MPC : %d\n", mpc);
- }
-}
-
static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
struct cfg80211_scan_request *request)
{
@@ -592,69 +803,6 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
}
static s32
-brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
- bool aborted, bool fw_abort)
-{
- struct brcmf_scan_params_le params_le;
- struct cfg80211_scan_request *scan_request;
- s32 err = 0;
-
- brcmf_dbg(SCAN, "Enter\n");
-
- /* clear scan request, because the FW abort can cause a second call */
- /* to this functon and might cause a double cfg80211_scan_done */
- scan_request = cfg->scan_request;
- cfg->scan_request = NULL;
-
- if (timer_pending(&cfg->escan_timeout))
- del_timer_sync(&cfg->escan_timeout);
-
- if (fw_abort) {
- /* Do a scan abort to stop the driver's scan engine */
- brcmf_dbg(SCAN, "ABORT scan in firmware\n");
- memset(&params_le, 0, sizeof(params_le));
- memset(params_le.bssid, 0xFF, ETH_ALEN);
- params_le.bss_type = DOT11_BSSTYPE_ANY;
- params_le.scan_type = 0;
- params_le.channel_num = cpu_to_le32(1);
- params_le.nprobes = cpu_to_le32(1);
- params_le.active_time = cpu_to_le32(-1);
- params_le.passive_time = cpu_to_le32(-1);
- params_le.home_time = cpu_to_le32(-1);
- /* Scan is aborted by setting channel_list[0] to -1 */
- params_le.channel_list[0] = cpu_to_le16(-1);
- /* E-Scan (or anyother type) can be aborted by SCAN */
- err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
- &params_le, sizeof(params_le));
- if (err)
- brcmf_err("Scan abort failed\n");
- }
- /*
- * e-scan can be initiated by scheduled scan
- * which takes precedence.
- */
- if (cfg->sched_escan) {
- brcmf_dbg(SCAN, "scheduled scan completed\n");
- cfg->sched_escan = false;
- if (!aborted)
- cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
- brcmf_set_mpc(ndev, 1);
- } else if (scan_request) {
- brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
- aborted ? "Aborted" : "Done");
- cfg80211_scan_done(scan_request, aborted);
- brcmf_set_mpc(ndev, 1);
- }
- if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
- brcmf_err("Scan complete while device not scanning\n");
- return -EPERM;
- }
-
- return err;
-}
-
-static s32
brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
struct cfg80211_scan_request *request, u16 action)
{
@@ -705,11 +853,12 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
s32 err;
u32 passive_scan;
struct brcmf_scan_results *results;
+ struct escan_info *escan = &cfg->escan_info;
brcmf_dbg(SCAN, "Enter\n");
- cfg->escan_info.ndev = ndev;
- cfg->escan_info.wiphy = wiphy;
- cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
+ escan->ndev = ndev;
+ escan->wiphy = wiphy;
+ escan->escan_state = WL_ESCAN_STATE_SCANNING;
passive_scan = cfg->active_scan ? 0 : 1;
err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN,
passive_scan);
@@ -723,7 +872,7 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
results->count = 0;
results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
- err = brcmf_run_escan(cfg, ndev, request, WL_ESCAN_ACTION_START);
+ err = escan->run(cfg, ndev, request, WL_ESCAN_ACTION_START);
if (err)
brcmf_set_mpc(ndev, 1);
return err;
@@ -760,6 +909,12 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
return -EAGAIN;
}
+ /* If scan req comes for p2p0, send it over primary I/F */
+ if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) {
+ ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+ ndev = ifp->ndev;
+ }
+
/* Arm scan timeout timer */
mod_timer(&cfg->escan_timeout, jiffies +
WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
@@ -778,6 +933,11 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
cfg->scan_request = request;
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
if (escan_req) {
+ cfg->escan_info.run = brcmf_run_escan;
+ err = brcmf_p2p_scan_prep(wiphy, request, ifp->vif);
+ if (err)
+ goto scan_out;
+
err = brcmf_do_escan(cfg, wiphy, ndev, request);
if (err)
goto scan_out;
@@ -935,31 +1095,6 @@ static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
memset(prof, 0, sizeof(*prof));
}
-static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
- size_t *join_params_size)
-{
- u16 chanspec = 0;
-
- if (ch != 0) {
- if (ch <= CH_MAX_2G_CHANNEL)
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
-
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
-
- *join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE +
- sizeof(u16);
-
- chanspec |= (ch & WL_CHANSPEC_CHAN_MASK);
- join_params->params_le.chanspec_list[0] = cpu_to_le16(chanspec);
- join_params->params_le.chanspec_num = cpu_to_le32(1);
-
- brcmf_dbg(CONN, "channel %d, chanspec %#X\n", ch, chanspec);
- }
-}
-
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
{
s32 err = 0;
@@ -990,6 +1125,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
s32 wsec = 0;
s32 bcnprd;
+ u16 chanspec;
brcmf_dbg(TRACE, "Enter\n");
if (!check_vif_up(ifp->vif))
@@ -1093,8 +1229,11 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
params->chandef.chan->center_freq);
if (params->channel_fixed) {
/* adding chanspec */
- brcmf_ch_to_chanspec(cfg->channel,
- &join_params, &join_params_size);
+ chanspec = channel_to_chanspec(params->chandef.chan);
+ join_params.params_le.chanspec_list[0] =
+ cpu_to_le16(chanspec);
+ join_params.params_le.chanspec_num = cpu_to_le32(1);
+ join_params_size += sizeof(join_params.params_le);
}
/* set channel for starter */
@@ -1157,7 +1296,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
else
val = WPA_AUTH_DISABLED;
brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wpa_auth", val);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val);
if (err) {
brcmf_err("set wpa_auth failed (%d)\n", err);
return err;
@@ -1196,7 +1335,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
break;
}
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "auth", val);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
if (err) {
brcmf_err("set auth failed (%d)\n", err);
return err;
@@ -1260,7 +1399,12 @@ brcmf_set_set_cipher(struct net_device *ndev,
}
brcmf_dbg(CONN, "pval (%d) gval (%d)\n", pval, gval);
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wsec", pval | gval);
+	/* If privacy is requested but no cipher is set and WPS is in use, */
+	/* simulate setting AES. WPS-2.0 allows no security */
+ if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
+ sme->privacy)
+ pval = AES_ENABLED;
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", pval | gval);
if (err) {
brcmf_err("error (%d)\n", err);
return err;
@@ -1282,8 +1426,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
s32 err = 0;
if (sme->crypto.n_akm_suites) {
- err = brcmf_fil_iovar_int_get(netdev_priv(ndev),
- "wpa_auth", &val);
+ err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev),
+ "wpa_auth", &val);
if (err) {
brcmf_err("could not get wpa_auth (%d)\n", err);
return err;
@@ -1317,8 +1461,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
}
brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
- err = brcmf_fil_iovar_int_set(netdev_priv(ndev),
- "wpa_auth", val);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev),
+ "wpa_auth", val);
if (err) {
brcmf_err("could not set wpa_auth (%d)\n", err);
return err;
@@ -1395,9 +1539,28 @@ brcmf_set_sharedkey(struct net_device *ndev,
return err;
}
+static
+enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp,
+ enum nl80211_auth_type type)
+{
+ u32 ci;
+ if (type == NL80211_AUTHTYPE_AUTOMATIC) {
+ /* shift to ignore chip revision */
+ ci = brcmf_get_chip_info(ifp) >> 4;
+ switch (ci) {
+ case 43236:
+ brcmf_dbg(CONN, "43236 WAR: use OPEN instead of AUTO\n");
+ return NL80211_AUTHTYPE_OPEN_SYSTEM;
+ default:
+ break;
+ }
+ }
+ return type;
+}
+
static s32
brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_connect_params *sme)
+ struct cfg80211_connect_params *sme)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -1405,7 +1568,12 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct ieee80211_channel *chan = sme->channel;
struct brcmf_join_params join_params;
size_t join_params_size;
- struct brcmf_ssid ssid;
+ struct brcmf_tlv *rsn_ie;
+ struct brcmf_vs_tlv *wpa_ie;
+ void *ie;
+ u32 ie_len;
+ struct brcmf_ext_join_params_le *ext_join_params;
+ u16 chanspec;
s32 err = 0;
@@ -1418,15 +1586,46 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
return -EOPNOTSUPP;
}
+ if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) {
+ /* A normal (non P2P) connection request setup. */
+ ie = NULL;
+ ie_len = 0;
+ /* find the WPA_IE */
+ wpa_ie = brcmf_find_wpaie((u8 *)sme->ie, sme->ie_len);
+ if (wpa_ie) {
+ ie = wpa_ie;
+ ie_len = wpa_ie->len + TLV_HDR_LEN;
+ } else {
+ /* find the RSN_IE */
+ rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+ WLAN_EID_RSN);
+ if (rsn_ie) {
+ ie = rsn_ie;
+ ie_len = rsn_ie->len + TLV_HDR_LEN;
+ }
+ }
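+		/* hand the WPA or RSN IE from the connect request to firmware */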
+ brcmf_fil_iovar_data_set(ifp, "wpaie", ie, ie_len);
+ }
+
+ err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG,
+ sme->ie, sme->ie_len);
+ if (err)
+ brcmf_err("Set Assoc REQ IE Failed\n");
+ else
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n");
+
set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
if (chan) {
cfg->channel =
ieee80211_frequency_to_channel(chan->center_freq);
- brcmf_dbg(CONN, "channel (%d), center_req (%d)\n",
- cfg->channel, chan->center_freq);
- } else
+ chanspec = channel_to_chanspec(chan);
+ brcmf_dbg(CONN, "channel=%d, center_req=%d, chanspec=0x%04x\n",
+ cfg->channel, chan->center_freq, chanspec);
+ } else {
cfg->channel = 0;
+ chanspec = 0;
+ }
brcmf_dbg(INFO, "ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
@@ -1436,6 +1635,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
+ sme->auth_type = brcmf_war_auth_type(ifp, sme->auth_type);
err = brcmf_set_auth_type(ndev, sme);
if (err) {
brcmf_err("wl_set_auth_type failed (%d)\n", err);
@@ -1460,27 +1660,88 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
+ profile->ssid.SSID_len = min_t(u32, (u32)sizeof(profile->ssid.SSID),
+ (u32)sme->ssid_len);
+ memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
+ if (profile->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ profile->ssid.SSID[profile->ssid.SSID_len] = 0;
+ brcmf_dbg(CONN, "SSID \"%s\", len (%d)\n", profile->ssid.SSID,
+ profile->ssid.SSID_len);
+ }
+
+ /* Join with specific BSSID and cached SSID
+ * If SSID is zero join based on BSSID only
+ */
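+	/* size covers the fixed part of the extended join plus, when a
+	 * channel was given, a single chanspec entry
+	 */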
+ join_params_size = offsetof(struct brcmf_ext_join_params_le, assoc_le) +
+ offsetof(struct brcmf_assoc_params_le, chanspec_list);
+ if (cfg->channel)
+ join_params_size += sizeof(u16);
+ ext_join_params = kzalloc(join_params_size, GFP_KERNEL);
+ if (ext_join_params == NULL) {
+ err = -ENOMEM;
+ goto done;
+ }
+ ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
+ memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
+ profile->ssid.SSID_len);
+	/* increase dwell time to receive a probe response or detect a beacon
+	 * from the target AP in noisy air, only during the connect command
+ */
+ ext_join_params->scan_le.active_time =
+ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
+ ext_join_params->scan_le.passive_time =
+ cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+ /* Set up join scan parameters */
+ ext_join_params->scan_le.scan_type = -1;
+	/* to sync with the presence period of a VSDB GO, send probe requests
+	 * more frequently. Probing stops once a probe response from the
+	 * target AP/GO is received.
+ */
+ ext_join_params->scan_le.nprobes =
+ cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
+ BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
+ ext_join_params->scan_le.home_time = cpu_to_le32(-1);
+
+ if (sme->bssid)
+ memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN);
+ else
+ memset(&ext_join_params->assoc_le.bssid, 0xFF, ETH_ALEN);
+
+ if (cfg->channel) {
+ ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1);
+
+ ext_join_params->assoc_le.chanspec_list[0] =
+ cpu_to_le16(chanspec);
+ }
+
+ err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
+ join_params_size);
+ kfree(ext_join_params);
+ if (!err)
+ /* This is it. join command worked, we are done */
+ goto done;
+
+ /* join command failed, fallback to set ssid */
memset(&join_params, 0, sizeof(join_params));
join_params_size = sizeof(join_params.ssid_le);
- profile->ssid.SSID_len = min_t(u32,
- sizeof(ssid.SSID), (u32)sme->ssid_len);
memcpy(&join_params.ssid_le.SSID, sme->ssid, profile->ssid.SSID_len);
- memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
- memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
-
- if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN)
- brcmf_dbg(CONN, "ssid \"%s\", len (%d)\n",
- ssid.SSID, ssid.SSID_len);
+ if (sme->bssid)
+ memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN);
+ else
+ memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
- brcmf_ch_to_chanspec(cfg->channel,
- &join_params, &join_params_size);
+ if (cfg->channel) {
+ join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec);
+ join_params.params_le.chanspec_num = cpu_to_le32(1);
+ join_params_size += sizeof(join_params.params_le);
+ }
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, join_params_size);
if (err)
- brcmf_err("WLC_SET_SSID failed (%d)\n", err);
+ brcmf_err("BRCMF_C_SET_SSID failed (%d)\n", err);
done:
if (err)
@@ -1939,7 +2200,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
/* Report the current tx rate */
- err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
if (err) {
brcmf_err("Could not get rate (%d)\n", err);
goto done;
@@ -2011,67 +2272,6 @@ done:
return err;
}
-static s32
-brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
- const u8 *addr,
- const struct cfg80211_bitrate_mask *mask)
-{
- struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcm_rateset_le rateset_le;
- s32 rate;
- s32 val;
- s32 err_bg;
- s32 err_a;
- u32 legacy;
- s32 err = 0;
-
- brcmf_dbg(TRACE, "Enter\n");
- if (!check_vif_up(ifp->vif))
- return -EIO;
-
- /* addr param is always NULL. ignore it */
- /* Get current rateset */
- err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CURR_RATESET,
- &rateset_le, sizeof(rateset_le));
- if (err) {
- brcmf_err("could not get current rateset (%d)\n", err);
- goto done;
- }
-
- legacy = ffs(mask->control[IEEE80211_BAND_2GHZ].legacy & 0xFFFF);
- if (!legacy)
- legacy = ffs(mask->control[IEEE80211_BAND_5GHZ].legacy &
- 0xFFFF);
-
- val = wl_g_rates[legacy - 1].bitrate * 100000;
-
- if (val < le32_to_cpu(rateset_le.count))
- /* Select rate by rateset index */
- rate = rateset_le.rates[val] & 0x7f;
- else
- /* Specified rate in bps */
- rate = val / 500000;
-
- brcmf_dbg(CONN, "rate %d mbps\n", rate / 2);
-
- /*
- *
- * Set rate override,
- * Since the is a/b/g-blind, both a/bg_rate are enforced.
- */
- err_bg = brcmf_fil_iovar_int_set(ifp, "bg_rate", rate);
- err_a = brcmf_fil_iovar_int_set(ifp, "a_rate", rate);
- if (err_bg && err_a) {
- brcmf_err("could not set fixed rate (%d) (%d)\n", err_bg,
- err_a);
- err = err_bg | err_a;
- }
-
-done:
- brcmf_dbg(TRACE, "Exit\n");
- return err;
-}
-
static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
struct brcmf_bss_info_le *bi)
{
@@ -2123,7 +2323,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
if (!bss)
return -ENOMEM;
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
return err;
}
@@ -2229,7 +2429,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
goto CleanUp;
}
- cfg80211_put_bss(bss);
+ cfg80211_put_bss(wiphy, bss);
CleanUp:
@@ -2245,78 +2445,10 @@ static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
return vif->mode == WL_MODE_IBSS;
}
-/*
- * Traverse a string of 1-byte tag/1-byte length/variable-length value
- * triples, returning a pointer to the substring whose first element
- * matches tag
- */
-static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
-{
- struct brcmf_tlv *elt;
- int totlen;
-
- elt = (struct brcmf_tlv *) buf;
- totlen = buflen;
-
- /* find tagged parameter */
- while (totlen >= TLV_HDR_LEN) {
- int len = elt->len;
-
- /* validate remaining totlen */
- if ((elt->id == key) && (totlen >= (len + TLV_HDR_LEN)))
- return elt;
-
- elt = (struct brcmf_tlv *) ((u8 *) elt + (len + TLV_HDR_LEN));
- totlen -= (len + TLV_HDR_LEN);
- }
-
- return NULL;
-}
-
-/* Is any of the tlvs the expected entry? If
- * not update the tlvs buffer pointer/length.
- */
-static bool
-brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
- u8 *oui, u32 oui_len, u8 type)
+static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_if *ifp)
{
- /* If the contents match the OUI and the type */
- if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
- !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
- type == ie[TLV_BODY_OFF + oui_len]) {
- return true;
- }
-
- if (tlvs == NULL)
- return false;
- /* point to the next ie */
- ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
- /* calculate the length of the rest of the buffer */
- *tlvs_len -= (int)(ie - *tlvs);
- /* update the pointer to the start of the buffer */
- *tlvs = ie;
-
- return false;
-}
-
-static struct brcmf_vs_tlv *
-brcmf_find_wpaie(u8 *parse, u32 len)
-{
- struct brcmf_tlv *ie;
-
- while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
- if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
- WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
- return (struct brcmf_vs_tlv *)ie;
- }
- return NULL;
-}
-
-static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
-{
- struct net_device *ndev = cfg_to_ndev(cfg);
- struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
- struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
struct brcmf_tlv *tim;
@@ -2372,7 +2504,7 @@ update_bss_info_out:
return err;
}
-static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
+void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
{
struct escan_info *escan = &cfg->escan_info;
@@ -2391,8 +2523,7 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
container_of(work, struct brcmf_cfg80211_info,
escan_timeout_work);
- brcmf_notify_escan_complete(cfg,
- cfg->escan_info.ndev, true, true);
+ brcmf_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
}
static void brcmf_escan_timeout(unsigned long data)
@@ -2469,11 +2600,6 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
brcmf_err("Invalid escan result (NULL pointer)\n");
goto exit;
}
- if (!cfg->scan_request) {
- brcmf_dbg(SCAN, "result without cfg80211 request\n");
- goto exit;
- }
-
if (le16_to_cpu(escan_result_le->bss_count) != 1) {
brcmf_err("Invalid bss_count %d: ignoring\n",
escan_result_le->bss_count);
@@ -2481,6 +2607,14 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
}
bss_info_le = &escan_result_le->bss_info_le;
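+	/* the P2P common channel search consumes escan results itself */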
+ if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le))
+ goto exit;
+
+ if (!cfg->scan_request) {
+ brcmf_dbg(SCAN, "result without cfg80211 request\n");
+ goto exit;
+ }
+
bi_length = le32_to_cpu(bss_info_le->length);
if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
WL_ESCAN_RESULTS_FIXED_SIZE)) {
@@ -2519,6 +2653,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
list->count++;
} else {
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ if (brcmf_p2p_scan_finding_common_channel(cfg, NULL))
+ goto exit;
if (cfg->scan_request) {
cfg->bss_list = (struct brcmf_scan_results *)
cfg->escan_info.escan_buf;
@@ -2527,7 +2663,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
brcmf_notify_escan_complete(cfg, ndev, aborted,
false);
} else
- brcmf_err("Unexpected scan result 0x%x\n", status);
+ brcmf_dbg(SCAN, "Ignored scan complete result 0x%x\n",
+ status);
}
exit:
return err;
@@ -3031,9 +3168,8 @@ static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
}
#endif
-static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
+static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
/* set auth */
@@ -3292,7 +3428,7 @@ brcmf_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len,
parsed_info->vndrie.oui[2],
parsed_info->vndrie.oui_type);
- if (vndr_ies->count >= MAX_VNDR_IE_NUMBER)
+ if (vndr_ies->count >= VNDR_IE_PARSE_LIMIT)
break;
next:
remaining_len -= (ie->len + TLV_HDR_LEN);
@@ -3326,7 +3462,6 @@ brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
return ie_len + VNDR_IE_HDR_SIZE;
}
-static
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len)
{
@@ -3358,24 +3493,28 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
if (!iovar_ie_buf)
return -ENOMEM;
curr_ie_buf = iovar_ie_buf;
- if (ifp->vif->mode == WL_MODE_AP) {
- switch (pktflag) {
- case VNDR_IE_PRBRSP_FLAG:
- mgmt_ie_buf = saved_ie->probe_res_ie;
- mgmt_ie_len = &saved_ie->probe_res_ie_len;
- mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
- break;
- case VNDR_IE_BEACON_FLAG:
- mgmt_ie_buf = saved_ie->beacon_ie;
- mgmt_ie_len = &saved_ie->beacon_ie_len;
- mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
- break;
- default:
- err = -EPERM;
- brcmf_err("not suitable type\n");
- goto exit;
- }
- } else {
+ switch (pktflag) {
+ case BRCMF_VNDR_IE_PRBREQ_FLAG:
+ mgmt_ie_buf = saved_ie->probe_req_ie;
+ mgmt_ie_len = &saved_ie->probe_req_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->probe_req_ie);
+ break;
+ case BRCMF_VNDR_IE_PRBRSP_FLAG:
+ mgmt_ie_buf = saved_ie->probe_res_ie;
+ mgmt_ie_len = &saved_ie->probe_res_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
+ break;
+ case BRCMF_VNDR_IE_BEACON_FLAG:
+ mgmt_ie_buf = saved_ie->beacon_ie;
+ mgmt_ie_len = &saved_ie->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
+ break;
+ case BRCMF_VNDR_IE_ASSOCREQ_FLAG:
+ mgmt_ie_buf = saved_ie->assoc_req_ie;
+ mgmt_ie_len = &saved_ie->assoc_req_ie_len;
+ mgmt_ie_buf_len = sizeof(saved_ie->assoc_req_ie);
+ break;
+ default:
err = -EPERM;
brcmf_err("not suitable type\n");
goto exit;
@@ -3484,6 +3623,49 @@ exit:
return err;
}
+s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
+{
+ s32 pktflags[] = {
+ BRCMF_VNDR_IE_PRBREQ_FLAG,
+ BRCMF_VNDR_IE_PRBRSP_FLAG,
+ BRCMF_VNDR_IE_BEACON_FLAG
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pktflags); i++)
+ brcmf_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0);
+
+ memset(&vif->saved_ie, 0, sizeof(vif->saved_ie));
+ return 0;
+}
+
+static s32
+brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
+ struct cfg80211_beacon_data *beacon)
+{
+ s32 err;
+
+ /* Set Beacon IEs to FW */
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_BEACON_FLAG,
+ beacon->tail, beacon->tail_len);
+ if (err) {
+ brcmf_err("Set Beacon IE Failed\n");
+ return err;
+ }
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
+
+ /* Set Probe Response IEs to FW */
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBRSP_FLAG,
+ beacon->proberesp_ies,
+ beacon->proberesp_ies_len);
+ if (err)
+ brcmf_err("Set Probe Resp IE Failed\n");
+ else
+ brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
+
+ return err;
+}
+
static s32
brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ap_settings *settings)
@@ -3496,7 +3678,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_tlv *rsn_ie;
struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
- s32 bssidx = 0;
+ enum nl80211_iftype dev_role;
+ struct brcmf_fil_bss_enable_le bss_enable;
brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
cfg80211_get_chandef_type(&settings->chandef),
@@ -3506,10 +3689,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
settings->ssid, settings->ssid_len, settings->auth_type,
settings->inactivity_timeout);
- if (!test_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state)) {
- brcmf_err("Not in AP creation mode\n");
- return -EPERM;
- }
+ dev_role = ifp->vif->wdev.iftype;
memset(&ssid_le, 0, sizeof(ssid_le));
if (settings->ssid == NULL || settings->ssid_len == 0) {
@@ -3530,21 +3710,6 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
}
brcmf_set_mpc(ndev, 0);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
- if (err < 0) {
- brcmf_err("BRCMF_C_DOWN error %d\n", err);
- goto exit;
- }
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
- if (err < 0) {
- brcmf_err("SET INFRA error %d\n", err);
- goto exit;
- }
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
- if (err < 0) {
- brcmf_err("setting AP mode failed %d\n", err);
- goto exit;
- }
/* find the RSN_IE */
rsn_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
@@ -3570,27 +3735,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
}
} else {
brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
- brcmf_configure_opensecurity(ndev, bssidx);
+ brcmf_configure_opensecurity(ifp);
}
- /* Set Beacon IEs to FW */
- err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
- VNDR_IE_BEACON_FLAG,
- settings->beacon.tail,
- settings->beacon.tail_len);
- if (err)
- brcmf_err("Set Beacon IE Failed\n");
- else
- brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
- /* Set Probe Response IEs to FW */
- err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
- VNDR_IE_PRBRSP_FLAG,
- settings->beacon.proberesp_ies,
- settings->beacon.proberesp_ies_len);
- if (err)
- brcmf_err("Set Probe Resp IE Failed\n");
- else
- brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
+ brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
if (settings->beacon_interval) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
@@ -3608,22 +3756,62 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
goto exit;
}
}
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
- if (err < 0) {
- brcmf_err("BRCMF_C_UP error (%d)\n", err);
- goto exit;
+
+ if (dev_role == NL80211_IFTYPE_AP) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
+ if (err < 0) {
+ brcmf_err("BRCMF_C_DOWN error %d\n", err);
+ goto exit;
+ }
+ brcmf_fil_iovar_int_set(ifp, "apsta", 0);
}
- memset(&join_params, 0, sizeof(join_params));
- /* join parameters starts with ssid */
- memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
- /* create softap */
- err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
- &join_params, sizeof(join_params));
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
if (err < 0) {
- brcmf_err("SET SSID error (%d)\n", err);
+ brcmf_err("SET INFRA error %d\n", err);
goto exit;
}
+ if (dev_role == NL80211_IFTYPE_AP) {
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
+ if (err < 0) {
+ brcmf_err("setting AP mode failed %d\n", err);
+ goto exit;
+ }
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
+ if (err < 0) {
+ brcmf_err("BRCMF_C_UP error (%d)\n", err);
+ goto exit;
+ }
+
+ memset(&join_params, 0, sizeof(join_params));
+ /* join parameters starts with ssid */
+ memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
+ /* create softap */
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+ &join_params, sizeof(join_params));
+ if (err < 0) {
+ brcmf_err("SET SSID error (%d)\n", err);
+ goto exit;
+ }
+ brcmf_dbg(TRACE, "AP mode configuration complete\n");
+ } else {
+ err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
+ sizeof(ssid_le));
+ if (err < 0) {
+ brcmf_err("setting ssid failed %d\n", err);
+ goto exit;
+ }
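+		/* for P2P GO the interface is brought up by enabling its bsscfg */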
+ bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
+ bss_enable.enable = cpu_to_le32(1);
+ err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+ sizeof(bss_enable));
+ if (err < 0) {
+ brcmf_err("bss_enable config failed %d\n", err);
+ goto exit;
+ }
+
+ brcmf_dbg(TRACE, "GO mode configuration complete\n");
+ }
clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
@@ -3637,10 +3825,11 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = -EPERM;
+ struct brcmf_fil_bss_enable_le bss_enable;
brcmf_dbg(TRACE, "Enter\n");
- if (ifp->vif->mode == WL_MODE_AP) {
+ if (ifp->vif->wdev.iftype == NL80211_IFTYPE_AP) {
/* Due to most likely deauths outstanding we sleep */
/* first to make sure they get processed by fw. */
msleep(400);
@@ -3654,18 +3843,41 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
brcmf_err("BRCMF_C_UP error %d\n", err);
goto exit;
}
- brcmf_set_mpc(ndev, 1);
- clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
- clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+ } else {
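+		/* P2P GO: take the interface down by disabling its bsscfg */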
+ bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
+ bss_enable.enable = cpu_to_le32(0);
+ err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable,
+ sizeof(bss_enable));
+ if (err < 0)
+ brcmf_err("bss_enable config failed %d\n", err);
}
+ brcmf_set_mpc(ndev, 1);
+ set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
+ clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+
exit:
return err;
}
+static s32
+brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_beacon_data *info)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ s32 err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ err = brcmf_config_ap_mgmt_ie(ifp->vif, info);
+
+ return err;
+}
+
static int
brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
u8 *mac)
{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_scb_val_le scbval;
struct brcmf_if *ifp = netdev_priv(ndev);
s32 err;
@@ -3675,6 +3887,8 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(TRACE, "Enter %pM\n", mac);
+ if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
+ ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
if (!check_vif_up(ifp->vif))
return -EIO;
@@ -3689,7 +3903,147 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
return err;
}
+
+static void
+brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u16 frame_type, bool reg)
+{
+ struct brcmf_if *ifp = netdev_priv(wdev->netdev);
+ struct brcmf_cfg80211_vif *vif = ifp->vif;
+ u16 mgmt_type;
+
+ brcmf_dbg(TRACE, "Enter, frame_type %04x, reg=%d\n", frame_type, reg);
+
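+	/* the management frame subtype indexes a registration bitmap on the vif */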
+ mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
+ if (reg)
+ vif->mgmt_rx_reg |= BIT(mgmt_type);
+ else
+ vif->mgmt_rx_reg &= ~BIT(mgmt_type);
+}
+
+
+static int
+brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *chan, bool offchan,
+ unsigned int wait, const u8 *buf, size_t len,
+ bool no_cck, bool dont_wait_for_ack, u64 *cookie)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ const struct ieee80211_mgmt *mgmt;
+ struct brcmf_if *ifp;
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ s32 ie_offset;
+ s32 ie_len;
+ struct brcmf_fil_action_frame_le *action_frame;
+ struct brcmf_fil_af_params_le *af_params;
+ bool ack;
+ s32 chan_nr;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ *cookie = 0;
+
+ mgmt = (const struct ieee80211_mgmt *)buf;
+
+ if (!ieee80211_is_mgmt(mgmt->frame_control)) {
+ brcmf_err("Driver only allows MGMT packet type\n");
+ return -EPERM;
+ }
+
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+		/* Right now the only reason to get a probe response */
+		/* is for a p2p listen response or for a p2p GO from */
+		/* wpa_supplicant. Unfortunately the probe is sent */
+		/* on the primary ndev, while the dongle wants it on */
+		/* the p2p vif. Since this is the only reason for a */
+		/* probe response to be sent, the vif is taken from cfg. */
+		/* If it is ever desired to send a proberesp for a non */
+		/* p2p response, the data should be checked for */
+		/* "DIRECT-". Note that in the future the supplicant */
+		/* will use a dedicated p2p wdev for this and then */
+		/* this 'hack' is not needed anymore. */
+ ie_offset = DOT11_MGMT_HDR_LEN +
+ DOT11_BCN_PRB_FIXED_LEN;
+ ie_len = len - ie_offset;
+ ifp = netdev_priv(wdev->netdev);
+ vif = ifp->vif;
+ if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif)
+ vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ err = brcmf_vif_set_mgmt_ie(vif,
+ BRCMF_VNDR_IE_PRBRSP_FLAG,
+ &buf[ie_offset],
+ ie_len);
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
+ GFP_KERNEL);
+ } else if (ieee80211_is_action(mgmt->frame_control)) {
+ af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
+ if (af_params == NULL) {
+ brcmf_err("unable to allocate frame\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ action_frame = &af_params->action_frame;
+ /* Add the packet Id */
+ action_frame->packet_id = cpu_to_le32(*cookie);
+ /* Add BSSID */
+ memcpy(&action_frame->da[0], &mgmt->da[0], ETH_ALEN);
+ memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
+ /* Add the length, excluding the 802.11 header */
+ action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
+ /* Add the channel */
+ chan_nr = ieee80211_frequency_to_channel(chan->center_freq);
+ af_params->channel = cpu_to_le32(chan_nr);
+
+ memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
+ le16_to_cpu(action_frame->len));
+
+ brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n",
+ *cookie, le16_to_cpu(action_frame->len),
+ chan->center_freq);
+
+ ack = brcmf_p2p_send_action_frame(cfg, wdev->netdev,
+ af_params);
+
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
+ GFP_KERNEL);
+ kfree(af_params);
+ } else {
+ brcmf_dbg(TRACE, "Unhandled, fc=%04x!!\n", mgmt->frame_control);
+ brcmf_dbg_hex_dump(true, buf, len, "payload, len=%zu\n", len);
+ }
+
+exit:
+ return err;
+}
+
+
+static int
+brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_vif *vif;
+ int err = 0;
+
+ brcmf_dbg(TRACE, "Enter p2p listen cancel\n");
+
+ vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (vif == NULL) {
+ brcmf_err("No p2p device available for probe response\n");
+ err = -ENODEV;
+ goto exit;
+ }
+ brcmf_p2p_cancel_remain_on_channel(vif->ifp);
+exit:
+ return err;
+}
+
static struct cfg80211_ops wl_cfg80211_ops = {
+ .add_virtual_intf = brcmf_cfg80211_add_iface,
+ .del_virtual_intf = brcmf_cfg80211_del_iface,
.change_virtual_intf = brcmf_cfg80211_change_iface,
.scan = brcmf_cfg80211_scan,
.set_wiphy_params = brcmf_cfg80211_set_wiphy_params,
@@ -3704,7 +4058,6 @@ static struct cfg80211_ops wl_cfg80211_ops = {
.set_default_key = brcmf_cfg80211_config_default_key,
.set_default_mgmt_key = brcmf_cfg80211_config_default_mgmt_key,
.set_power_mgmt = brcmf_cfg80211_set_power_mgmt,
- .set_bitrate_mask = brcmf_cfg80211_set_bitrate_mask,
.connect = brcmf_cfg80211_connect,
.disconnect = brcmf_cfg80211_disconnect,
.suspend = brcmf_cfg80211_suspend,
@@ -3714,28 +4067,43 @@ static struct cfg80211_ops wl_cfg80211_ops = {
.flush_pmksa = brcmf_cfg80211_flush_pmksa,
.start_ap = brcmf_cfg80211_start_ap,
.stop_ap = brcmf_cfg80211_stop_ap,
+ .change_beacon = brcmf_cfg80211_change_beacon,
.del_station = brcmf_cfg80211_del_station,
.sched_scan_start = brcmf_cfg80211_sched_scan_start,
.sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
+ .mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register,
+ .mgmt_tx = brcmf_cfg80211_mgmt_tx,
+ .remain_on_channel = brcmf_p2p_remain_on_channel,
+ .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
#ifdef CONFIG_NL80211_TESTMODE
.testmode_cmd = brcmf_cfg80211_testmode
#endif
};
-static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
+static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
{
- s32 err = 0;
-
- switch (mode) {
- case WL_MODE_BSS:
- return NL80211_IFTYPE_STATION;
- case WL_MODE_IBSS:
- return NL80211_IFTYPE_ADHOC;
+ switch (type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ return -ENOTSUPP;
+ case NL80211_IFTYPE_ADHOC:
+ return WL_MODE_IBSS;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return WL_MODE_BSS;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ return WL_MODE_AP;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return WL_MODE_P2P;
+ case NL80211_IFTYPE_UNSPECIFIED:
default:
- return NL80211_IFTYPE_UNSPECIFIED;
+ break;
}
- return err;
+ return -EINVAL;
}
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
@@ -3747,6 +4115,56 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
}
+static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO)
+ },
+};
+static const struct ieee80211_iface_combination brcmf_iface_combos[] = {
+ {
+ .max_interfaces = BRCMF_IFACE_MAX_CNT,
+ .num_different_channels = 1, /* no multi-channel for now */
+ .n_limits = ARRAY_SIZE(brcmf_iface_limits),
+ .limits = brcmf_iface_limits
+ }
+};
+
+static const struct ieee80211_txrx_stypes
+brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ }
+};
+
static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
{
struct wiphy *wiphy;
@@ -3759,10 +4177,16 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
}
set_wiphy_dev(wiphy, phydev);
wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+ wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_AP);
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
+ wiphy->iface_combinations = brcmf_iface_combos;
+ wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
* it as 11a by default.
@@ -3774,10 +4198,11 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->cipher_suites = __wl_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
- wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power
- * save mode
- * by default
- */
+ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
+ WIPHY_FLAG_OFFCHAN_TX |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->mgmt_stypes = brcmf_txrx_stypes;
+ wiphy->max_remain_on_channel_duration = 5000;
brcmf_wiphy_pno_params(wiphy);
err = wiphy_register(wiphy);
if (err < 0) {
@@ -3788,31 +4213,25 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
return wiphy;
}
-static
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
- struct net_device *netdev,
- s32 mode, bool pm_block)
+ enum nl80211_iftype type,
+ bool pm_block)
{
struct brcmf_cfg80211_vif *vif;
if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT)
return ERR_PTR(-ENOSPC);
+ brcmf_dbg(TRACE, "allocating virtual interface (size=%zu)\n",
+ sizeof(*vif));
vif = kzalloc(sizeof(*vif), GFP_KERNEL);
if (!vif)
return ERR_PTR(-ENOMEM);
vif->wdev.wiphy = cfg->wiphy;
- vif->wdev.netdev = netdev;
- vif->wdev.iftype = brcmf_mode_to_nl80211_iftype(mode);
+ vif->wdev.iftype = type;
- if (netdev) {
- vif->ifp = netdev_priv(netdev);
- netdev->ieee80211_ptr = &vif->wdev;
- SET_NETDEV_DEV(netdev, wiphy_dev(cfg->wiphy));
- }
-
- vif->mode = mode;
+ vif->mode = brcmf_nl80211_iftype_to_mode(type);
vif->pm_block = pm_block;
vif->roam_off = -1;
@@ -3823,7 +4242,7 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
return vif;
}
-static void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
{
struct brcmf_cfg80211_info *cfg;
struct wiphy *wiphy;
@@ -3897,9 +4316,9 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
conn_info->resp_ie_len = 0;
}
-static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
+static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_if *ifp)
{
- struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
u32 req_len;
@@ -3975,9 +4394,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
brcmf_dbg(TRACE, "Enter\n");
- brcmf_get_assoc_ies(cfg);
+ brcmf_get_assoc_ies(cfg, ifp);
memcpy(profile->bssid, e->addr, ETH_ALEN);
- brcmf_update_bss_info(cfg);
+ brcmf_update_bss_info(cfg, ifp);
buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
if (buf == NULL) {
@@ -4032,9 +4451,11 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTING,
&ifp->vif->sme_state)) {
if (completed) {
- brcmf_get_assoc_ies(cfg);
+ brcmf_get_assoc_ies(cfg, ifp);
memcpy(profile->bssid, e->addr, ETH_ALEN);
- brcmf_update_bss_info(cfg);
+ brcmf_update_bss_info(cfg, ifp);
+ set_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state);
}
cfg80211_connect_result(ndev,
(u8 *)profile->bssid,
@@ -4045,9 +4466,6 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
completed ? WLAN_STATUS_SUCCESS :
WLAN_STATUS_AUTH_TIMEOUT,
GFP_KERNEL);
- if (completed)
- set_bit(BRCMF_VIF_STATUS_CONNECTED,
- &ifp->vif->sme_state);
brcmf_dbg(CONN, "Report connect result - connection %s\n",
completed ? "succeeded" : "failed");
}
@@ -4060,38 +4478,38 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
- s32 err = 0;
+ static int generation;
u32 event = e->event_code;
u32 reason = e->reason;
- u32 len = e->datalen;
- static int generation;
-
struct station_info sinfo;
brcmf_dbg(CONN, "event %d, reason %d\n", event, reason);
- memset(&sinfo, 0, sizeof(sinfo));
+ if (event == BRCMF_E_LINK && reason == BRCMF_E_REASON_LINK_BSSCFG_DIS &&
+ ndev != cfg_to_ndev(cfg)) {
+ brcmf_dbg(CONN, "AP mode link down\n");
+ complete(&cfg->vif_disabled);
+ return 0;
+ }
- sinfo.filled = 0;
if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
- reason == BRCMF_E_STATUS_SUCCESS) {
+ (reason == BRCMF_E_STATUS_SUCCESS)) {
+ memset(&sinfo, 0, sizeof(sinfo));
sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
if (!data) {
brcmf_err("No IEs present in ASSOC/REASSOC_IND");
return -EINVAL;
}
sinfo.assoc_req_ies = data;
- sinfo.assoc_req_ies_len = len;
+ sinfo.assoc_req_ies_len = e->datalen;
generation++;
sinfo.generation = generation;
- cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_ATOMIC);
+ cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_KERNEL);
} else if ((event == BRCMF_E_DISASSOC_IND) ||
(event == BRCMF_E_DEAUTH_IND) ||
(event == BRCMF_E_DEAUTH)) {
- generation++;
- sinfo.generation = generation;
- cfg80211_del_sta(ndev, e->addr, GFP_ATOMIC);
+ cfg80211_del_sta(ndev, e->addr, GFP_KERNEL);
}
- return err;
+ return 0;
}
static s32
@@ -4128,6 +4546,8 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
}
brcmf_link_down(ifp->vif);
brcmf_init_prof(ndev_to_prof(ndev));
+ if (ndev != cfg_to_ndev(cfg))
+ complete(&cfg->vif_disabled);
} else if (brcmf_is_nonetwork(cfg, e)) {
if (brcmf_is_ibssmode(ifp->vif))
clear_bit(BRCMF_VIF_STATUS_CONNECTING,
@@ -4176,6 +4596,57 @@ brcmf_notify_mic_status(struct brcmf_if *ifp,
return 0;
}
+static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_if_event *ifevent = (struct brcmf_if_event *)data;
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+ struct brcmf_cfg80211_vif *vif;
+
+ brcmf_dbg(TRACE, "Enter: action %u flags %u ifidx %u bsscfg %u\n",
+ ifevent->action, ifevent->flags, ifevent->ifidx,
+ ifevent->bssidx);
+
+ mutex_lock(&event->vif_event_lock);
+ event->action = ifevent->action;
+ vif = event->vif;
+
+ switch (ifevent->action) {
+ case BRCMF_E_IF_ADD:
+ /* waiting process may have timed out */
+ if (!cfg->vif_event.vif)
+ return -EBADF;
+
+ ifp->vif = vif;
+ vif->ifp = ifp;
+ vif->wdev.netdev = ifp->ndev;
+ ifp->ndev->ieee80211_ptr = &vif->wdev;
+ SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
+ mutex_unlock(&event->vif_event_lock);
+ wake_up(&event->vif_wq);
+ return 0;
+
+ case BRCMF_E_IF_DEL:
+ ifp->vif = NULL;
+ mutex_unlock(&event->vif_event_lock);
+ /* event may not be upon user request */
+ if (brcmf_cfg80211_vif_event_armed(cfg))
+ wake_up(&event->vif_wq);
+ return 0;
+
+ case BRCMF_E_IF_CHANGE:
+ mutex_unlock(&event->vif_event_lock);
+ wake_up(&event->vif_wq);
+ return 0;
+
+ default:
+ mutex_unlock(&event->vif_event_lock);
+ break;
+ }
+ return -EINVAL;
+}
+
static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
{
conf->frag_threshold = (u32)-1;
@@ -4207,6 +4678,18 @@ static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
brcmf_notify_connect_status);
brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND,
brcmf_notify_sched_scan_results);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_IF,
+ brcmf_notify_vif_event);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_PROBEREQ_MSG,
+ brcmf_p2p_notify_rx_mgmt_p2p_probereq);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_P2P_DISC_LISTEN_COMPLETE,
+ brcmf_p2p_notify_listen_complete);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_RX,
+ brcmf_p2p_notify_action_frame_rx);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_COMPLETE,
+ brcmf_p2p_notify_action_tx_complete);
+ brcmf_fweh_register(cfg->pub, BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE,
+ brcmf_p2p_notify_action_tx_complete);
}
static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
@@ -4262,7 +4745,7 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
mutex_init(&cfg->usr_sync);
brcmf_init_escan(cfg);
brcmf_init_conf(cfg->conf);
-
+ init_completion(&cfg->vif_disabled);
return err;
}
@@ -4273,6 +4756,12 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
brcmf_deinit_priv_mem(cfg);
}
+static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
+{
+ init_waitqueue_head(&event->vif_wq);
+ mutex_init(&event->vif_event_lock);
+}
+
struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
struct device *busdev)
{
@@ -4296,25 +4785,41 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
cfg = wiphy_priv(wiphy);
cfg->wiphy = wiphy;
cfg->pub = drvr;
+ init_vif_event(&cfg->vif_event);
INIT_LIST_HEAD(&cfg->vif_list);
- vif = brcmf_alloc_vif(cfg, ndev, WL_MODE_BSS, false);
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
wiphy_free(wiphy);
return NULL;
}
+ vif->ifp = ifp;
+ vif->wdev.netdev = ndev;
+ ndev->ieee80211_ptr = &vif->wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
+
err = wl_init_priv(cfg);
if (err) {
brcmf_err("Failed to init iwm_priv (%d)\n", err);
goto cfg80211_attach_out;
}
-
ifp->vif = vif;
+
+ err = brcmf_p2p_attach(cfg);
+ if (err) {
+ brcmf_err("P2P initilisation failed (%d)\n", err);
+ goto cfg80211_p2p_attach_out;
+ }
+
return cfg;
+cfg80211_p2p_attach_out:
+ wl_deinit_priv(cfg);
+
cfg80211_attach_out:
brcmf_free_vif(vif);
+ wiphy_free(wiphy);
return NULL;
}
@@ -4330,9 +4835,8 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
}
static s32
-brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
+brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
__le32 roamtrigger[2];
__le32 roam_delta[2];
@@ -4383,10 +4887,9 @@ dongle_rom_out:
}
static s32
-brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
+brcmf_dongle_scantime(struct brcmf_if *ifp, s32 scan_assoc_time,
s32 scan_unassoc_time, s32 scan_passive_time)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
s32 err = 0;
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
@@ -4456,6 +4959,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
{
struct net_device *ndev;
struct wireless_dev *wdev;
+ struct brcmf_if *ifp;
s32 power_mode;
s32 err = 0;
@@ -4464,35 +4968,34 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
ndev = cfg_to_ndev(cfg);
wdev = ndev->ieee80211_ptr;
+ ifp = netdev_priv(ndev);
+
+ /* make sure RF is ready for work */
+ brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
- brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
- WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME);
+ brcmf_dongle_scantime(ifp, WL_SCAN_CHANNEL_TIME,
+ WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME);
power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
- err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PM,
- power_mode);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, power_mode);
if (err)
goto default_conf_out;
brcmf_dbg(INFO, "power save set to %s\n",
(power_mode ? "enabled" : "disabled"));
- err = brcmf_dongle_roam(ndev, (cfg->roam_on ? 0 : 1),
- WL_BEACON_TIMEOUT);
+ err = brcmf_dongle_roam(ifp, (cfg->roam_on ? 0 : 1), WL_BEACON_TIMEOUT);
if (err)
goto default_conf_out;
err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
NULL, NULL);
- if (err && err != -EINPROGRESS)
+ if (err)
goto default_conf_out;
err = brcmf_dongle_probecap(cfg);
if (err)
goto default_conf_out;
- /* -EINPROGRESS: Call commit handler */
-
-default_conf_out:
-
cfg->dongle_up = true;
+default_conf_out:
return err;
@@ -4501,8 +5004,6 @@ default_conf_out:
static s32 __brcmf_cfg80211_up(struct brcmf_if *ifp)
{
set_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
- if (ifp->idx)
- return 0;
return brcmf_config_dongle(ifp->drvr->config);
}
@@ -4557,3 +5058,57 @@ s32 brcmf_cfg80211_down(struct net_device *ndev)
return err;
}
+u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state)
+{
+ struct brcmf_cfg80211_vif *vif;
+ u32 result = 0;
+
+ list_for_each_entry(vif, &cfg->vif_list, list) {
+ if (test_bit(state, &vif->sme_state))
+ result++;
+ }
+ return result;
+}
+
+static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
+ u8 action)
+{
+ u8 evt_action;
+
+ mutex_lock(&event->vif_event_lock);
+ evt_action = event->action;
+ mutex_unlock(&event->vif_event_lock);
+ return evt_action == action;
+}
+
+void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_cfg80211_vif *vif)
+{
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+
+ mutex_lock(&event->vif_event_lock);
+ event->vif = vif;
+ event->action = 0;
+ mutex_unlock(&event->vif_event_lock);
+}
+
+bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+ bool armed;
+
+ mutex_lock(&event->vif_event_lock);
+ armed = event->vif != NULL;
+ mutex_unlock(&event->vif_event_lock);
+
+ return armed;
+}
+int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
+ u8 action, ulong timeout)
+{
+ struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
+
+ return wait_event_timeout(event->vif_wq,
+ vif_event_equals(event, action), timeout);
+}
+
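
The arm/wait helpers added at the end of wl_cfg80211.c above are meant to bracket a firmware request that triggers a BRCMF_E_IF event, with brcmf_notify_vif_event() doing the wake-up. A minimal sketch of the expected calling pattern follows; the iovar name, the request structure, the timeout value, and re-arming with a NULL vif to disarm are all illustrative assumptions, not taken from this patch:

	brcmf_cfg80211_arm_vif_event(cfg, vif);

	/* hypothetical firmware request that creates the interface */
	err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request,
				       sizeof(if_request));
	if (err) {
		brcmf_cfg80211_arm_vif_event(cfg, NULL);
		return err;
	}

	/* brcmf_notify_vif_event() wakes the waiter on BRCMF_E_IF_ADD */
	err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
						    msecs_to_jiffies(1500));
	brcmf_cfg80211_arm_vif_event(cfg, NULL);
	if (!err)
		return -EIO;	/* timed out waiting for the firmware event */
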
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index e4d9cc7a8e63..8b5d4989906c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -41,6 +41,38 @@
#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
#define IE_MAX_LEN 512
+/* IE TLV processing */
+#define TLV_LEN_OFF 1 /* length offset */
+#define TLV_HDR_LEN 2 /* header length */
+#define TLV_BODY_OFF 2 /* body offset */
+#define TLV_OUI_LEN 3 /* oui id length */
+
+/* 802.11 Mgmt Packet flags */
+#define BRCMF_VNDR_IE_BEACON_FLAG 0x1
+#define BRCMF_VNDR_IE_PRBRSP_FLAG 0x2
+#define BRCMF_VNDR_IE_ASSOCRSP_FLAG 0x4
+#define BRCMF_VNDR_IE_AUTHRSP_FLAG 0x8
+#define BRCMF_VNDR_IE_PRBREQ_FLAG 0x10
+#define BRCMF_VNDR_IE_ASSOCREQ_FLAG 0x20
+/* vendor IE in IW advertisement protocol ID field */
+#define BRCMF_VNDR_IE_IWAPID_FLAG 0x40
+/* allow custom IE id */
+#define BRCMF_VNDR_IE_CUSTOM_FLAG 0x100
+
+/* P2P Action Frames flags (spec ordered) */
+#define BRCMF_VNDR_IE_GONREQ_FLAG 0x001000
+#define BRCMF_VNDR_IE_GONRSP_FLAG 0x002000
+#define BRCMF_VNDR_IE_GONCFM_FLAG 0x004000
+#define BRCMF_VNDR_IE_INVREQ_FLAG 0x008000
+#define BRCMF_VNDR_IE_INVRSP_FLAG 0x010000
+#define BRCMF_VNDR_IE_DISREQ_FLAG 0x020000
+#define BRCMF_VNDR_IE_DISRSP_FLAG 0x040000
+#define BRCMF_VNDR_IE_PRDREQ_FLAG 0x080000
+#define BRCMF_VNDR_IE_PRDRSP_FLAG 0x100000
+
+#define BRCMF_VNDR_IE_P2PAF_SHIFT 12
+
+
/**
* enum brcmf_scan_status - dongle scan status
*
@@ -52,11 +84,19 @@ enum brcmf_scan_status {
BRCMF_SCAN_STATUS_ABORT,
};
-/* wi-fi mode */
+/**
+ * enum wl_mode - driver mode of virtual interface.
+ *
+ * @WL_MODE_BSS: connects to BSS.
+ * @WL_MODE_IBSS: operate as ad-hoc.
+ * @WL_MODE_AP: operate as access-point.
+ * @WL_MODE_P2P: provide P2P discovery.
+ */
enum wl_mode {
WL_MODE_BSS,
WL_MODE_IBSS,
- WL_MODE_AP
+ WL_MODE_AP,
+ WL_MODE_P2P
};
/* dongle configuration */
@@ -108,6 +148,7 @@ struct brcmf_cfg80211_profile {
* @BRCMF_VIF_STATUS_READY: ready for operation.
* @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
 * @BRCMF_VIF_STATUS_CONNECTED: connected/joined successfully.
+ * @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress.
* @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
* @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
*/
@@ -115,6 +156,7 @@ enum brcmf_vif_status {
BRCMF_VIF_STATUS_READY,
BRCMF_VIF_STATUS_CONNECTING,
BRCMF_VIF_STATUS_CONNECTED,
+ BRCMF_VIF_STATUS_DISCONNECTING,
BRCMF_VIF_STATUS_AP_CREATING,
BRCMF_VIF_STATUS_AP_CREATED
};
@@ -122,16 +164,22 @@ enum brcmf_vif_status {
/**
* struct vif_saved_ie - holds saved IEs for a virtual interface.
*
+ * @probe_req_ie: IE info for probe request.
* @probe_res_ie: IE info for probe response.
* @beacon_ie: IE info for beacon frame.
+ * @probe_req_ie_len: IE info length for probe request.
* @probe_res_ie_len: IE info length for probe response.
* @beacon_ie_len: IE info length for beacon frame.
*/
struct vif_saved_ie {
+ u8 probe_req_ie[IE_MAX_LEN];
u8 probe_res_ie[IE_MAX_LEN];
u8 beacon_ie[IE_MAX_LEN];
+ u8 assoc_req_ie[IE_MAX_LEN];
+ u32 probe_req_ie_len;
u32 probe_res_ie_len;
u32 beacon_ie_len;
+ u32 assoc_req_ie_len;
};
/**
@@ -145,6 +193,7 @@ struct vif_saved_ie {
* @sme_state: SME state using enum brcmf_vif_status bits.
* @pm_block: power-management blocked.
* @list: linked list.
+ * @mgmt_rx_reg: registered rx mgmt frame types.
*/
struct brcmf_cfg80211_vif {
struct brcmf_if *ifp;
@@ -156,6 +205,7 @@ struct brcmf_cfg80211_vif {
bool pm_block;
struct vif_saved_ie saved_ie;
struct list_head list;
+ u16 mgmt_rx_reg;
};
/* association inform */
@@ -189,6 +239,9 @@ struct escan_info {
u8 escan_buf[WL_ESCAN_BUF_SIZE];
struct wiphy *wiphy;
struct net_device *ndev;
+ s32 (*run)(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ struct cfg80211_scan_request *request, u16 action);
};
/**
@@ -273,10 +326,27 @@ struct brcmf_pno_scanresults_le {
};
/**
+ * struct brcmf_cfg80211_vif_event - virtual interface event information.
+ *
+ * @vif_wq: waitqueue awaiting interface event from firmware.
+ * @vif_event_lock: protects other members in this structure.
+ * @vif_complete: completion for net attach.
+ * @action: either add, change, or delete.
+ * @vif: virtual interface object related to the event.
+ */
+struct brcmf_cfg80211_vif_event {
+ wait_queue_head_t vif_wq;
+ struct mutex vif_event_lock;
+ u8 action;
+ struct brcmf_cfg80211_vif *vif;
+};
+
+/**
* struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
*
* @wiphy: wiphy object for cfg80211 interface.
* @conf: dongle configuration.
+ * @p2p: peer-to-peer specific information.
* @scan_request: cfg80211 scan request object.
* @usr_sync: mainly for dongle up/down synchronization.
* @bss_list: bss_list holding scanned ap information.
@@ -304,10 +374,12 @@ struct brcmf_pno_scanresults_le {
* @escan_ioctl_buf: dongle command buffer for escan commands.
* @vif_list: linked list of vif instances.
* @vif_cnt: number of vif instances.
+ * @vif_event: vif event signalling.
*/
struct brcmf_cfg80211_info {
struct wiphy *wiphy;
struct brcmf_cfg80211_conf *conf;
+ struct brcmf_p2p_info p2p;
struct cfg80211_scan_request *scan_request;
struct mutex usr_sync;
struct brcmf_scan_results *bss_list;
@@ -335,6 +407,21 @@ struct brcmf_cfg80211_info {
u8 *escan_ioctl_buf;
struct list_head vif_list;
u8 vif_cnt;
+ struct brcmf_cfg80211_vif_event vif_event;
+ struct completion vif_disabled;
+};
+
+/**
+ * struct brcmf_tlv - tag_ID/length/value_buffer tuple.
+ *
+ * @id: tag identifier.
+ * @len: number of bytes in value buffer.
+ * @data: value buffer.
+ */
+struct brcmf_tlv {
+ u8 id;
+ u8 len;
+ u8 data[1];
};
static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
@@ -389,4 +476,26 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
s32 brcmf_cfg80211_up(struct net_device *ndev);
s32 brcmf_cfg80211_down(struct net_device *ndev);
+struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
+ enum nl80211_iftype type,
+ bool pm_block);
+void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
+
+s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
+ const u8 *vndr_ie_buf, u32 vndr_ie_len);
+s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
+struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
+u16 channel_to_chanspec(struct ieee80211_channel *ch);
+u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
+void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_cfg80211_vif *vif);
+bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg);
+int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
+ u8 action, ulong timeout);
+s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
+ struct net_device *ndev,
+ bool aborted, bool fw_abort);
+void brcmf_set_mpc(struct net_device *ndev, int mpc);
+void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
+
#endif /* _wl_cfg80211_h_ */
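
The new mgmt_rx_reg member follows the subtype-to-bit mapping used in brcmf_cfg80211_mgmt_frame_register() above: bit N stands for management subtype N. On the receive side the same test would decide whether a frame is reported to cfg80211; a rough sketch of that check (the surrounding rx path is not part of this patch):

	u16 stype = (le16_to_cpu(mgmt->frame_control) &
		     IEEE80211_FCTL_STYPE) >> 4;

	if (vif->mgmt_rx_reg & BIT(stype)) {
		/* subtype was registered by user space; report the frame */
	}
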
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 1de94f30564f..1585cc5bf866 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -961,7 +961,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
/* if acked then clear bit and free packet */
if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
&& isset(bitmap, bindex)) {
- ini->tx_in_transit--;
ini->txretry[index] = 0;
/*
@@ -990,7 +989,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
if (retry && (ini->txretry[index] < (int)retry_limit)) {
int ret;
ini->txretry[index]++;
- ini->tx_in_transit--;
ret = brcms_c_txfifo(wlc, queue, p);
/*
* We shouldn't be out of space in the DMA
@@ -1000,7 +998,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
WARN_ONCE(ret, "queue %d out of txds\n", queue);
} else {
/* Retry timeout */
- ini->tx_in_transit--;
ieee80211_tx_info_clear_status(tx_info);
tx_info->status.ampdu_ack_len = 0;
tx_info->status.ampdu_len = 1;
@@ -1009,8 +1006,8 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
brcms_dbg_ht(wlc->hw->d11core,
- "BA Timeout, seq %d, in_transit %d\n",
- seq, ini->tx_in_transit);
+ "BA Timeout, seq %d\n",
+ seq);
ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
p);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index a90b72202ec5..10ee314c4229 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -183,8 +183,7 @@ static bool brcms_c_country_valid(const char *ccode)
* chars.
*/
if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A &&
- (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A &&
- ccode[2] == '\0'))
+ (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A))
return false;
/*
@@ -670,7 +669,7 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
const struct ieee80211_reg_rule *rule;
- int band, i, ret;
+ int band, i;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
sband = wiphy->bands[band];
@@ -685,9 +684,8 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
continue;
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- ret = freq_reg_info(wiphy, ch->center_freq,
- 0, &rule);
- if (ret)
+ rule = freq_reg_info(wiphy, ch->center_freq);
+ if (IS_ERR(rule))
continue;
if (!(rule->flags & NL80211_RRF_NO_IBSS))
@@ -703,8 +701,8 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
}
}
-static int brcms_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void brcms_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct brcms_info *wl = hw->priv;
@@ -745,8 +743,6 @@ static int brcms_reg_notifier(struct wiphy *wiphy,
if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
brcms_c_japan_ccode(request->alpha2));
-
- return 0;
}
void brcms_c_regd_init(struct brcms_c_info *wlc)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 1fbd8ecbe2ea..c6451c61407a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -36,6 +36,7 @@
#include "debug.h"
#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
+#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
/* Flags we support */
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
@@ -362,8 +363,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return -EOPNOTSUPP;
}
+ spin_lock_bh(&wl->lock);
+ memcpy(wl->pub->cur_etheraddr, vif->addr, sizeof(vif->addr));
wl->mute_tx = false;
brcms_c_mute(wl->wlc, false);
+ spin_unlock_bh(&wl->lock);
return 0;
}
@@ -539,9 +543,8 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ARP_FILTER) {
/* Hardware ARP filter address list or state changed */
- brcms_err(core, "%s: arp filtering: enabled %s, count %d"
- " (implement)\n", __func__, info->arp_filter_enabled ?
- "true" : "false", info->arp_addr_cnt);
+ brcms_err(core, "%s: arp filtering: %d addresses"
+ " (implement)\n", __func__, info->arp_addr_cnt);
}
if (changed & BSS_CHANGED_QOS) {
@@ -668,7 +671,9 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
spin_lock_bh(&wl->lock);
brcms_c_ampdu_flush(wl->wlc, sta, tid);
spin_unlock_bh(&wl->lock);
@@ -708,16 +713,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
}
+static bool brcms_tx_flush_completed(struct brcms_info *wl)
+{
+ bool result;
+
+ spin_lock_bh(&wl->lock);
+ result = brcms_c_tx_flush_completed(wl->wlc);
+ spin_unlock_bh(&wl->lock);
+ return result;
+}
+
static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
{
struct brcms_info *wl = hw->priv;
+ int ret;
no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
- /* wait for packet queue and dma fifos to run empty */
- spin_lock_bh(&wl->lock);
- brcms_c_wait_for_tx_completion(wl->wlc, drop);
- spin_unlock_bh(&wl->lock);
+ ret = wait_event_timeout(wl->tx_flush_wq,
+ brcms_tx_flush_completed(wl),
+ msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
+
+ brcms_dbg_mac80211(wl->wlc->hw->d11core,
+ "ret=%d\n", jiffies_to_msecs(ret));
}
static const struct ieee80211_ops brcms_ops = {
@@ -772,6 +790,7 @@ void brcms_dpc(unsigned long data)
done:
spin_unlock_bh(&wl->lock);
+ wake_up(&wl->tx_flush_wq);
}
/*
@@ -1020,6 +1039,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
atomic_set(&wl->callbacks, 0);
+ init_waitqueue_head(&wl->tx_flush_wq);
+
/* setup the bottom half handler */
tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
@@ -1407,9 +1428,10 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
#endif
t->ms = ms;
t->periodic = (bool) periodic;
- t->set = true;
-
- atomic_inc(&t->wl->callbacks);
+ if (!t->set) {
+ t->set = true;
+ atomic_inc(&t->wl->callbacks);
+ }
ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms));
}
@@ -1608,13 +1630,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
spin_lock_bh(&wl->lock);
return blocked;
}
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_msleep(struct brcms_info *wl, uint ms)
-{
- spin_unlock_bh(&wl->lock);
- msleep(ms);
- spin_lock_bh(&wl->lock);
-}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 9358bd5ebd35..947ccacf43e6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,6 +68,8 @@ struct brcms_info {
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */
+ /* tx flush */
+ wait_queue_head_t tx_flush_wq;
/* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */
@@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
extern void brcms_free_timer(struct brcms_timer *timer);
extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_msleep(struct brcms_info *wl, uint ms);
extern void brcms_dpc(unsigned long data);
extern void brcms_timer(struct brcms_timer *t);
extern void brcms_fatal_error(struct brcms_info *wl);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 17594de4199e..8ef02dca8f8c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -101,8 +101,6 @@
#define DOT11_RTS_LEN 16
#define DOT11_CTS_LEN 10
#define DOT11_BA_BITMAP_LEN 128
-#define DOT11_MIN_BEACON_PERIOD 1
-#define DOT11_MAX_BEACON_PERIOD 0xFFFF
#define DOT11_MAXNUMFRAGS 16
#define DOT11_MAX_FRAG_LEN 2346
@@ -1027,7 +1025,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
static bool
brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
- bool morepending = false;
struct bcma_device *core;
struct tx_status txstatus, *txs;
u32 s1, s2;
@@ -1041,23 +1038,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs = &txstatus;
core = wlc_hw->d11core;
*fatal = false;
- s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
- while (!(*fatal)
- && (s1 & TXS_V)) {
- /* !give others some time to run! */
- if (n >= max_tx_num) {
- morepending = true;
- break;
- }
+ while (n < max_tx_num) {
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
if (s1 == 0xffffffff) {
brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
*fatal = true;
return false;
}
- s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
+ /* only process when valid */
+ if (!(s1 & TXS_V))
+ break;
+ s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
txs->status = s1 & TXS_STATUS_MASK;
txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
txs->sequence = s2 & TXS_SEQ_MASK;
@@ -1065,15 +1059,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
txs->lasttxtime = 0;
*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
-
- s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
+ if (*fatal)
+ return false;
n++;
}
- if (*fatal)
- return false;
-
- return morepending;
+ return n >= max_tx_num;
}
static void brcms_c_tbtt(struct brcms_c_info *wlc)
@@ -2473,6 +2464,7 @@ static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
{
static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+ u8 *ethaddr = wlc_hw->wlc->pub->cur_etheraddr;
if (mute_tx) {
/* suspend tx fifos */
@@ -2482,8 +2474,7 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_VI_FIFO);
/* zero the address match register so we do not send ACKs */
- brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
- null_ether_addr);
+ brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, null_ether_addr);
} else {
/* resume tx fifos */
brcms_b_tx_fifo_resume(wlc_hw, TX_DATA_FIFO);
@@ -2492,8 +2483,7 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
brcms_b_tx_fifo_resume(wlc_hw, TX_AC_VI_FIFO);
/* Restore address */
- brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
- wlc_hw->etheraddr);
+ brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, ethaddr);
}
wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0);
@@ -3148,8 +3138,7 @@ void brcms_c_reset(struct brcms_c_info *wlc)
brcms_c_statsupd(wlc);
/* reset our snapshot of macstat counters */
- memset((char *)wlc->core->macstat_snapshot, 0,
- sizeof(struct macstat));
+ memset(wlc->core->macstat_snapshot, 0, sizeof(struct macstat));
brcms_b_reset(wlc->hw);
}
@@ -4062,7 +4051,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
return;
}
- memset((char *)&acp_shm, 0, sizeof(struct shm_acparams));
+ memset(&acp_shm, 0, sizeof(struct shm_acparams));
/* fill in shm ac params struct */
acp_shm.txop = params->txop;
/* convert from units of 32us to us for ucode */
@@ -4778,7 +4767,7 @@ static void brcms_c_bss_default_init(struct brcms_c_info *wlc)
struct brcms_bss_info *bi = wlc->default_bss;
/* init default and target BSS with some sane initial values */
- memset((char *)(bi), 0, sizeof(struct brcms_bss_info));
+ memset(bi, 0, sizeof(*bi));
bi->beacon_period = BEACON_INTERVAL_DEFAULT;
/* fill the default channel as the first valid channel
@@ -5307,7 +5296,7 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
/* Clear rateset override */
- memset(&rs, 0, sizeof(struct brcms_c_rateset));
+ memset(&rs, 0, sizeof(rs));
switch (gmode) {
case GMODE_LEGACY_B:
@@ -5530,7 +5519,7 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
if (rs->count > BRCMS_NUMRATES)
return -ENOBUFS;
- memset(&internal_rs, 0, sizeof(struct brcms_c_rateset));
+ memset(&internal_rs, 0, sizeof(internal_rs));
/* Copy only legacy rateset section */
internal_rs.count = rs->count;
@@ -5556,8 +5545,7 @@ int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs)
int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period)
{
- if (period < DOT11_MIN_BEACON_PERIOD ||
- period > DOT11_MAX_BEACON_PERIOD)
+ if (period == 0)
return -EINVAL;
wlc->default_bss->beacon_period = period;
@@ -5634,7 +5622,7 @@ int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
for (i = 0; i < BRCMS_MAXMODULES; i++) {
if (!strcmp(wlc->modulecb[i].name, name) &&
(wlc->modulecb[i].hdl == hdl)) {
- memset(&wlc->modulecb[i], 0, sizeof(struct modulecb));
+ memset(&wlc->modulecb[i], 0, sizeof(wlc->modulecb[i]));
return 0;
}
}
@@ -6454,10 +6442,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
&& (!is_mcs_rate(rspec[k]))) {
- brcms_err(wlc->hw->d11core,
- "wl%d: %s: IEEE80211_TX_"
- "RC_MCS != is_mcs_rate(rspec)\n",
- wlc->pub->unit, __func__);
+ brcms_warn(wlc->hw->d11core,
+ "wl%d: %s: IEEE80211_TX_RC_MCS != is_mcs_rate(rspec)\n",
+ wlc->pub->unit, __func__);
}
if (is_mcs_rate(rspec[k])) {
@@ -6690,11 +6677,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
(struct ofdm_phy_hdr *) rts_plcp) :
rts_plcp[0]) << 8;
} else {
- memset((char *)txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
- memset((char *)&txh->rts_frame, 0,
- sizeof(struct ieee80211_rts));
- memset((char *)txh->RTSPLCPFallback, 0,
- sizeof(txh->RTSPLCPFallback));
+ memset(txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
+ memset(&txh->rts_frame, 0, sizeof(struct ieee80211_rts));
+ memset(txh->RTSPLCPFallback, 0, sizeof(txh->RTSPLCPFallback));
txh->RTSDurFallback = 0;
}
@@ -6849,21 +6834,19 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
wlc->fragthresh[queue] =
(u16) newfragthresh;
} else {
- brcms_err(wlc->hw->d11core,
- "wl%d: %s txop invalid "
- "for rate %d\n",
- wlc->pub->unit, fifo_names[queue],
- rspec2rate(rspec[0]));
+ brcms_warn(wlc->hw->d11core,
+ "wl%d: %s txop invalid for rate %d\n",
+ wlc->pub->unit, fifo_names[queue],
+ rspec2rate(rspec[0]));
}
if (dur > wlc->edcf_txop[ac])
- brcms_err(wlc->hw->d11core,
- "wl%d: %s: %s txop "
- "exceeded phylen %d/%d dur %d/%d\n",
- wlc->pub->unit, __func__,
- fifo_names[queue],
- phylen, wlc->fragthresh[queue],
- dur, wlc->edcf_txop[ac]);
+ brcms_warn(wlc->hw->d11core,
+ "wl%d: %s: %s txop exceeded phylen %d/%d dur %d/%d\n",
+ wlc->pub->unit, __func__,
+ fifo_names[queue],
+ phylen, wlc->fragthresh[queue],
+ dur, wlc->edcf_txop[ac]);
}
}
@@ -7338,7 +7321,7 @@ brcms_c_bcn_prb_template(struct brcms_c_info *wlc, u16 type,
*len = hdr_len + body_len;
/* format PHY and MAC headers */
- memset((char *)buf, 0, hdr_len);
+ memset(buf, 0, hdr_len);
plcp = (struct cck_phy_hdr *) buf;
@@ -7409,9 +7392,13 @@ brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
struct brcms_bss_cfg *cfg,
bool suspend)
{
- u16 prb_resp[BCN_TMPL_LEN / 2];
+ u16 *prb_resp;
int len = BCN_TMPL_LEN;
+ prb_resp = kmalloc(BCN_TMPL_LEN, GFP_ATOMIC);
+ if (!prb_resp)
+ return;
+
/*
* write the probe response to hardware, or save in
* the config structure
@@ -7445,6 +7432,8 @@ brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
if (suspend)
brcms_c_enable_mac(wlc);
+
+ kfree(prb_resp);
}
void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
@@ -7518,25 +7507,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
return wlc->band->bandunit;
}
-void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
{
- int timeout = 20;
int i;
/* Kick DMA to send any pending AMPDU */
for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
if (wlc->hw->di[i])
- dma_txflush(wlc->hw->di[i]);
-
- /* wait for queue and DMA fifos to run dry */
- while (brcms_txpktpendtot(wlc) > 0) {
- brcms_msleep(wlc->wl, 1);
-
- if (--timeout == 0)
- break;
- }
+ dma_kick_tx(wlc->hw->di[i]);
- WARN_ON_ONCE(timeout == 0);
+ return !brcms_txpktpendtot(wlc);
}
void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
@@ -7633,7 +7613,7 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
uint n = 0;
uint bound_limit = bound ? RXBND : -1;
- bool morepending;
+ bool morepending = false;
skb_queue_head_init(&recv_frames);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 4fb2834f4e64..b0f14b7b8616 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
extern void brcms_c_scan_start(struct brcms_c_info *wlc);
extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
- bool drop);
extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
@@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
#endif /* _BRCM_PUB_H_ */
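
The brcms_c_wait_for_tx_completion() busy-wait removed above is replaced by a wait/wake pair: brcms_ops_flush() sleeps on wl->tx_flush_wq until brcms_c_tx_flush_completed() reports the queues dry, and brcms_dpc() issues the wake_up() after each DPC run. A stripped-down sketch of that pairing, as introduced by this patch:

	/* flush side (process context, may sleep) */
	wait_event_timeout(wl->tx_flush_wq,
			   brcms_tx_flush_completed(wl),
			   msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));

	/* completion side (DPC tasklet), after tx status processing */
	wake_up(&wl->tx_flush_wq);
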
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/scb.h b/drivers/net/wireless/brcm80211/brcmsmac/scb.h
index 51c79c7239b7..3a3d73699f83 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/scb.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/scb.h
@@ -36,7 +36,6 @@
/* structure to store per-tid state for the ampdu initiator */
struct scb_ampdu_tid_ini {
- u8 tx_in_transit; /* number of pending mpdus in transit in driver */
u8 tid; /* initiator tid for easy lookup */
/* tx retry count; indexed by seq modulo */
u8 txretry[AMPDU_TX_BA_MAX_WSIZE];
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index c6ea995750db..dd9a18f8dbca 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -376,7 +376,7 @@ int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
entry = kmalloc(sizeof(struct mac_entry), GFP_KERNEL);
if (entry == NULL)
- return -1;
+ return -ENOMEM;
memcpy(entry->addr, mac, ETH_ALEN);
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index 2715b101aded..91c0cb3c368e 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -137,7 +137,7 @@ config IPW2200_PROMISCUOUS
config IPW2200_QOS
bool "Enable QoS support"
- depends on IPW2200 && EXPERIMENTAL
+ depends on IPW2200
config IPW2200_DEBUG
bool "Enable full debugging output in IPW2200 module."
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index d92b21a8e597..cb066f62879d 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2181,9 +2181,10 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
}
-static void send_scan_event(void *data)
+static void ipw2100_scan_event(struct work_struct *work)
{
- struct ipw2100_priv *priv = data;
+ struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv,
+ scan_event.work);
union iwreq_data wrqu;
wrqu.data.length = 0;
@@ -2191,18 +2192,6 @@ static void send_scan_event(void *data)
wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
}
-static void ipw2100_scan_event_later(struct work_struct *work)
-{
- send_scan_event(container_of(work, struct ipw2100_priv,
- scan_event_later.work));
-}
-
-static void ipw2100_scan_event_now(struct work_struct *work)
-{
- send_scan_event(container_of(work, struct ipw2100_priv,
- scan_event_now));
-}
-
static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
{
IPW_DEBUG_SCAN("scan complete\n");
@@ -2212,13 +2201,11 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
- if (!delayed_work_pending(&priv->scan_event_later))
- schedule_delayed_work(&priv->scan_event_later,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
priv->user_requested_scan = 0;
- cancel_delayed_work(&priv->scan_event_later);
- schedule_work(&priv->scan_event_now);
+ mod_delayed_work(system_wq, &priv->scan_event, 0);
}
}
@@ -4459,8 +4446,7 @@ static void ipw2100_kill_works(struct ipw2100_priv *priv)
cancel_delayed_work_sync(&priv->wx_event_work);
cancel_delayed_work_sync(&priv->hang_check);
cancel_delayed_work_sync(&priv->rf_kill);
- cancel_work_sync(&priv->scan_event_now);
- cancel_delayed_work_sync(&priv->scan_event_later);
+ cancel_delayed_work_sync(&priv->scan_event);
}
static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
@@ -4478,13 +4464,10 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
return err;
}
- priv->tx_buffers =
- kmalloc(TX_PENDED_QUEUE_LENGTH * sizeof(struct ipw2100_tx_packet),
- GFP_ATOMIC);
+ priv->tx_buffers = kmalloc_array(TX_PENDED_QUEUE_LENGTH,
+ sizeof(struct ipw2100_tx_packet),
+ GFP_ATOMIC);
if (!priv->tx_buffers) {
- printk(KERN_ERR DRV_NAME
- ": %s: alloc failed form tx buffers.\n",
- priv->net_dev->name);
bd_queue_free(priv, &priv->tx_queue);
return -ENOMEM;
}
@@ -6195,8 +6178,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
- INIT_WORK(&priv->scan_event_now, ipw2100_scan_event_now);
- INIT_DELAYED_WORK(&priv->scan_event_later, ipw2100_scan_event_later);
+ INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
ipw2100_irq_tasklet, (unsigned long)priv);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 5fe17cbab1f3..c6d78790cb0d 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -577,8 +577,7 @@ struct ipw2100_priv {
struct delayed_work wx_event_work;
struct delayed_work hang_check;
struct delayed_work rf_kill;
- struct work_struct scan_event_now;
- struct delayed_work scan_event_later;
+ struct delayed_work scan_event;
int user_requested_scan;
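
The scan-event rework in both ipw2100 and ipw2200 leans on the fact that one delayed work can serve both paths: schedule_delayed_work() queues it only if it is not already pending, while mod_delayed_work() with a zero delay pulls any pending instance forward and runs it right away. A condensed sketch of the two call sites as used above:

	/* deferred report, roughly 4 s after scan completion */
	schedule_delayed_work(&priv->scan_event,
			      round_jiffies_relative(msecs_to_jiffies(4000)));

	/* user-requested scan: report immediately */
	mod_delayed_work(system_wq, &priv->scan_event, 0);
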
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 844f201b7b70..d96257b79a84 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -4480,18 +4480,11 @@ static void handle_scan_event(struct ipw_priv *priv)
{
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
- if (!delayed_work_pending(&priv->scan_event))
- schedule_delayed_work(&priv->scan_event,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
- union iwreq_data wrqu;
-
priv->user_requested_scan = 0;
- cancel_delayed_work(&priv->scan_event);
-
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
+ mod_delayed_work(system_wq, &priv->scan_event, 0);
}
}
@@ -11327,7 +11320,6 @@ static int ipw_up(struct ipw_priv *priv)
if (!(priv->config & CFG_CUSTOM_MAC))
eeprom_parse_mac(priv, priv->mac_addr);
memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
- memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
ipw_set_geo(priv);
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index d604b4036a76..3630a41df50d 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -572,26 +572,11 @@ il3945_tx_skb(struct il_priv *il,
il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id);
/* Total # bytes to be transmitted */
- len = (u16) skb->len;
- tx_cmd->len = cpu_to_le16(len);
+ tx_cmd->len = cpu_to_le16((u16) skb->len);
- il_update_stats(il, true, fc, len);
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
- D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
- il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
- ieee80211_hdrlen(fc));
-
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@@ -610,14 +595,8 @@ il3945_tx_skb(struct il_priv *il,
* within command buffer array. */
txcmd_phys =
pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
- /* we do not map meta data ... so we can safely access address to
- * provide to unmap command*/
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, len);
-
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
+ goto drop_unlock;
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@@ -626,10 +605,34 @@ il3945_tx_skb(struct il_priv *il,
phys_addr =
pci_map_single(il->pci_dev, skb->data + hdr_len, len,
PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
+ goto drop_unlock;
+ }
+
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, len);
+ if (len)
il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0,
U32_PAD(len));
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
}
+ il_update_stats(il, true, fc, skb->len);
+
+ D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+ D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
+ ieee80211_hdrlen(fc));
+
/* Tell device the write idx *just past* this latest filled TFD */
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
il_txq_update_write_ptr(il, txq);
@@ -1001,12 +1004,12 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
struct list_head *element;
struct il_rx_buf *rxb;
struct page *page;
+ dma_addr_t page_dma;
unsigned long flags;
gfp_t gfp_mask = priority;
while (1) {
spin_lock_irqsave(&rxq->lock, flags);
-
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
return;
@@ -1035,26 +1038,34 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
break;
}
+ /* Get physical address of RB/SKB */
+ page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+ __free_pages(page, il->hw_params.rx_page_order);
+ break;
+ }
+
spin_lock_irqsave(&rxq->lock, flags);
+
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
+ pci_unmap_page(il->pci_dev, page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
__free_pages(page, il->hw_params.rx_page_order);
return;
}
+
element = rxq->rx_used.next;
rxb = list_entry(element, struct il_rx_buf, list);
list_del(element);
- spin_unlock_irqrestore(&rxq->lock, flags);
rxb->page = page;
- /* Get physical address of RB/SKB */
- rxb->page_dma =
- pci_map_page(il->pci_dev, page, 0,
- PAGE_SIZE << il->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
-
- spin_lock_irqsave(&rxq->lock, flags);
-
+ rxb->page_dma = page_dma;
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
il->alloc_rxb_page++;
@@ -1284,8 +1295,15 @@ il3945_rx_handle(struct il_priv *il)
pci_map_page(il->pci_dev, rxb->page, 0,
PAGE_SIZE << il->hw_params.
rx_page_order, PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
+ if (unlikely(pci_dma_mapping_error(il->pci_dev,
+ rxb->page_dma))) {
+ __il_free_pages(il, rxb->page);
+ rxb->page = NULL;
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
} else
list_add_tail(&rxb->list, &rxq->rx_used);
@@ -3273,7 +3291,7 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
if (count) {
char *p = buffer;
- strncpy(buffer, buf, min(sizeof(buffer), count));
+ strlcpy(buffer, buf, sizeof(buffer));
channel = simple_strtoul(p, NULL, 0);
if (channel)
params.channel = channel;
@@ -3474,6 +3492,7 @@ struct ieee80211_ops il3945_mac_ops = {
.sta_add = il3945_mac_sta_add,
.sta_remove = il_mac_sta_remove,
.tx_last_beacon = il_mac_tx_last_beacon,
+ .flush = il_mac_flush,
};
static int
@@ -3548,7 +3567,8 @@ il3945_setup_mac(struct il_priv *il)
hw->vif_data_size = sizeof(struct il_vif_priv);
/* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;
+ hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
@@ -3557,6 +3577,8 @@ il3945_setup_mac(struct il_priv *il)
WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
/* we create the 802.11 header and a zero-length SSID element */
hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index c3fbf6717564..7941eb3a0166 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -319,6 +319,7 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
struct list_head *element;
struct il_rx_buf *rxb;
struct page *page;
+ dma_addr_t page_dma;
unsigned long flags;
gfp_t gfp_mask = priority;
@@ -356,33 +357,35 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
return;
}
+ /* Get physical address of the RB */
+ page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+ __free_pages(page, il->hw_params.rx_page_order);
+ break;
+ }
+
spin_lock_irqsave(&rxq->lock, flags);
if (list_empty(&rxq->rx_used)) {
spin_unlock_irqrestore(&rxq->lock, flags);
+ pci_unmap_page(il->pci_dev, page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
__free_pages(page, il->hw_params.rx_page_order);
return;
}
+
element = rxq->rx_used.next;
rxb = list_entry(element, struct il_rx_buf, list);
list_del(element);
- spin_unlock_irqrestore(&rxq->lock, flags);
-
BUG_ON(rxb->page);
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma =
- pci_map_page(il->pci_dev, page, 0,
- PAGE_SIZE << il->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
+ rxb->page = page;
+ rxb->page_dma = page_dma;
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
il->alloc_rxb_page++;
@@ -725,6 +728,16 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
+ /* We know which subframes of an A-MPDU belong
+ * together since we get a single PHY response
+ * from the firmware for all of them.
+ */
+
+ rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status.ampdu_reference = il->_4965.ampdu_ref;
+ }
+
il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
&rx_status);
}
@@ -736,6 +749,7 @@ il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
{
struct il_rx_pkt *pkt = rxb_addr(rxb);
il->_4965.last_phy_res_valid = true;
+ il->_4965.ampdu_ref++;
memcpy(&il->_4965.last_phy_res, pkt->u.raw,
sizeof(struct il_rx_phy_res));
}
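A minimal illustration, not driver code, of how the new _4965.ampdu_ref counter is meant to be consumed: the PHY notification handler bumps it once per REPLY_RX_PHY_CMD, and every data frame that carries the aggregation PHY flag reports that same value, so mac80211 (and radiotap) can tie the A-MPDU subframes together.

static u32 ampdu_ref;	/* bumped once per PHY notification, as in the patch */

static void on_phy_notification(void)
{
	ampdu_ref++;	/* new PHY result, so a new (possible) A-MPDU */
}

static void tag_frame(struct ieee80211_rx_status *rx_status, bool frame_is_agg)
{
	if (!frame_is_agg)
		return;
	/* all subframes sharing this PHY result get the same reference */
	rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
	rx_status->ampdu_reference = ampdu_ref;
}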
@@ -1779,8 +1793,7 @@ il4965_tx_skb(struct il_priv *il,
memcpy(tx_cmd->hdr, hdr, hdr_len);
/* Total # bytes to be transmitted */
- len = (u16) skb->len;
- tx_cmd->len = cpu_to_le16(len);
+ tx_cmd->len = cpu_to_le16((u16) skb->len);
if (info->control.hw_key)
il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
@@ -1790,7 +1803,6 @@ il4965_tx_skb(struct il_priv *il,
il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
- il_update_stats(il, true, fc, len);
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@@ -1812,18 +1824,8 @@ il4965_tx_skb(struct il_priv *il,
txcmd_phys =
pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
PCI_DMA_BIDIRECTIONAL);
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
+ goto drop_unlock;
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@@ -1832,8 +1834,24 @@ il4965_tx_skb(struct il_priv *il,
phys_addr =
pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
+ goto drop_unlock;
+ }
+
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+ if (secondlen)
il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
0, 0);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
}
scratch_phys =
@@ -1846,6 +1864,8 @@ il4965_tx_skb(struct il_priv *il,
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
+ il_update_stats(il, true, fc, skb->len);
+
D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
@@ -4281,8 +4301,16 @@ il4965_rx_handle(struct il_priv *il)
pci_map_page(il->pci_dev, rxb->page, 0,
PAGE_SIZE << il->hw_params.
rx_page_order, PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
+
+ if (unlikely(pci_dma_mapping_error(il->pci_dev,
+ rxb->page_dma))) {
+ __il_free_pages(il, rxb->page);
+ rxb->page = NULL;
+ list_add_tail(&rxb->list, &rxq->rx_used);
+ } else {
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ }
} else
list_add_tail(&rxb->list, &rxq->rx_used);
@@ -5711,9 +5739,9 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
/* Tell mac80211 our characteristics */
hw->flags =
IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
if (il->cfg->sku & IL_SKU_N)
hw->flags |=
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
@@ -5968,7 +5996,9 @@ il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
D_HT("start Tx\n");
ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
D_HT("stop Tx\n");
ret = il4965_tx_agg_stop(il, vif, sta, tid);
if (test_bit(S_EXIT_PENDING, &il->status))
@@ -6306,6 +6336,7 @@ const struct ieee80211_ops il4965_mac_ops = {
.sta_remove = il_mac_sta_remove,
.channel_switch = il4965_mac_channel_switch,
.tx_last_beacon = il_mac_tx_last_beacon,
+ .flush = il_mac_flush,
};
static int
@@ -6553,6 +6584,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
il4965_prepare_card_hw(il);
if (!il->hw_ready) {
IL_WARN("Failed, HW not ready\n");
+ err = -EIO;
goto out_iounmap;
}
@@ -6569,9 +6601,6 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_free_eeprom;
- if (err)
- goto out_free_eeprom;
-
/* extract MAC Address */
il4965_eeprom_get_mac(il, il->addresses[0].addr);
D_INFO("MAC address: %pM\n", il->addresses[0].addr);
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index f3b8e91aa3dc..e8324b5e5bfe 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -1183,8 +1183,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
- WLAN_HT_CAP_SM_PS_STATIC)
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
index 5db11714e047..91eb2d07fdb8 100644
--- a/drivers/net/wireless/iwlegacy/4965.c
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -1748,7 +1748,6 @@ static void
il4965_post_associate(struct il_priv *il)
{
struct ieee80211_vif *vif = il->vif;
- struct ieee80211_conf *conf = NULL;
int ret = 0;
if (!vif || !il->is_open)
@@ -1759,8 +1758,6 @@ il4965_post_associate(struct il_priv *il)
il_scan_cancel_timeout(il, 200);
- conf = &il->hw->conf;
-
il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
il_commit_rxon(il);
diff --git a/drivers/net/wireless/iwlegacy/commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 25dd7d28d022..3b6c99400892 100644
--- a/drivers/net/wireless/iwlegacy/commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -1134,8 +1134,9 @@ struct il_wep_cmd {
#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
-#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0x70
#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
+#define RX_RES_PHY_FLAGS_AGG_MSK cpu_to_le16(1 << 7)
#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 7e16d10a7f14..e006ea831320 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1830,32 +1830,30 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
{
struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
__le32 sta_flags;
- u8 mimo_ps_mode;
if (!sta || !sta_ht_inf->ht_supported)
goto done;
- mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
D_ASSOC("spatial multiplexing power save mode: %s\n",
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
+ (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
+ (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
"disabled");
sta_flags = il->stations[idx].sta.station_flags;
sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
- switch (mimo_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_STATIC:
sta_flags |= STA_FLG_MIMO_DIS_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ case IEEE80211_SMPS_DYNAMIC:
sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
+ case IEEE80211_SMPS_OFF:
break;
default:
- IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
break;
}
@@ -3162,18 +3160,23 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
idx, il->cmd_queue);
}
#endif
- txq->need_update = 1;
-
- if (il->ops->txq_update_byte_cnt_tbl)
- /* Set up entry in queue's byte count circular buffer */
- il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
phys_addr =
pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
+ idx = -ENOMEM;
+ goto out;
+ }
dma_unmap_addr_set(out_meta, mapping, phys_addr);
dma_unmap_len_set(out_meta, len, fix_size);
+ txq->need_update = 1;
+
+ if (il->ops->txq_update_byte_cnt_tbl)
+ /* Set up entry in queue's byte count circular buffer */
+ il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
+
il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
U32_PAD(cmd->len));
@@ -3181,6 +3184,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
il_txq_update_write_ptr(il, txq);
+out:
spin_unlock_irqrestore(&il->hcmd_lock, flags);
return idx;
}
@@ -3958,17 +3962,21 @@ il_connection_init_rx_config(struct il_priv *il)
memset(&il->staging, 0, sizeof(il->staging));
- if (!il->vif) {
+ switch (il->iw_mode) {
+ case NL80211_IFTYPE_UNSPECIFIED:
il->staging.dev_type = RXON_DEV_TYPE_ESS;
- } else if (il->vif->type == NL80211_IFTYPE_STATION) {
+ break;
+ case NL80211_IFTYPE_STATION:
il->staging.dev_type = RXON_DEV_TYPE_ESS;
il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
- } else if (il->vif->type == NL80211_IFTYPE_ADHOC) {
+ break;
+ case NL80211_IFTYPE_ADHOC:
il->staging.dev_type = RXON_DEV_TYPE_IBSS;
il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
il->staging.filter_flags =
RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
- } else {
+ break;
+ default:
IL_ERR("Unsupported interface type %d\n", il->vif->type);
return;
}
@@ -4550,8 +4558,7 @@ out:
EXPORT_SYMBOL(il_mac_add_interface);
static void
-il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
- bool mode_change)
+il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
{
lockdep_assert_held(&il->mutex);
@@ -4560,9 +4567,7 @@ il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
il_force_scan_end(il);
}
- if (!mode_change)
- il_set_mode(il);
-
+ il_set_mode(il);
}
void
@@ -4575,8 +4580,8 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
WARN_ON(il->vif != vif);
il->vif = NULL;
-
- il_teardown_interface(il, vif, false);
+ il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
+ il_teardown_interface(il, vif);
memset(il->bssid, 0, ETH_ALEN);
D_MAC80211("leave\n");
@@ -4685,18 +4690,10 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
/* success */
- il_teardown_interface(il, vif, true);
vif->type = newtype;
vif->p2p = false;
- err = il_set_mode(il);
- WARN_ON(err);
- /*
- * We've switched internally, but submitting to the
- * device may have failed for some reason. Mask this
- * error, because otherwise mac80211 will not switch
- * (and set the interface type back) and we'll be
- * out of sync with it.
- */
+ il->iw_mode = newtype;
+ il_teardown_interface(il, vif);
err = 0;
out:
@@ -4707,6 +4704,42 @@ out:
}
EXPORT_SYMBOL(il_mac_change_interface);
+void
+il_mac_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+ int i;
+
+ mutex_lock(&il->mutex);
+ D_MAC80211("enter\n");
+
+ if (il->txq == NULL)
+ goto out;
+
+ for (i = 0; i < il->hw_params.max_txq_num; i++) {
+ struct il_queue *q;
+
+ if (i == il->cmd_queue)
+ continue;
+
+ q = &il->txq[i].q;
+ if (q->read_ptr == q->write_ptr)
+ continue;
+
+ if (time_after(jiffies, timeout)) {
+ IL_ERR("Failed to flush queue %d\n", q->id);
+ break;
+ }
+
+ msleep(20);
+ }
+out:
+ D_MAC80211("leave\n");
+ mutex_unlock(&il->mutex);
+}
+EXPORT_SYMBOL(il_mac_flush);
+
/*
* On every watchdog tick we check (latest) time stamp. If it does not
* change during timeout period and queue is not empty we reset firmware.
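For reference, the per-queue wait that il_mac_flush performs can be expressed on its own as the small helper below. This is a sketch with a hypothetical name, not the loop the patch adds (which walks every queue under il->mutex, skips the command queue and only logs when the 500 ms budget runs out):

static int wait_tx_queue_empty(struct il_queue *q, unsigned long timeout)
{
	/* poll until read and write pointers meet or the budget expires */
	while (q->read_ptr != q->write_ptr) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(20);
	}
	return 0;
}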
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index a9a569f432fb..96f2025d936e 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1356,6 +1356,7 @@ struct il_priv {
struct {
struct il_rx_phy_res last_phy_res;
bool last_phy_res_valid;
+ u32 ampdu_ref;
struct completion firmware_loading_complete;
@@ -1723,6 +1724,7 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
+void il_mac_flush(struct ieee80211_hw *hw, bool drop);
int il_alloc_txq_mem(struct il_priv *il);
void il_free_txq_mem(struct il_priv *il);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 5cf43236421e..ba319cba3f1e 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -43,8 +43,20 @@ config IWLWIFI
module will be called iwlwifi.
config IWLDVM
- tristate "Intel Wireless WiFi"
+ tristate "Intel Wireless WiFi DVM Firmware support"
depends on IWLWIFI
+ help
+ This is the driver supporting the DVM firmware which is
+ currently the only firmware available for existing devices.
+
+config IWLMVM
+ tristate "Intel Wireless WiFi MVM Firmware support"
+ depends on IWLWIFI
+ help
+ This is the driver supporting the MVM firmware which is
+ currently only available for 7000 series devices.
+
+ Say yes if you have such a device.
menu "Debugging Options"
depends on IWLWIFI
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 170ec330d2a9..6c7800044a04 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -5,8 +5,10 @@ iwlwifi-objs += iwl-drv.o
iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
+iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
+iwlwifi-objs += pcie/7000.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
@@ -15,5 +17,6 @@ ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
+obj-$(CONFIG_IWLMVM) += mvm/
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 33b3ad2e546b..41ec27cb6efe 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -338,7 +338,7 @@ int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap);
+ struct ieee80211_sta *sta);
static inline int iwl_sta_id(struct ieee80211_sta *sta)
{
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index de54713b680c..6468de8634b0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index 2349f393cc42..65e920cab2b7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 71ab76b2b39d..84e2c0fcfef6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1403,6 +1403,7 @@ enum {
#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */
#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */
+#define AGG_TX_TRY_POS 12
#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \
@@ -3695,7 +3696,7 @@ struct iwl_bt_uart_msg {
u8 frame5;
u8 frame6;
u8 frame7;
-} __attribute__((packed));
+} __packed;
struct iwl_bt_coex_profile_notif {
struct iwl_bt_uart_msg last_bt_uart_msg;
@@ -3703,7 +3704,7 @@ struct iwl_bt_coex_profile_notif {
u8 bt_traffic_load; /* 0 .. 3? */
u8 bt_ci_compliance; /* 0 - not complied, 1 - complied */
u8 reserved;
-} __attribute__((packed));
+} __packed;
#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS 0
#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK 0x1
@@ -3752,7 +3753,7 @@ enum bt_coex_prio_table_priorities {
struct iwl_bt_coex_prio_table_cmd {
u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
-} __attribute__((packed));
+} __packed;
#define IWL_BT_COEX_ENV_CLOSE 0
#define IWL_BT_COEX_ENV_OPEN 1
@@ -3764,7 +3765,7 @@ struct iwl_bt_coex_prot_env_cmd {
u8 action; /* 0 = closed, 1 = open */
u8 type; /* 0 .. 15 */
u8 reserved[2];
-} __attribute__((packed));
+} __packed;
/*
* REPLY_D3_CONFIG
@@ -3897,6 +3898,24 @@ struct iwlagn_wowlan_kek_kck_material_cmd {
__le64 replay_ctr;
} __packed;
+#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
+
+/*
+ * REPLY_WOWLAN_GET_STATUS = 0xe5
+ */
+struct iwlagn_wowlan_status {
+ __le64 replay_ctr;
+ __le32 rekey_status;
+ __le32 wakeup_reason;
+ u8 pattern_number;
+ u8 reserved1;
+ __le16 qos_seq_ctr[8];
+ __le16 non_qos_seq_ctr;
+ __le16 reserved2;
+ union iwlagn_all_tsc_rsc tsc_rsc;
+ __le16 reserved3;
+} __packed;
+
/*
* REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
*/
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 5b9533eef54d..20806cae11b7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -157,7 +157,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
sram = priv->dbgfs_sram_offset & ~0x3;
/* read the first u32 from sram */
- val = iwl_read_targ_mem(priv->trans, sram);
+ val = iwl_trans_read_mem32(priv->trans, sram);
for (; len; len--) {
/* put the address at the start of every line */
@@ -176,7 +176,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
if (++offset == 4) {
sram += 4;
offset = 0;
- val = iwl_read_targ_mem(priv->trans, sram);
+ val = iwl_trans_read_mem32(priv->trans, sram);
}
/* put in extra spaces and split lines for human readability */
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 2653a891cc7e..71ea77576d22 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 8c72be3f37c1..15cca2ef9294 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.c b/drivers/net/wireless/iwlwifi/dvm/led.c
index bf479f709091..33c7e15d24f5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/iwlwifi/dvm/led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -69,7 +69,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
/* Set led register off */
void iwlagn_led_enable(struct iwl_priv *priv)
{
- iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+ iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
}
/*
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index b02a853103d3..8749dcfe695f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 6ff46605ad4f..86ea5f4c3939 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 3163e0f38c25..323e4a33fcac 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -145,14 +145,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD |
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_QUEUE_CONTROL |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_SCAN_WHILE_IDLE;
+ IEEE80211_HW_WANT_MONITOR_VIF;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
@@ -206,7 +205,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
#ifdef CONFIG_PM_SLEEP
if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
- priv->trans->ops->wowlan_suspend &&
+ priv->trans->ops->d3_suspend &&
+ priv->trans->ops->d3_resume &&
device_can_wakeup(priv->trans->dev)) {
hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
@@ -426,7 +426,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
if (ret)
goto error;
- iwl_trans_wowlan_suspend(priv->trans);
+ iwl_trans_d3_suspend(priv->trans);
goto out;
@@ -441,54 +441,154 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
return ret;
}
+struct iwl_resume_data {
+ struct iwl_priv *priv;
+ struct iwlagn_wowlan_status *cmd;
+ bool valid;
+};
+
+static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_resume_data *resume_data = data;
+ struct iwl_priv *priv = resume_data->priv;
+ u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+ if (len - 4 != sizeof(*resume_data->cmd)) {
+ IWL_ERR(priv, "rx wrong size data\n");
+ return true;
+ }
+ memcpy(resume_data->cmd, pkt->data, sizeof(*resume_data->cmd));
+ resume_data->valid = true;
+
+ return true;
+}
+
static int iwlagn_mac_resume(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct ieee80211_vif *vif;
- unsigned long flags;
- u32 base, status = 0xffffffff;
- int ret = -EIO;
+ u32 base;
+ int ret;
+ enum iwl_d3_status d3_status;
+ struct error_table_start {
+ /* cf. struct iwl_error_event_table */
+ u32 valid;
+ u32 error_id;
+ } err_info;
+ struct iwl_notification_wait status_wait;
+ static const u8 status_cmd[] = {
+ REPLY_WOWLAN_GET_STATUS,
+ };
+ struct iwlagn_wowlan_status status_data = {};
+ struct iwl_resume_data resume_data = {
+ .priv = priv,
+ .cmd = &status_data,
+ .valid = false,
+ };
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ const struct fw_img *img;
+#endif
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->mutex);
- iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+ /* we'll clear ctx->vif during iwlagn_prepare_restart() */
+ vif = ctx->vif;
+
+ ret = iwl_trans_d3_resume(priv->trans, &d3_status);
+ if (ret)
+ goto out_unlock;
+
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(priv, "Device was reset during suspend\n");
+ goto out_unlock;
+ }
base = priv->device_pointers.error_event_table;
- if (iwlagn_hw_valid_rtc_data_addr(base)) {
- spin_lock_irqsave(&priv->trans->reg_lock, flags);
- ret = iwl_grab_nic_access_silent(priv->trans);
- if (likely(ret == 0)) {
- iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base);
- status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(priv->trans);
+ if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+ IWL_WARN(priv, "Invalid error table during resume!\n");
+ goto out_unlock;
+ }
+
+ iwl_trans_read_mem_bytes(priv->trans, base,
+ &err_info, sizeof(err_info));
+
+ if (err_info.valid) {
+ IWL_INFO(priv, "error table is valid (%d, 0x%x)\n",
+ err_info.valid, err_info.error_id);
+ if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ wakeup.rfkill_release = true;
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
}
- spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
+ goto out_unlock;
+ }
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (ret == 0) {
- const struct fw_img *img;
-
- img = &(priv->fw->img[IWL_UCODE_WOWLAN]);
- if (!priv->wowlan_sram) {
- priv->wowlan_sram =
- kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
- GFP_KERNEL);
- }
+ img = &priv->fw->img[IWL_UCODE_WOWLAN];
+ if (!priv->wowlan_sram)
+ priv->wowlan_sram =
+ kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
+ GFP_KERNEL);
+
+ if (priv->wowlan_sram)
+ iwl_trans_read_mem(priv->trans, 0x800000,
+ priv->wowlan_sram,
+ img->sec[IWL_UCODE_SECTION_DATA].len / 4);
+#endif
+
+ /*
+ * This is very strange. The GET_STATUS command is sent but the device
+ * doesn't reply properly, it seems it doesn't close the RBD so one is
+ * always left open ... As a result, we need to send another command
+ * and have to reset the driver afterwards. As we need to switch to
+ * runtime firmware again that'll happen.
+ */
- if (priv->wowlan_sram)
- _iwl_read_targ_mem_dwords(
- priv->trans, 0x800000,
- priv->wowlan_sram,
- img->sec[IWL_UCODE_SECTION_DATA].len / 4);
+ iwl_init_notification_wait(&priv->notif_wait, &status_wait, status_cmd,
+ ARRAY_SIZE(status_cmd), iwl_resume_status_fn,
+ &resume_data);
+
+ iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL);
+ iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL);
+ /* an RBD is left open in the firmware now! */
+
+ ret = iwl_wait_notification(&priv->notif_wait, &status_wait, HZ/5);
+ if (ret)
+ goto out_unlock;
+
+ if (resume_data.valid && priv->contexts[IWL_RXON_CTX_BSS].vif) {
+ u32 reasons = le32_to_cpu(status_data.wakeup_reason);
+ struct cfg80211_wowlan_wakeup *wakeup_report;
+
+ IWL_INFO(priv, "WoWLAN wakeup reason(s): 0x%.8x\n", reasons);
+
+ if (reasons) {
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET)
+ wakeup.magic_pkt = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH)
+ wakeup.pattern_idx = status_data.pattern_number;
+ if (reasons & (IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+ IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE))
+ wakeup.disconnect = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL)
+ wakeup.gtk_rekey_failure = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ)
+ wakeup.eap_identity_req = true;
+ if (reasons & IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE)
+ wakeup.four_way_handshake = true;
+ wakeup_report = &wakeup;
+ } else {
+ wakeup_report = NULL;
}
-#endif
- }
- /* we'll clear ctx->vif during iwlagn_prepare_restart() */
- vif = ctx->vif;
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+ }
priv->wowlan = false;
@@ -498,6 +598,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
iwl_connection_init_rx_config(priv, ctx);
iwlagn_set_rxon_chain(priv, ctx);
+ out_unlock:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
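The async-command-plus-notification-wait dance described in the comment above boils down to the pattern sketched below. The helper name is hypothetical; iwl_init_notification_wait(), iwl_wait_notification(), iwl_dvm_send_cmd_pdu() and the iwl_resume_data/iwl_resume_status_fn pair are the ones used in this hunk:

static int get_wowlan_status(struct iwl_priv *priv,
			     struct iwlagn_wowlan_status *status)
{
	struct iwl_notification_wait wait;
	static const u8 cmds[] = { REPLY_WOWLAN_GET_STATUS };
	struct iwl_resume_data data = {
		.priv = priv,
		.cmd = status,
		.valid = false,
	};

	/* register interest in the status notification before sending */
	iwl_init_notification_wait(&priv->notif_wait, &wait, cmds,
				   ARRAY_SIZE(cmds), iwl_resume_status_fn,
				   &data);

	/* fire-and-forget; the handler copies the reply into 'status' */
	iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL);
	/* the extra ECHO nudges the firmware as the comment above explains */
	iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL);

	if (iwl_wait_notification(&priv->notif_wait, &wait, HZ / 5))
		return -ETIMEDOUT;

	return data.valid ? 0 : -EIO;
}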
@@ -520,9 +621,6 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw,
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
if (iwlagn_tx_skb(priv, control->sta, skb))
ieee80211_free_txskb(hw, skb);
}
@@ -679,7 +777,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
IWL_DEBUG_HT(priv, "stop Tx\n");
ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
if ((ret == 0) && (priv->agg_tids_count > 0)) {
@@ -1154,6 +1254,7 @@ static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
}
static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index faa05932efae..b9e3517652d6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -353,11 +353,8 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
/* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&priv->trans->reg_lock, reg_flags);
- if (unlikely(!iwl_grab_nic_access(priv->trans))) {
- spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
+ if (!iwl_trans_grab_nic_access(priv->trans, false, &reg_flags))
return;
- }
/* Set starting address; reads will auto-increment */
iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr);
@@ -388,8 +385,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
}
}
/* Allow device to power down */
- iwl_release_nic_access(priv->trans);
- spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
+ iwl_trans_release_nic_access(priv->trans, &reg_flags);
}
static void iwl_continuous_event_trace(struct iwl_priv *priv)
@@ -408,7 +404,8 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
base = priv->device_pointers.log_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
- iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read));
+ iwl_trans_read_mem_bytes(priv->trans, base,
+ &read, sizeof(read));
capacity = read.capacity;
mode = read.mode;
num_wraps = read.wrap_counter;
@@ -1627,7 +1624,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
}
/*TODO: Update dbgfs with ISR error stats obtained below */
- iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table));
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -1716,9 +1713,8 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
/* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&trans->reg_lock, reg_flags);
- if (unlikely(!iwl_grab_nic_access(trans)))
- goto out_unlock;
+ if (!iwl_trans_grab_nic_access(trans, false, &reg_flags))
+ return pos;
/* Set starting address; reads will auto-increment */
iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
@@ -1756,9 +1752,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
}
/* Allow device to power down */
- iwl_release_nic_access(trans);
-out_unlock:
- spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
+ iwl_trans_release_nic_access(trans, &reg_flags);
return pos;
}
@@ -1835,10 +1829,10 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
}
/* event log header */
- capacity = iwl_read_targ_mem(trans, base);
- mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
- num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
- next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
+ capacity = iwl_trans_read_mem32(trans, base);
+ mode = iwl_trans_read_mem32(trans, base + (1 * sizeof(u32)));
+ num_wraps = iwl_trans_read_mem32(trans, base + (2 * sizeof(u32)));
+ next_entry = iwl_trans_read_mem32(trans, base + (3 * sizeof(u32)));
if (capacity > logsize) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
@@ -1990,13 +1984,13 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
/* SKU Control */
- iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
- CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
- (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
- CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
- (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
- CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
+ iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
+ (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
+ (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
/* write radio config values to register */
if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
@@ -2008,10 +2002,11 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
priv->nvm_data->radio_cfg_dash <<
CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
- iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
- CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
- CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val);
+ iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH,
+ reg_val);
IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
priv->nvm_data->radio_cfg_type,
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index 518cf3715809..bd69018d07a9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.h b/drivers/net/wireless/iwlwifi/dvm/power.h
index a2cee7f04848..7b03e1342d47 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.h
+++ b/drivers/net/wireless/iwlwifi/dvm/power.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index f3dd0da60d8a..abe304267261 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -411,8 +411,9 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
* BT traffic, as they would just be disrupted by BT.
*/
if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
- IWL_ERR(priv, "BT traffic (%d), no aggregation allowed\n",
- priv->bt_traffic_load);
+ IWL_DEBUG_COEX(priv,
+ "BT traffic (%d), no aggregation allowed\n",
+ priv->bt_traffic_load);
return ret;
}
@@ -1288,8 +1289,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
- == WLAN_HT_CAP_SM_PS_STATIC)
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -1304,7 +1304,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1344,8 +1344,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
- == WLAN_HT_CAP_SM_PS_STATIC)
+ if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -1360,7 +1359,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
rate_mask = lq_sta->active_mimo3_rate;
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@@ -1409,7 +1408,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index ad3aea8f626a..5d83cab22d62 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index cac4f37cc427..a4eed2055fdb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -790,7 +790,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx(priv->hw, skb);
+ ieee80211_rx_ni(priv->hw, skb);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 9a891e6e60e8..23be948cf162 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -1545,10 +1545,9 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid);
}
- if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
- priv->beacon_ctx) {
+ if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) {
if (iwlagn_update_beacon(priv, vif))
- IWL_ERR(priv, "Error sending IBSS beacon\n");
+ IWL_ERR(priv, "Error updating beacon\n");
}
mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 610ed2204e1f..3a4aa5239c45 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index bdba9543c351..94ef33838bc6 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -77,7 +77,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
sta_id);
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
switch (add_sta_resp->status) {
case ADD_STA_SUCCESS_MSK:
@@ -119,7 +119,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
priv->stations[sta_id].sta.mode ==
STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
addsta->sta.addr);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return ret;
}
@@ -173,7 +173,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap)
+ struct ieee80211_sta *sta)
{
if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
return false;
@@ -183,20 +183,11 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
return false;
#endif
- /*
- * Remainder of this function checks ht_cap, but if it's
- * NULL then we can do HT40 (special case for RXON)
- */
- if (!ht_cap)
+ /* special case for RXON */
+ if (!sta)
return true;
- if (!ht_cap->ht_supported)
- return false;
-
- if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
- return false;
-
- return true;
+ return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
@@ -205,7 +196,6 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
__le32 *flags, __le32 *mask)
{
struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
- u8 mimo_ps_mode;
*mask = STA_FLG_RTS_MIMO_PROT_MSK |
STA_FLG_MIMO_DIS_MSK |
@@ -217,26 +207,24 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
if (!sta || !sta_ht_inf->ht_supported)
return;
- mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
-
IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
sta->addr,
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+ (sta->smps_mode == IEEE80211_SMPS_STATIC) ?
"static" :
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+ (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ?
"dynamic" : "disabled");
- switch (mimo_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_STATIC:
*flags |= STA_FLG_MIMO_DIS_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ case IEEE80211_SMPS_DYNAMIC:
*flags |= STA_FLG_RTS_MIMO_PROT_MSK;
break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
+ case IEEE80211_SMPS_OFF:
break;
default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode);
break;
}
@@ -246,7 +234,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
*flags |= cpu_to_le32(
(u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
- if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
*flags |= STA_FLG_HT40_EN_MSK;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
index 57b918ce3b5f..dc6f965a123a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/testmode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index eb864433e59d..03f9bc01c0cc 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -185,10 +185,8 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
priv->thermal_throttle.ct_kill_toggle = true;
}
iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
- spin_lock_irqsave(&priv->trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(priv->trans)))
- iwl_release_nic_access(priv->trans);
- spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
+ if (iwl_trans_grab_nic_access(priv->trans, false, &flags))
+ iwl_trans_release_nic_access(priv->trans, &flags);
/* Reschedule the ct_kill timer to occur in
* CT_KILL_EXIT_DURATION seconds to ensure we get a
@@ -473,8 +471,8 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
set_bit(STATUS_CT_KILL, &priv->status);
iwl_perform_ct_kill_task(priv, true);
} else {
- iwl_prepare_ct_kill_task(priv);
tt->state = old_state;
+ iwl_prepare_ct_kill_task(priv);
}
} else if (old_state == IWL_TI_CT_KILL &&
tt->state != IWL_TI_CT_KILL) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h
index 44c7c8f30a2d..9356c4b908ca 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index da21328ca8ed..6aec2df3bb27 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -231,13 +231,11 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
if (info->flags & IEEE80211_TX_CTL_AMPDU)
tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
- IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
@@ -355,8 +353,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
}
}
- IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
if (sta)
sta_priv = (void *)sta->drv_priv;
@@ -472,6 +468,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
WARN_ON_ONCE(is_agg &&
priv->queue_to_mac80211[txq_id] != info->hw_queue);
+ IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
+ txq_id, seq_number);
+
if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
@@ -541,9 +540,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
spin_lock_bh(&priv->sta_lock);
tid_data = &priv->tid_data[sta_id][tid];
- txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
+ txq_id = tid_data->agg.txq_id;
- switch (priv->tid_data[sta_id][tid].agg.state) {
+ switch (tid_data->agg.state) {
case IWL_EMPTYING_HW_QUEUE_ADDBA:
/*
* This can happen if the peer stops aggregation
@@ -563,9 +562,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
case IWL_AGG_ON:
break;
default:
- IWL_WARN(priv, "Stopping AGG while state not ON "
- "or starting for %d on %d (%d)\n", sta_id, tid,
- priv->tid_data[sta_id][tid].agg.state);
+ IWL_WARN(priv,
+ "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+ sta_id, tid, tid_data->agg.state);
spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -578,12 +577,11 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
sta_id, tid, txq_id);
} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
- IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
- "next_recl = %d\n",
+ IWL_DEBUG_TX_QUEUES(priv,
+ "Can't proceed: ssn %d, next_recl = %d\n",
tid_data->agg.ssn,
tid_data->next_reclaimed);
- priv->tid_data[sta_id][tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -591,8 +589,8 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
tid_data->agg.ssn);
turn_off:
- agg_state = priv->tid_data[sta_id][tid].agg.state;
- priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+ agg_state = tid_data->agg.state;
+ tid_data->agg.state = IWL_AGG_OFF;
spin_unlock_bh(&priv->sta_lock);
@@ -910,6 +908,12 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
}
}
+static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
+{
+ return le32_to_cpup((__le32 *)&tx_resp->status +
+ tx_resp->frame_count) & MAX_SN;
+}
+
static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
struct iwlagn_tx_resp *tx_resp)
{
@@ -944,9 +948,15 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
if (tx_resp->frame_count == 1)
return;
+ IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n",
+ agg->txq_id,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count);
+
/* Construct bit-map of pending frames within Tx window */
for (i = 0; i < tx_resp->frame_count; i++) {
u16 fstatus = le16_to_cpu(frame_status[i].status);
+ u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS;
if (status & AGG_TX_STATUS_MSK)
iwlagn_count_agg_tx_err_status(priv, fstatus);
@@ -955,11 +965,13 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
AGG_TX_STATE_ABORT_MSK))
continue;
- IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
- "try-count (0x%08x)\n",
- iwl_get_agg_tx_fail_reason(fstatus),
- fstatus & AGG_TX_STATUS_MSK,
- fstatus & AGG_TX_TRY_MSK);
+ if (status & AGG_TX_STATUS_MSK || retry_cnt > 1)
+ IWL_DEBUG_TX_REPLY(priv,
+ "%d: status %s (0x%04x), try-count (0x%01x)\n",
+ i,
+ iwl_get_agg_tx_fail_reason(fstatus),
+ fstatus & AGG_TX_STATUS_MSK,
+ retry_cnt);
}
}
@@ -990,12 +1002,6 @@ const char *iwl_get_agg_tx_fail_reason(u16 status)
}
#endif /* CONFIG_IWLWIFI_DEBUG */
-static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
-{
- return le32_to_cpup((__le32 *)&tx_resp->status +
- tx_resp->frame_count) & MAX_SN;
-}
-
static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
status &= TX_STATUS_MSK;
@@ -1079,6 +1085,8 @@ static void iwlagn_set_tx_status(struct iwl_priv *priv,
{
u16 status = le16_to_cpu(tx_resp->status.status);
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags |= iwl_tx_status_to_mac80211(status);
iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
@@ -1123,10 +1131,16 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
IWLAGN_TX_RES_RA_POS;
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
- if (is_agg)
+ if (is_agg) {
+ WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT ||
+ tid >= IWL_MAX_TID_COUNT);
+ if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id)
+ IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id,
+ priv->tid_data[sta_id][tid].agg.txq_id);
iwl_rx_reply_tx_agg(priv, tx_resp);
+ }
__skb_queue_head_init(&skbs);
@@ -1213,22 +1227,41 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
freed++;
}
- WARN_ON(!is_agg && freed != 1);
+ if (tid != IWL_TID_NON_QOS) {
+ priv->tid_data[sta_id][tid].next_reclaimed =
+ next_reclaimed;
+ IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+ next_reclaimed);
+ }
+
+ if (!is_agg && freed != 1)
+ IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
/*
* An offchannel frame can be sent only on the AUX queue, where
* there is no aggregation (and reordering), so only a single
* skb is expected to be processed.
*/
- WARN_ON(is_offchannel_skb && freed != 1);
+ if (is_offchannel_skb && freed != 1)
+ IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed);
+
+ IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
+ iwl_get_tx_fail_reason(status), status);
+
+ IWL_DEBUG_TX_REPLY(priv,
+ "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame,
+ SEQ_TO_INDEX(sequence), ssn,
+ le16_to_cpu(tx_resp->seq_ctl));
}
iwl_check_abort_status(priv, tx_resp->frame_count, status);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
while (!skb_queue_empty(&skbs)) {
skb = __skb_dequeue(&skbs);
- ieee80211_tx_status(priv->hw, skb);
+ ieee80211_tx_status_ni(priv->hw, skb);
}
if (is_offchannel_skb)
@@ -1275,12 +1308,12 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
tid = ba_resp->tid;
agg = &priv->tid_data[sta_id][tid].agg;
- spin_lock(&priv->sta_lock);
+ spin_lock_bh(&priv->sta_lock);
if (unlikely(!agg->wait_for_ba)) {
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -1294,7 +1327,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
IWL_DEBUG_TX_QUEUES(priv,
"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
scd_flow, sta_id, tid, agg->txq_id);
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
return 0;
}
@@ -1363,11 +1396,11 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
}
}
- spin_unlock(&priv->sta_lock);
+ spin_unlock_bh(&priv->sta_lock);
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status(priv->hw, skb);
+ ieee80211_tx_status_ni(priv->hw, skb);
}
return 0;
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index c6467e5554f5..736fe9bb140e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -286,89 +286,6 @@ static int iwl_alive_notify(struct iwl_priv *priv)
return iwl_send_calib_results(priv);
}
-
-/**
- * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
- * using sample data 100 bytes apart. If these sample points are good,
- * it's a pretty good bet that everything between them is good, too.
- */
-static int iwl_verify_sec_sparse(struct iwl_priv *priv,
- const struct fw_desc *fw_desc)
-{
- __le32 *image = (__le32 *)fw_desc->data;
- u32 len = fw_desc->len;
- u32 val;
- u32 i;
-
- IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
-
- for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
- i + fw_desc->offset);
- val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image))
- return -EIO;
- }
-
- return 0;
-}
-
-static void iwl_print_mismatch_sec(struct iwl_priv *priv,
- const struct fw_desc *fw_desc)
-{
- __le32 *image = (__le32 *)fw_desc->data;
- u32 len = fw_desc->len;
- u32 val;
- u32 offs;
- int errors = 0;
-
- IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
-
- iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
- fw_desc->offset);
-
- for (offs = 0;
- offs < len && errors < 20;
- offs += sizeof(u32), image++) {
- /* read data comes through single port, auto-incr addr */
- val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- offs, val, le32_to_cpu(*image));
- errors++;
- }
- }
-}
-
-/**
- * iwl_verify_ucode - determine which instruction image is in SRAM,
- * and verify its contents
- */
-static int iwl_verify_ucode(struct iwl_priv *priv,
- enum iwl_ucode_type ucode_type)
-{
- const struct fw_img *img = iwl_get_ucode_image(priv, ucode_type);
-
- if (!img) {
- IWL_ERR(priv, "Invalid ucode requested (%d)\n", ucode_type);
- return -EINVAL;
- }
-
- if (!iwl_verify_sec_sparse(priv, &img->sec[IWL_UCODE_SECTION_INST])) {
- IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
- return 0;
- }
-
- IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
-
- iwl_print_mismatch_sec(priv, &img->sec[IWL_UCODE_SECTION_INST]);
- return -EIO;
-}
-
struct iwl_alive_data {
bool valid;
u8 subtype;
@@ -426,7 +343,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
alive_cmd, ARRAY_SIZE(alive_cmd),
iwl_alive_fn, &alive_data);
- ret = iwl_trans_start_fw(priv->trans, fw);
+ ret = iwl_trans_start_fw(priv->trans, fw, false);
if (ret) {
priv->cur_ucode = old_type;
iwl_remove_notification(&priv->notif_wait, &alive_wait);
@@ -450,18 +367,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
return -EIO;
}
- /*
- * This step takes a long time (60-80ms!!) and
- * WoWLAN image should be loaded quickly, so
- * skip it for WoWLAN.
- */
if (ucode_type != IWL_UCODE_WOWLAN) {
- ret = iwl_verify_ucode(priv, ucode_type);
- if (ret) {
- priv->cur_ucode = old_type;
- return ret;
- }
-
/* delay a bit to give rfkill time to run */
msleep(5);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 7960a52f6ad4..e9975c54c276 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 864219d2136a..743b48343358 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,6 +83,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_6030,
IWL_DEVICE_FAMILY_6050,
IWL_DEVICE_FAMILY_6150,
+ IWL_DEVICE_FAMILY_7000,
};
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 34a5287dfc2f..df3463a38704 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -381,8 +381,8 @@
/* LED */
#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
-#define CSR_LED_REG_TRUN_ON (0x78)
-#define CSR_LED_REG_TRUN_OFF (0x38)
+#define CSR_LED_REG_TURN_ON (0x60)
+#define CSR_LED_REG_TURN_OFF (0x20)
/* ANA_PLL */
#define CSR50_ANA_PLL_CFG_VAL (0x00880300)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 42b20b0e83bc..8cf5db7fb5c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -116,6 +116,7 @@ do { \
#define IWL_DL_HCMD 0x00000004
#define IWL_DL_STATE 0x00000008
/* 0x000000F0 - 0x00000010 */
+#define IWL_DL_TE 0x00000020
#define IWL_DL_EEPROM 0x00000040
#define IWL_DL_RADIO 0x00000080
/* 0x00000F00 - 0x00000100 */
@@ -156,6 +157,7 @@ do { \
#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
+#define IWL_DEBUG_TE(p, f, a...) IWL_DEBUG(p, IWL_DL_TE, f, ## a)
#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 70191ddbd8f6..8f61c717f619 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index dc7e26b2f383..9a0f45ec9e01 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2009 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2009 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index d3549f493a17..6f228bb2b844 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -139,8 +139,10 @@ struct iwl_drv {
#endif
};
-#define DVM_OP_MODE 0
-#define MVM_OP_MODE 1
+enum {
+ DVM_OP_MODE = 0,
+ MVM_OP_MODE = 1,
+};
/* Protects the table contents, i.e. the ops pointer & drv list */
static struct mutex iwlwifi_opmode_table_mtx;
@@ -149,8 +151,8 @@ static struct iwlwifi_opmode_table {
const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
struct list_head drv; /* list of devices using this op_mode */
} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
- { .name = "iwldvm", .ops = NULL },
- { .name = "iwlmvm", .ops = NULL },
+ [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
+ [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
};
/*
@@ -268,7 +270,7 @@ struct fw_sec_parsing {
*/
struct iwl_tlv_calib_data {
__le32 ucode_type;
- __le64 calib;
+ struct iwl_tlv_calib_ctrl calib;
} __packed;
struct iwl_firmware_pieces {
@@ -358,7 +360,11 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
ucode_type);
return -EINVAL;
}
- drv->fw.default_calib[ucode_type] = le64_to_cpu(def_calib->calib);
+ drv->fw.default_calib[ucode_type].flow_trigger =
+ def_calib->calib.flow_trigger;
+ drv->fw.default_calib[ucode_type].event_trigger =
+ def_calib->calib.event_trigger;
+
return 0;
}
@@ -959,7 +965,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
release_firmware(ucode_raw);
mutex_lock(&iwlwifi_opmode_table_mtx);
- op = &iwlwifi_opmode_table[DVM_OP_MODE];
+ if (fw->mvm_fw)
+ op = &iwlwifi_opmode_table[MVM_OP_MODE];
+ else
+ op = &iwlwifi_opmode_table[DVM_OP_MODE];
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 285de5f68c05..594a5c71b272 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,7 @@
/* for all modules */
#define DRV_NAME "iwlwifi"
#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2012 Intel Corporation"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2013 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 471986690cf0..034f2ff4f43d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -703,9 +703,9 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
return n_channels;
}
-static int iwl_init_sband_channels(struct iwl_nvm_data *data,
- struct ieee80211_supported_band *sband,
- int n_channels, enum ieee80211_band band)
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+ struct ieee80211_supported_band *sband,
+ int n_channels, enum ieee80211_band band)
{
struct ieee80211_channel *chan = &data->channels[0];
int n = 0, idx = 0;
@@ -728,10 +728,10 @@ static int iwl_init_sband_channels(struct iwl_nvm_data *data,
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data,
- struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
+void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band)
{
int max_bit_rate = 0;
u8 rx_chains;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 555f0eb61d48..683fe6a8c58f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -126,4 +126,13 @@ static inline void iwl_free_nvm_data(struct iwl_nvm_data *data)
int iwl_nvm_check_version(struct iwl_nvm_data *data,
struct iwl_trans *trans);
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+ struct ieee80211_supported_band *sband,
+ int n_channels, enum ieee80211_band band);
+
+void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band);
+
#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
index 27c7da3c6ed1..ef4806f27cf8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
index 1337c9d36fee..b2588c5cbf93 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index ec48563d3c6a..f5592fb3b1ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -225,6 +225,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
+#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c)
+#define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG
/**
* Rx Config/Status Registers (RCSR)
@@ -257,6 +259,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
+#define FH_MEM_RCSR_CHNL0_RBDCB_WPTR (FH_MEM_RCSR_CHNL0 + 0x8)
+#define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ (FH_MEM_RCSR_CHNL0 + 0x10)
#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
@@ -410,6 +414,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
* uCode/driver must write "1" in order to clear this flag
*/
#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
+#define FH_TSSR_TX_MSG_CONFIG_REG (FH_TSSR_LOWER_BOUND + 0x008)
#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index e71564053e7f..90873eca35f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index d1a86b66bc51..b545178e46e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -139,6 +139,19 @@ struct fw_img {
#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwl_tlv_calib_ctrl {
+ __le32 flow_trigger;
+ __le32 event_trigger;
+} __packed;
+
/**
* struct iwl_fw - variables associated with the firmware
*
@@ -153,11 +166,12 @@ struct fw_img {
* @inst_evtlog_ptr: event log offset for runtime ucode.
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offset for runtime ucode.
+ * @mvm_fw: indicates this is MVM firmware
*/
struct iwl_fw {
u32 ucode_ver;
- char fw_version[ETHTOOL_BUSINFO_LEN];
+ char fw_version[ETHTOOL_FWVERS_LEN];
/* ucode images */
struct fw_img img[IWL_UCODE_TYPE_MAX];
@@ -168,7 +182,7 @@ struct iwl_fw {
u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
- u64 default_calib[IWL_UCODE_TYPE_MAX];
+ struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
u32 phy_config;
bool mvm_fw;
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index cdaff9572059..276410d82de4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -35,54 +35,6 @@
#define IWL_POLL_INTERVAL 10 /* microseconds */
-static inline void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- iwl_write32(trans, reg, iwl_read32(trans, reg) | mask);
-}
-
-static inline void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- iwl_write32(trans, reg, iwl_read32(trans, reg) & ~mask);
-}
-
-void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- __iwl_set_bit(trans, reg, mask);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_set_bit);
-
-void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- __iwl_clear_bit(trans, reg, mask);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_clear_bit);
-
-void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
-{
- unsigned long flags;
- u32 v;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- WARN_ON_ONCE(value & ~mask);
-#endif
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- v = iwl_read32(trans, reg);
- v &= ~mask;
- v |= value;
- iwl_write32(trans, reg, v);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_set_bits_mask);
-
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
@@ -99,87 +51,14 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
}
EXPORT_SYMBOL_GPL(iwl_poll_bit);
-int iwl_grab_nic_access_silent(struct iwl_trans *trans)
-{
- int ret;
-
- lockdep_assert_held(&trans->reg_lock);
-
- /* this bit wakes up the NIC */
- __iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /*
- * These bits say the device is running, and should keep running for
- * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
- * but they do not indicate that embedded SRAM is restored yet;
- * 3945 and 4965 have volatile SRAM, and must save/restore contents
- * to/from host DRAM when sleeping/waking for power-saving.
- * Each direction takes approximately 1/4 millisecond; with this
- * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
- * series of register accesses are expected (e.g. reading Event Log),
- * to keep device from sleeping.
- *
- * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
- * SRAM is okay/restored. We don't check that here because this call
- * is just for hardware register access; but GP1 MAC_SLEEP check is a
- * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
- *
- * 5000 series and later (including 1000 series) have non-volatile SRAM,
- * and do not save/restore SRAM when power cycling.
- */
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
- CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
- if (ret < 0) {
- iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
- return -EIO;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(iwl_grab_nic_access_silent);
-
-bool iwl_grab_nic_access(struct iwl_trans *trans)
-{
- int ret = iwl_grab_nic_access_silent(trans);
- if (unlikely(ret)) {
- u32 val = iwl_read32(trans, CSR_GP_CNTRL);
- WARN_ONCE(1, "Timeout waiting for hardware access "
- "(CSR_GP_CNTRL 0x%08x)\n", val);
- return false;
- }
-
- return true;
-}
-EXPORT_SYMBOL_GPL(iwl_grab_nic_access);
-
-void iwl_release_nic_access(struct iwl_trans *trans)
-{
- lockdep_assert_held(&trans->reg_lock);
- __iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- /*
- * Above we read the CSR_GP_CNTRL register, which will flush
- * any previous writes, but we need the write that clears the
- * MAC_ACCESS_REQ bit to be performed before any other writes
- * scheduled on different CPUs (after we drop reg_lock).
- */
- mmiowb();
-}
-EXPORT_SYMBOL_GPL(iwl_release_nic_access);
-
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
- u32 value;
+ u32 value = 0x5a5a5a5a;
unsigned long flags;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- value = iwl_read32(trans, reg);
- iwl_release_nic_access(trans);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ value = iwl_read32(trans, reg);
+ iwl_trans_release_nic_access(trans, &flags);
+ }
return value;
}
@@ -189,12 +68,10 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
iwl_write32(trans, reg, value);
- iwl_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_write_direct32);
@@ -230,13 +107,12 @@ static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
unsigned long flags;
- u32 val;
+ u32 val = 0x5a5a5a5a;
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- val = __iwl_read_prph(trans, ofs);
- iwl_release_nic_access(trans);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ val = __iwl_read_prph(trans, ofs);
+ iwl_trans_release_nic_access(trans, &flags);
+ }
return val;
}
EXPORT_SYMBOL_GPL(iwl_read_prph);
@@ -245,12 +121,10 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs, val);
- iwl_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_write_prph);
@@ -258,13 +132,11 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs,
__iwl_read_prph(trans, ofs) | mask);
- iwl_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
@@ -273,13 +145,11 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
{
unsigned long flags;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
__iwl_write_prph(trans, ofs,
(__iwl_read_prph(trans, ofs) & mask) | bits);
- iwl_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
@@ -288,67 +158,10 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
unsigned long flags;
u32 val;
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
val = __iwl_read_prph(trans, ofs);
__iwl_write_prph(trans, ofs, (val & ~mask));
- iwl_release_nic_access(trans);
+ iwl_trans_release_nic_access(trans, &flags);
}
- spin_unlock_irqrestore(&trans->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
-
-void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
-{
- unsigned long flags;
- int offs;
- u32 *vals = buf;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
- iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
- for (offs = 0; offs < dwords; offs++)
- vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(trans);
- }
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords);
-
-u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
-{
- u32 value;
-
- _iwl_read_targ_mem_dwords(trans, addr, &value, 1);
-
- return value;
-}
-EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
-
-int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords)
-{
- unsigned long flags;
- int offs, result = 0;
- const u32 *vals = buf;
-
- spin_lock_irqsave(&trans->reg_lock, flags);
- if (likely(iwl_grab_nic_access(trans))) {
- iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
- for (offs = 0; offs < dwords; offs++)
- iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
- iwl_release_nic_access(trans);
- } else
- result = -EBUSY;
- spin_unlock_irqrestore(&trans->reg_lock, flags);
-
- return result;
-}
-EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords);
-
-int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
-{
- return _iwl_write_targ_mem_dwords(trans, addr, &val, 1);
-}
-EXPORT_SYMBOL_GPL(iwl_write_targ_mem);
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 48dc753e3742..fd9f5b97fff3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -51,20 +51,21 @@ static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
return val;
}
-void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
-void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
+static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+{
+ iwl_trans_set_bits_mask(trans, reg, mask, mask);
+}
-void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value);
+static inline void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+{
+ iwl_trans_set_bits_mask(trans, reg, mask, 0);
+}
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout);
-int iwl_grab_nic_access_silent(struct iwl_trans *trans);
-bool iwl_grab_nic_access(struct iwl_trans *trans);
-void iwl_release_nic_access(struct iwl_trans *trans);
-
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
@@ -76,19 +77,4 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
-void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords);
-
-#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \
- do { \
- BUILD_BUG_ON((bufsize) % sizeof(u32)); \
- _iwl_read_targ_mem_dwords(trans, addr, buf, \
- (bufsize) / sizeof(u32));\
- } while (0)
-
-int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords);
-
-u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
-int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index d9a86d6b2bd7..e5e3a79eae2f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index c61f2070f15a..c3affbc62cdf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 821523100cf1..c2ce764463a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
new file mode 100644
index 000000000000..a70213bdb83c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -0,0 +1,346 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "iwl-modparams.h"
+#include "iwl-nvm-parse.h"
+
+/* NVM offsets (in words) definitions */
+enum wkp_nvm_offsets {
+ /* NVM HW-Section offset (in words) definitions */
+ HW_ADDR = 0x15,
+
+/* NVM SW-Section offset (in words) definitions */
+ NVM_SW_SECTION = 0x1C0,
+ NVM_VERSION = 0,
+ RADIO_CFG = 1,
+ SKU = 2,
+ N_HW_ADDRS = 3,
+ NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
+
+/* NVM calibration section offset (in words) definitions */
+ NVM_CALIB_SECTION = 0x2B8,
+ XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
+};
+
+/* SKU Capabilities (actual values from NVM definition) */
+enum nvm_sku_bits {
+ NVM_SKU_CAP_BAND_24GHZ = BIT(0),
+ NVM_SKU_CAP_BAND_52GHZ = BIT(1),
+ NVM_SKU_CAP_11N_ENABLE = BIT(2),
+};
+
+/* radio config bits (actual values from NVM definition) */
+#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
+#define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+/*
+ * These are the channel numbers in the order that they are stored in the NVM
+ */
+static const u8 iwl_nvm_channels[] = {
+ /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ /* 5 GHz */
+ 36, 40, 44 , 48, 52, 56, 60, 64,
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+ 149, 153, 157, 161, 165
+};
+
+#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
+#define NUM_2GHZ_CHANNELS 14
+#define FIRST_2GHZ_HT_MINUS 5
+#define LAST_2GHZ_HT_PLUS 9
+#define LAST_5GHZ_HT 161
+
+
+/* rate data (static) */
+static struct ieee80211_rate iwl_cfg80211_rates[] = {
+ { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
+ { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
+ { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
+ { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
+ { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
+ { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
+ { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
+ { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
+ { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
+};
+#define RATES_24_OFFS 0
+#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates)
+#define RATES_52_OFFS 4
+#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
+
+/**
+ * enum iwl_nvm_channel_flags - channel flags in NVM
+ * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @NVM_CHANNEL_IBSS: usable as an IBSS channel
+ * @NVM_CHANNEL_ACTIVE: active scanning allowed
+ * @NVM_CHANNEL_RADAR: radar detection required
+ * @NVM_CHANNEL_DFS: dynamic freq selection candidate
+ * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
+ * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
+ */
+enum iwl_nvm_channel_flags {
+ NVM_CHANNEL_VALID = BIT(0),
+ NVM_CHANNEL_IBSS = BIT(1),
+ NVM_CHANNEL_ACTIVE = BIT(3),
+ NVM_CHANNEL_RADAR = BIT(4),
+ NVM_CHANNEL_DFS = BIT(7),
+ NVM_CHANNEL_WIDE = BIT(8),
+ NVM_CHANNEL_40MHZ = BIT(9),
+};
+
+#define CHECK_AND_PRINT_I(x) \
+ ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
+
+static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ const __le16 * const nvm_ch_flags)
+{
+ int ch_idx;
+ int n_channels = 0;
+ struct ieee80211_channel *channel;
+ u16 ch_flags;
+ bool is_5ghz;
+
+ for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
+ ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+ if (!(ch_flags & NVM_CHANNEL_VALID)) {
+ IWL_DEBUG_EEPROM(dev,
+ "Ch. %d Flags %x [%sGHz] - No traffic\n",
+ iwl_nvm_channels[ch_idx],
+ ch_flags,
+ (ch_idx >= NUM_2GHZ_CHANNELS) ?
+ "5.2" : "2.4");
+ continue;
+ }
+
+ channel = &data->channels[n_channels];
+ n_channels++;
+
+ channel->hw_value = iwl_nvm_channels[ch_idx];
+ channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ channel->center_freq =
+ ieee80211_channel_to_frequency(
+ channel->hw_value, channel->band);
+
+ /* TODO: needs to depend on the NVM */
+ channel->flags = IEEE80211_CHAN_NO_HT40;
+ if (ch_idx < NUM_2GHZ_CHANNELS &&
+ (ch_flags & NVM_CHANNEL_40MHZ)) {
+ if (iwl_nvm_channels[ch_idx] <= LAST_2GHZ_HT_PLUS)
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+ if (iwl_nvm_channels[ch_idx] >= FIRST_2GHZ_HT_MINUS)
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+ } else if (iwl_nvm_channels[ch_idx] <= LAST_5GHZ_HT &&
+ (ch_flags & NVM_CHANNEL_40MHZ)) {
+ if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+ else
+ channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+ }
+
+ if (!(ch_flags & NVM_CHANNEL_IBSS))
+ channel->flags |= IEEE80211_CHAN_NO_IBSS;
+
+ if (!(ch_flags & NVM_CHANNEL_ACTIVE))
+ channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if (ch_flags & NVM_CHANNEL_RADAR)
+ channel->flags |= IEEE80211_CHAN_RADAR;
+
+ /* Initialize regulatory-based run-time data */
+
+ /* TODO: read the real value from the NVM */
+ channel->max_power = 0;
+ is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
+ IWL_DEBUG_EEPROM(dev,
+ "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+ channel->hw_value,
+ is_5ghz ? "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(DFS),
+ ch_flags,
+ channel->max_power,
+ ((ch_flags & NVM_CHANNEL_IBSS) &&
+ !(ch_flags & NVM_CHANNEL_RADAR))
+ ? "" : "not ");
+ }
+
+ return n_channels;
+}
+
+static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data, const __le16 *nvm_sw)
+{
+ int n_channels = iwl_init_channel_map(dev, cfg, data,
+ &nvm_sw[NVM_CHANNELS]);
+ int n_used = 0;
+ struct ieee80211_supported_band *sband;
+
+ sband = &data->bands[IEEE80211_BAND_2GHZ];
+ sband->band = IEEE80211_BAND_2GHZ;
+ sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+ sband->n_bitrates = N_RATES_24;
+ n_used += iwl_init_sband_channels(data, sband, n_channels,
+ IEEE80211_BAND_2GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+ sband = &data->bands[IEEE80211_BAND_5GHZ];
+ sband->band = IEEE80211_BAND_5GHZ;
+ sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
+ sband->n_bitrates = N_RATES_52;
+ n_used += iwl_init_sband_channels(data, sband, n_channels,
+ IEEE80211_BAND_5GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+ if (n_channels != n_used)
+ IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
+ n_used, n_channels);
+}
+
+struct iwl_nvm_data *
+iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+ const __le16 *nvm_hw, const __le16 *nvm_sw,
+ const __le16 *nvm_calib)
+{
+ struct iwl_nvm_data *data;
+ u8 hw_addr[ETH_ALEN];
+ u16 radio_cfg, sku;
+
+ data = kzalloc(sizeof(*data) +
+ sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
+ GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->nvm_version = le16_to_cpup(nvm_sw + NVM_VERSION);
+
+ radio_cfg = le16_to_cpup(nvm_sw + RADIO_CFG);
+ data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
+ data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
+ data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
+ data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
+ data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
+
+ sku = le16_to_cpup(nvm_sw + SKU);
+ data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
+ data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
+ data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+ data->sku_cap_11n_enable = false;
+
+ /* check overrides (some devices have wrong NVM) */
+ if (cfg->valid_tx_ant)
+ data->valid_tx_ant = cfg->valid_tx_ant;
+ if (cfg->valid_rx_ant)
+ data->valid_rx_ant = cfg->valid_rx_ant;
+
+ if (!data->valid_tx_ant || !data->valid_rx_ant) {
+ IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
+ data->valid_tx_ant, data->valid_rx_ant);
+ kfree(data);
+ return NULL;
+ }
+
+ data->n_hw_addrs = le16_to_cpup(nvm_sw + N_HW_ADDRS);
+
+ data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
+ data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
+
+ /* The byte order is little endian 16 bit, meaning 214365 */
+ memcpy(hw_addr, nvm_hw + HW_ADDR, ETH_ALEN);
+ data->hw_addr[0] = hw_addr[1];
+ data->hw_addr[1] = hw_addr[0];
+ data->hw_addr[2] = hw_addr[3];
+ data->hw_addr[3] = hw_addr[2];
+ data->hw_addr[4] = hw_addr[5];
+ data->hw_addr[5] = hw_addr[4];
+
+ iwl_init_sbands(dev, cfg, data, nvm_sw);
+
+ data->calib_version = 255; /* TODO:
+ this value will prevent some checks from
+ failing; we need to check if this
+ field is still needed, and if so,
+ where it is in the NVM */
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(iwl_parse_nvm_data);
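The "little endian 16 bit, meaning 214365" note in the new parser above is easiest to see with a concrete value. The sketch below is illustrative only and not part of the patch; the raw buffer contents are made up, and only the pairwise swap mirrors the hw_addr[] assignments added above.

/* Illustrative sketch: the NVM HW section stores the MAC address as
 * little-endian 16-bit words, so the bytes appear pairwise swapped in
 * memory.  For an on-air address 00:11:22:33:44:55 the raw words read
 * 11 00 33 22 55 44; the loop below restores the usual byte order,
 * exactly like the six hw_addr[] assignments in iwl_parse_nvm_data(). */
static void example_nvm_mac_swap(void)
{
	const u8 raw[ETH_ALEN] = { 0x11, 0x00, 0x33, 0x22, 0x55, 0x44 };
	u8 mac[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i += 2) {
		mac[i] = raw[i + 1];
		mac[i + 1] = raw[i];
	}
	/* mac now holds 00:11:22:33:44:55 */
}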
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
new file mode 100644
index 000000000000..b2692bd287fa
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -0,0 +1,80 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#ifndef __iwl_nvm_parse_h__
+#define __iwl_nvm_parse_h__
+
+#include "iwl-eeprom-parse.h"
+
+/**
+ * iwl_parse_nvm_data - parse NVM data and return values
+ *
+ * This function parses all NVM values we need and then
+ * returns a (newly allocated) struct containing all the
+ * relevant values for driver use. The struct must be freed
+ * later with iwl_free_nvm_data().
+ */
+struct iwl_nvm_data *
+iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+ const __le16 *nvm_hw, const __le16 *nvm_sw,
+ const __le16 *nvm_calib);
+
+#endif /* __iwl_nvm_parse_h__ */
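
A minimal usage sketch for the API above, from an op_mode's point of view. The NVM section pointers and the example_setup_nvm() wrapper are illustrative only; per the kernel-doc the result must be released later (it names iwl_free_nvm_data(), which is not part of this diff, so the kfree() below is an assumption that simply matches the parser's own kzalloc()/kfree() error path):

	static int example_setup_nvm(struct device *dev, const struct iwl_cfg *cfg,
				     const __le16 *nvm_hw, const __le16 *nvm_sw,
				     const __le16 *nvm_calib)
	{
		struct iwl_nvm_data *data;

		data = iwl_parse_nvm_data(dev, cfg, nvm_hw, nvm_sw, nvm_calib);
		if (!data)
			return -EINVAL;	/* allocation failed or the NVM antenna config was invalid */

		/* ... use data->hw_addr, data->sku_cap_11n_enable, data->valid_tx_ant ... */

		kfree(data);		/* assumption: see note above */
		return 0;
	}
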
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index c8d9b9517468..4a680019e117 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,6 +63,8 @@
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__
+#include <linux/debugfs.h>
+
struct iwl_op_mode;
struct iwl_trans;
struct sk_buff;
@@ -111,13 +113,13 @@ struct iwl_cfg;
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
 *	HCMD this Rx responds to.
- * Must be atomic and called with BH disabled.
+ * This callback may sleep, it is called from a threaded IRQ handler.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
* @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
- * the radio is killed. Must be atomic.
+ * the radio is killed. May sleep.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
@@ -128,8 +130,7 @@ struct iwl_cfg;
* called with BH disabled.
* @nic_config: configure NIC, called before firmware is started.
* May sleep
- * @wimax_active: invoked when WiMax becomes active. Must be atomic and called
- * with BH disabled.
+ * @wimax_active: invoked when WiMax becomes active. May sleep
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -176,6 +177,7 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
+ might_sleep();
return op_mode->ops->rx(op_mode, rxb, cmd);
}
@@ -194,6 +196,7 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
bool state)
{
+ might_sleep();
op_mode->ops->hw_rf_kill(op_mode, state);
}
@@ -221,6 +224,7 @@ static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
+ might_sleep();
op_mode->ops->wimax_active(op_mode);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
new file mode 100644
index 000000000000..14fc8d39fc28
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -0,0 +1,514 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/export.h>
+
+#include "iwl-phy-db.h"
+#include "iwl-debug.h"
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+
+#define CHANNEL_NUM_SIZE	4 /* size of the channel-count field in calib_ch */
+#define IWL_NUM_PAPD_CH_GROUPS 4
+#define IWL_NUM_TXP_CH_GROUPS 9
+
+struct iwl_phy_db_entry {
+ u16 size;
+ u8 *data;
+};
+
+/**
+ * struct iwl_phy_db - stores phy configuration and calibration data.
+ *
+ * @cfg: phy configuration.
+ * @calib_nch: non channel specific calibration data.
+ * @calib_ch: channel specific calibration data.
+ * @calib_ch_group_papd: calibration data related to papd channel group.
+ * @calib_ch_group_txp: calibration data related to tx power channel group.
+ */
+struct iwl_phy_db {
+ struct iwl_phy_db_entry cfg;
+ struct iwl_phy_db_entry calib_nch;
+ struct iwl_phy_db_entry calib_ch;
+ struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
+ struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
+
+ u32 channel_num;
+ u32 channel_size;
+
+ struct iwl_trans *trans;
+};
+
+enum iwl_phy_db_section_type {
+ IWL_PHY_DB_CFG = 1,
+ IWL_PHY_DB_CALIB_NCH,
+ IWL_PHY_DB_CALIB_CH,
+ IWL_PHY_DB_CALIB_CHG_PAPD,
+ IWL_PHY_DB_CALIB_CHG_TXP,
+ IWL_PHY_DB_MAX
+};
+
+#define PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
+
+/*
+ * phy db - configure operational ucode
+ */
+struct iwl_phy_db_cmd {
+ __le16 type;
+ __le16 length;
+ u8 data[];
+} __packed;
+
+/* for parsing of tx power channel group data that comes from the firmware */
+struct iwl_phy_db_chg_txp {
+ __le32 space;
+ __le16 max_channel_idx;
+} __packed;
+
+/*
+ * phy db - Receive phy db chunk after calibrations
+ */
+struct iwl_calib_res_notif_phy_db {
+ __le16 type;
+ __le16 length;
+ u8 data[];
+} __packed;
+
+#define IWL_PHY_DB_STATIC_PIC cpu_to_le32(0x21436587)
+static inline void iwl_phy_db_test_pic(__le32 pic)
+{
+ WARN_ON(IWL_PHY_DB_STATIC_PIC != pic);
+}
+
+struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
+{
+ struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
+ GFP_KERNEL);
+
+ if (!phy_db)
+ return phy_db;
+
+ phy_db->trans = trans;
+
+ /* TODO: add default values of the phy db. */
+ return phy_db;
+}
+EXPORT_SYMBOL(iwl_phy_db_init);
+
+/*
+ * get phy db section: returns a pointer to a phy db section specified by
+ * type and channel group id.
+ */
+static struct iwl_phy_db_entry *
+iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
+ enum iwl_phy_db_section_type type,
+ u16 chg_id)
+{
+ if (!phy_db || type >= IWL_PHY_DB_MAX)
+ return NULL;
+
+ switch (type) {
+ case IWL_PHY_DB_CFG:
+ return &phy_db->cfg;
+ case IWL_PHY_DB_CALIB_NCH:
+ return &phy_db->calib_nch;
+ case IWL_PHY_DB_CALIB_CH:
+ return &phy_db->calib_ch;
+ case IWL_PHY_DB_CALIB_CHG_PAPD:
+ if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
+ return NULL;
+ return &phy_db->calib_ch_group_papd[chg_id];
+ case IWL_PHY_DB_CALIB_CHG_TXP:
+ if (chg_id >= IWL_NUM_TXP_CH_GROUPS)
+ return NULL;
+ return &phy_db->calib_ch_group_txp[chg_id];
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
+ enum iwl_phy_db_section_type type,
+ u16 chg_id)
+{
+ struct iwl_phy_db_entry *entry =
+ iwl_phy_db_get_section(phy_db, type, chg_id);
+ if (!entry)
+ return;
+
+ kfree(entry->data);
+ entry->data = NULL;
+ entry->size = 0;
+}
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db)
+{
+ int i;
+
+ if (!phy_db)
+ return;
+
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
+ for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
+ for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
+ iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
+
+ kfree(phy_db);
+}
+EXPORT_SYMBOL(iwl_phy_db_free);
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
+ gfp_t alloc_ctx)
+{
+ struct iwl_calib_res_notif_phy_db *phy_db_notif =
+ (struct iwl_calib_res_notif_phy_db *)pkt->data;
+ enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type);
+ u16 size = le16_to_cpu(phy_db_notif->length);
+ struct iwl_phy_db_entry *entry;
+ u16 chg_id = 0;
+
+ if (!phy_db)
+ return -EINVAL;
+
+ if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
+ type == IWL_PHY_DB_CALIB_CHG_TXP)
+ chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
+
+ entry = iwl_phy_db_get_section(phy_db, type, chg_id);
+ if (!entry)
+ return -EINVAL;
+
+ kfree(entry->data);
+ entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx);
+ if (!entry->data) {
+ entry->size = 0;
+ return -ENOMEM;
+ }
+
+ entry->size = size;
+
+ if (type == IWL_PHY_DB_CALIB_CH) {
+ phy_db->channel_num =
+ le32_to_cpup((__le32 *)phy_db_notif->data);
+ phy_db->channel_size =
+ (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
+ }
+
+ /* Test PIC */
+ if (type != IWL_PHY_DB_CFG)
+ iwl_phy_db_test_pic(*(((__le32 *)phy_db_notif->data) +
+ (size / sizeof(__le32)) - 1));
+
+ IWL_DEBUG_INFO(phy_db->trans,
+		       "%s(%d): [PHYDB] SET: Type %d, Size: %d\n",
+ __func__, __LINE__, type, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_phy_db_set_section);
+
+static int is_valid_channel(u16 ch_id)
+{
+ if (ch_id <= 14 ||
+ (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
+ (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
+ (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
+ return 1;
+ return 0;
+}
+
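+/*
+ * Map a channel number onto a contiguous index:
+ *   channels   1..14            -> indices  0..13
+ *   channels  36..64  (step 4)  -> indices 14..21
+ *   channels 100..140 (step 4)  -> indices 22..32
+ *   channels 145..165 (step 4)  -> indices 33..38
+ */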
+static u8 ch_id_to_ch_index(u16 ch_id)
+{
+ if (WARN_ON(!is_valid_channel(ch_id)))
+ return 0xff;
+
+ if (ch_id <= 14)
+ return ch_id - 1;
+ if (ch_id <= 64)
+ return (ch_id + 20) / 4;
+ if (ch_id <= 140)
+ return (ch_id - 12) / 4;
+ return (ch_id - 13) / 4;
+}
+
+static u16 channel_id_to_papd(u16 ch_id)
+{
+ if (WARN_ON(!is_valid_channel(ch_id)))
+ return 0xff;
+
+ if (1 <= ch_id && ch_id <= 14)
+ return 0;
+ if (36 <= ch_id && ch_id <= 64)
+ return 1;
+ if (100 <= ch_id && ch_id <= 140)
+ return 2;
+ return 3;
+}
+
+static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
+{
+ struct iwl_phy_db_chg_txp *txp_chg;
+ int i;
+ u8 ch_index = ch_id_to_ch_index(ch_id);
+ if (ch_index == 0xff)
+ return 0xff;
+
+ for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
+ txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
+ if (!txp_chg)
+ return 0xff;
+		/*
+		 * Look for the first channel group whose max channel is
+		 * higher than the wanted channel.
+		 */
+ if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
+ return i;
+ }
+ return 0xff;
+}
+
+static int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
+				       u32 type, u8 **data, u16 *size, u16 ch_id)
+{
+ struct iwl_phy_db_entry *entry;
+ u32 channel_num;
+ u32 channel_size;
+ u16 ch_group_id = 0;
+ u16 index;
+
+ if (!phy_db)
+ return -EINVAL;
+
+ /* find wanted channel group */
+ if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
+ ch_group_id = channel_id_to_papd(ch_id);
+ else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
+ ch_group_id = channel_id_to_txp(phy_db, ch_id);
+
+ entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
+ if (!entry)
+ return -EINVAL;
+
+ if (type == IWL_PHY_DB_CALIB_CH) {
+ index = ch_id_to_ch_index(ch_id);
+ channel_num = phy_db->channel_num;
+ channel_size = phy_db->channel_size;
+ if (index >= channel_num) {
+ IWL_ERR(phy_db->trans, "Wrong channel number %d\n",
+ ch_id);
+ return -EINVAL;
+ }
+ *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
+ *size = channel_size;
+ } else {
+ *data = entry->data;
+ *size = entry->size;
+ }
+
+ /* Test PIC */
+ if (type != IWL_PHY_DB_CFG)
+ iwl_phy_db_test_pic(*(((__le32 *)*data) +
+ (*size / sizeof(__le32)) - 1));
+
+ IWL_DEBUG_INFO(phy_db->trans,
+		       "%s(%d): [PHYDB] GET: Type %d, Size: %d\n",
+ __func__, __LINE__, type, *size);
+
+ return 0;
+}
+
+static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type,
+ u16 length, void *data)
+{
+ struct iwl_phy_db_cmd phy_db_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = PHY_DB_CMD,
+ .flags = CMD_SYNC,
+ };
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Sending PHY-DB hcmd of type %d, of length %d\n",
+ type, length);
+
+ /* Set phy db cmd variables */
+ phy_db_cmd.type = cpu_to_le16(type);
+ phy_db_cmd.length = cpu_to_le16(length);
+
+ /* Set hcmd variables */
+ cmd.data[0] = &phy_db_cmd;
+ cmd.len[0] = sizeof(struct iwl_phy_db_cmd);
+ cmd.data[1] = data;
+ cmd.len[1] = length;
+ cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
+
+ return iwl_trans_send_cmd(phy_db->trans, &cmd);
+}
+
+static int iwl_phy_db_send_all_channel_groups(
+ struct iwl_phy_db *phy_db,
+ enum iwl_phy_db_section_type type,
+ u8 max_ch_groups)
+{
+ u16 i;
+ int err;
+ struct iwl_phy_db_entry *entry;
+
+ /* Send all the channel specific groups to operational fw */
+ for (i = 0; i < max_ch_groups; i++) {
+ entry = iwl_phy_db_get_section(phy_db,
+ type,
+ i);
+ if (!entry)
+ return -EINVAL;
+
+ /* Send the requested PHY DB section */
+ err = iwl_send_phy_db_cmd(phy_db,
+ type,
+ entry->size,
+ entry->data);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Can't SEND phy_db section %d (%d), err %d",
+ type, i, err);
+ return err;
+ }
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Sent PHY_DB HCMD, type = %d num = %d",
+ type, i);
+ }
+
+ return 0;
+}
+
+int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
+{
+ u8 *data = NULL;
+ u16 size = 0;
+ int err;
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Sending phy db data and configuration to runtime image\n");
+
+ /* Send PHY DB CFG section */
+ err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG,
+ &data, &size, 0);
+ if (err) {
+ IWL_ERR(phy_db->trans, "Cannot get Phy DB cfg section\n");
+ return err;
+ }
+
+ err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CFG, size, data);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send HCMD of Phy DB cfg section\n");
+ return err;
+ }
+
+ err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CALIB_NCH,
+ &data, &size, 0);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot get Phy DB non specific channel section\n");
+ return err;
+ }
+
+ err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CALIB_NCH, size, data);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send HCMD of Phy DB non specific channel section\n");
+ return err;
+ }
+
+ /* Send all the TXP channel specific data */
+ err = iwl_phy_db_send_all_channel_groups(phy_db,
+ IWL_PHY_DB_CALIB_CHG_PAPD,
+ IWL_NUM_PAPD_CH_GROUPS);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send channel specific PAPD groups");
+ return err;
+ }
+
+ /* Send all the TXP channel specific data */
+ err = iwl_phy_db_send_all_channel_groups(phy_db,
+ IWL_PHY_DB_CALIB_CHG_TXP,
+ IWL_NUM_TXP_CH_GROUPS);
+ if (err) {
+ IWL_ERR(phy_db->trans,
+ "Cannot send channel specific TX power groups");
+ return err;
+ }
+
+ IWL_DEBUG_INFO(phy_db->trans,
+ "Finished sending phy db non channel data\n");
+ return 0;
+}
+EXPORT_SYMBOL(iwl_send_phy_db_data);
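
The expected flow through this file, as a hedged sketch. Only the iwl_phy_db_*() and iwl_send_phy_db_data() calls are defined by this patch; the surrounding ordering (INIT image calibration notifications first, runtime image afterwards), the example_phy_db_flow() wrapper and the GFP_ATOMIC choice are assumptions about the caller:

	static int example_phy_db_flow(struct iwl_trans *trans,
				       struct iwl_rx_packet *calib_pkt)
	{
		struct iwl_phy_db *phy_db;
		int ret;

		phy_db = iwl_phy_db_init(trans);	/* allocate and remember trans */
		if (!phy_db)
			return -ENOMEM;

		/* one call per calibration result notification (payload is a
		 * struct iwl_calib_res_notif_phy_db); GFP_ATOMIC is an assumption
		 * about the rx context */
		ret = iwl_phy_db_set_section(phy_db, calib_pkt, GFP_ATOMIC);
		if (ret)
			goto out;

		/* once the runtime image is alive, replay the cfg section, the
		 * non-channel calibration and the PAPD/TXP channel groups to it */
		ret = iwl_send_phy_db_data(phy_db);
	out:
		iwl_phy_db_free(phy_db);	/* frees every stored section */
		return ret;
	}
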
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
new file mode 100644
index 000000000000..d0e43d96ab38
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -0,0 +1,82 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_PHYDB_H__
+#define __IWL_PHYDB_H__
+
+#include <linux/types.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+
+struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans);
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db);
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
+ gfp_t alloc_ctx);
+
+int iwl_send_phy_db_data(struct iwl_phy_db *phy_db);
+
+#endif /* __IWL_PHYDB_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index c3a4bb41e533..f76e9cad7757 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -97,6 +97,9 @@
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+/* Device system time */
+#define DEVICE_SYSTEM_TIME_REG 0xA0206C
+
/**
* Tx Scheduler
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
index 81e8c7126d72..ce0c67b425ee 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.c
+++ b/drivers/net/wireless/iwlwifi/iwl-test.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -466,19 +466,18 @@ static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
/* Hard-coded periphery absolute address */
if (IWL_ABS_PRPH_START <= addr &&
addr < IWL_ABS_PRPH_START + PRPH_END) {
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
+		if (!iwl_trans_grab_nic_access(trans, false, &flags))
+			return -EIO;
iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
addr | (3 << 24));
for (i = 0; i < size; i += 4)
*(u32 *)(tst->mem.addr + i) =
iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
- iwl_release_nic_access(trans);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
+ iwl_trans_release_nic_access(trans, &flags);
} else { /* target memory (SRAM) */
- _iwl_read_targ_mem_dwords(trans, addr,
- tst->mem.addr,
- tst->mem.size / 4);
+ iwl_trans_read_mem(trans, addr, tst->mem.addr,
+ tst->mem.size / 4);
}
tst->mem.nchunks =
@@ -501,28 +500,25 @@ static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
if (IWL_ABS_PRPH_START <= addr &&
addr < IWL_ABS_PRPH_START + PRPH_END) {
- /* Periphery writes can be 1-3 bytes long, or DWORDs */
- if (size < 4) {
- memcpy(&val, buf, size);
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
- (addr & 0x0000FFFF) |
- ((size - 1) << 24));
- iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
- iwl_release_nic_access(trans);
- /* needed after consecutive writes w/o read */
- mmiowb();
- spin_unlock_irqrestore(&trans->reg_lock, flags);
- } else {
- if (size % 4)
- return -EINVAL;
- for (i = 0; i < size; i += 4)
- iwl_write_prph(trans, addr+i,
- *(u32 *)(buf+i));
- }
+ /* Periphery writes can be 1-3 bytes long, or DWORDs */
+ if (size < 4) {
+ memcpy(&val, buf, size);
+ if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ return -EIO;
+ iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
+ (addr & 0x0000FFFF) |
+ ((size - 1) << 24));
+ iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+ iwl_trans_release_nic_access(trans, &flags);
+ } else {
+ if (size % 4)
+ return -EINVAL;
+ for (i = 0; i < size; i += 4)
+ iwl_write_prph(trans, addr+i,
+ *(u32 *)(buf+i));
+ }
} else if (iwl_test_valid_hw_addr(tst, addr)) {
- _iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
+ iwl_trans_write_mem(trans, addr, buf, size / 4);
} else {
return -EINVAL;
}
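
The two hunks above convert iwl-test.c to the transport-level NIC-access pattern (the grab can fail, and no explicit reg_lock/mmiowb handling is needed any more). A minimal sketch of that pattern, mirroring the periphery read path in iwl_test_indirect_read(); example_prph_read() itself is illustrative, not part of this patch:

	static int example_prph_read(struct iwl_trans *trans, u32 addr, u32 *val)
	{
		unsigned long flags;

		/* wake the NIC; sleeping is not allowed until the release below */
		if (!iwl_trans_grab_nic_access(trans, false, &flags))
			return -EIO;

		iwl_write32(trans, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
		*val = iwl_read32(trans, HBUS_TARG_PRPH_RDAT);

		/* must be handed the same flags that grab_nic_access filled in */
		iwl_trans_release_nic_access(trans, &flags);
		return 0;
	}
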
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
index e13ffa8acc02..7fbf4d717caa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.h
+++ b/drivers/net/wireless/iwlwifi/iwl-test.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index 6ba211b09426..a963f45c6849 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index b76532e238c1..8c7bec6b9a0b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,6 +65,7 @@
#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
+#include <linux/lockdep.h>
#include "iwl-debug.h"
#include "iwl-config.h"
@@ -193,11 +194,11 @@ struct iwl_rx_packet {
* @CMD_ON_DEMAND: This command is sent by the test mode pipe.
*/
enum CMD_MODE {
- CMD_SYNC = 0,
- CMD_ASYNC = BIT(0),
- CMD_WANT_SKB = BIT(1),
- CMD_WANT_HCMD = BIT(2),
- CMD_ON_DEMAND = BIT(3),
+ CMD_SYNC = 0,
+ CMD_ASYNC = BIT(0),
+ CMD_WANT_SKB = BIT(1),
+ CMD_WANT_HCMD = BIT(2),
+ CMD_ON_DEMAND = BIT(3),
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -274,6 +275,7 @@ struct iwl_rx_cmd_buffer {
struct page *_page;
int _offset;
bool _page_stolen;
+ u32 _rx_page_order;
unsigned int truesize;
};
@@ -294,6 +296,11 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
return r->_page;
}
+static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
+{
+ __free_pages(r->_page, r->_rx_page_order);
+}
+
#define MAX_NO_RECLAIM_CMDS 6
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
@@ -308,6 +315,16 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
#define IWL_FRAME_LIMIT 64
/**
+ * enum iwl_d3_status - WoWLAN image/device status
+ * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
+ * @IWL_D3_STATUS_RESET: device was reset while suspended
+ */
+enum iwl_d3_status {
+ IWL_D3_STATUS_ALIVE,
+ IWL_D3_STATUS_RESET,
+};
+
+/**
* struct iwl_trans_config - transport configuration
*
* @op_mode: pointer to the upper layer.
@@ -321,6 +338,8 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
* @n_no_reclaim_cmds: # of commands in list
* @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs,
* if unset 4k will be the RX buffer size
+ * @bc_table_dword: set to true if the BC table expects the byte count to be
+ * in DWORD (as opposed to bytes)
* @queue_watchdog_timeout: time (in ms) after which queues
* are considered stuck and will trigger device restart
* @command_names: array of command names, must be 256 entries
@@ -335,6 +354,7 @@ struct iwl_trans_config {
int n_no_reclaim_cmds;
bool rx_buf_size_8k;
+ bool bc_table_dword;
unsigned int queue_watchdog_timeout;
const char **command_names;
};
@@ -360,9 +380,12 @@ struct iwl_trans;
* May sleep
* @stop_device:stops the whole device (embedded CPU put to reset)
* May sleep
- * @wowlan_suspend: put the device into the correct mode for WoWLAN during
+ * @d3_suspend: put the device into the correct mode for WoWLAN during
* suspend. This is optional, if not implemented WoWLAN will not be
* supported. This callback may sleep.
+ * @d3_resume: resume the device after WoWLAN, enabling the opmode to
+ *	talk to the WoWLAN image to get its status. This is optional; if not
+ *	implemented, WoWLAN will not be supported. This callback may sleep.
* @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
* If RFkill is asserted in the middle of a SYNC host command, it must
* return -ERFKILL straight away.
@@ -387,20 +410,31 @@ struct iwl_trans;
* @read32: read a u32 register at offset ofs from the BAR
* @read_prph: read a DWORD from a periphery register
* @write_prph: write a DWORD to a periphery register
+ * @read_mem: read device's SRAM in DWORD
+ * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
+ * will be zeroed.
* @configure: configure parameters required by the transport layer from
* the op_mode. May be called several times before start_fw, can't be
* called after that.
* @set_pmi: set the power pmi state
+ * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
+ * Sleeping is not allowed between grab_nic_access and
+ * release_nic_access.
+ * @release_nic_access: let the NIC go to sleep. The "flags" parameter
+ *	must be the same one that was earlier passed to grab_nic_access.
+ * @set_bits_mask: set bits in a register according to value and mask.
*/
struct iwl_trans_ops {
int (*start_hw)(struct iwl_trans *iwl_trans);
void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
- int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
+ int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
+ bool run_in_rfkill);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans);
- void (*wowlan_suspend)(struct iwl_trans *trans);
+ void (*d3_suspend)(struct iwl_trans *trans);
+ int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@@ -424,9 +458,19 @@ struct iwl_trans_ops {
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
+ int (*read_mem)(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
+ int (*write_mem)(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
+ bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
+ unsigned long *flags);
+ void (*release_nic_access)(struct iwl_trans *trans,
+ unsigned long *flags);
+ void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
+ u32 value);
};
/**
@@ -446,7 +490,6 @@ enum iwl_trans_state {
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @cfg - pointer to the configuration
- * @reg_lock - protect hw register access
* @dev - pointer to struct device * that represents the device
* @hw_id: a u32 with the ID of the device / subdevice.
* Set during transport allocation.
@@ -467,7 +510,6 @@ struct iwl_trans {
struct iwl_op_mode *op_mode;
const struct iwl_cfg *cfg;
enum iwl_trans_state state;
- spinlock_t reg_lock;
struct device *dev;
u32 hw_rev;
@@ -485,6 +527,10 @@ struct iwl_trans {
struct dentry *dbgfs_dir;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map sync_cmd_lockdep_map;
+#endif
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -528,13 +574,14 @@ static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
}
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw)
+ const struct fw_img *fw,
+ bool run_in_rfkill)
{
might_sleep();
WARN_ON_ONCE(!trans->rx_mpdu_cmd);
- return trans->ops->start_fw(trans, fw);
+ return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
@@ -546,19 +593,36 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
trans->state = IWL_TRANS_NO_FW;
}
-static inline void iwl_trans_wowlan_suspend(struct iwl_trans *trans)
+static inline void iwl_trans_d3_suspend(struct iwl_trans *trans)
{
might_sleep();
- trans->ops->wowlan_suspend(trans);
+ trans->ops->d3_suspend(trans);
+}
+
+static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status)
+{
+ might_sleep();
+ return trans->ops->d3_resume(trans, status);
}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
+ struct iwl_host_cmd *cmd)
{
+ int ret;
+
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"%s bad state = %d", __func__, trans->state);
- return trans->ops->send_cmd(trans, cmd);
+ if (!(cmd->flags & CMD_ASYNC))
+ lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
+
+ ret = trans->ops->send_cmd(trans, cmd);
+
+ if (!(cmd->flags & CMD_ASYNC))
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+
+ return ret;
}
static inline struct iwl_device_cmd *
@@ -636,7 +700,7 @@ static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
}
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
- struct dentry *dir)
+ struct dentry *dir)
{
return trans->ops->dbgfs_register(trans, dir);
}
@@ -679,15 +743,77 @@ static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
return trans->ops->write_prph(trans, ofs, val);
}
+static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ return trans->ops->read_mem(trans, addr, buf, dwords);
+}
+
+#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
+ do { \
+ if (__builtin_constant_p(bufsize)) \
+ BUILD_BUG_ON((bufsize) % sizeof(u32)); \
+ iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
+ } while (0)
+
+static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)